// projects/neural/layer.inc.cpp

#ifndef LAYER_INC_CPP
#define LAYER_INC_CPP

#include "common.inc.cpp"

class WeightHolder {
e865c9
public:
b579b3
  const int weightsCount;
b579b3
  Weight *weights;
15c502
  
b579b3
  const char *filename;
b579b3
  
b579b3
  explicit WeightHolder(int weightsCount = 0, Weight *weights = nullptr):
b579b3
    weightsCount(weightsCount), weights(weights), filename()
b579b3
    { assert(weightsCount >= 0); }
b579b3
  
b579b3
  
b579b3
  virtual ~WeightHolder() { }
b579b3
  
b579b3
  
b579b3
  bool save(bool demoOnly = false) {
b579b3
    if (filename && weightsCount && !demoOnly) {
b579b3
      FILE *f = fopen(filename, "wb");
b579b3
      if (!f)
b579b3
        return printf("cannot open file for write: %s\n", filename), false;
b579b3
      if (!fwrite(weights, sizeof(*weights)*weightsCount, 1, f))
b579b3
        return fclose(f), printf("cannot write to file: %s\n", filename), false;
b579b3
      fclose(f);
b579b3
    }
b579b3
    return saveDemo();
b579b3
  }
e865c9
e865c9
b579b3
  bool load() {
b579b3
    if (filename && weightsCount) {
b579b3
      FILE *f = fopen(filename, "rb");
b579b3
      if (!f)
b579b3
        return printf("cannot open file for read: %s\n", filename), false;
b579b3
      if (!fread(weights, sizeof(*weights)*weightsCount, 1, f))
b579b3
        return fclose(f), printf("cannot read from file: %s\n", filename), false;
b579b3
      fclose(f);
b579b3
    }
b579b3
    return true;
e865c9
  }
b579b3
  
e865c9
b579b3
  virtual bool saveDemo() { return true; }
e865c9
};


class Layer: public WeightHolder {
e865c9
public:
e865c9
  Layer *prev, *next;
e865c9
e865c9
  Layout layout;
e865c9
e865c9
  Neuron *neurons;
e865c9
  int neuronsCount;
e865c9
e865c9
  bool ownWeights;
e865c9
b579b3
  bool skipTrain;
e865c9
e865c9
  Stat stat;
e865c9
e865c9
  Layout::List mtLayouts;
036a8f
  Layout::List mtPrevLayouts;
e865c9
e865c9
e865c9
  Layer(Layer *prev, const Layout &layout, int weightsCount = 0, Weight *weights = nullptr):
b579b3
    WeightHolder(weightsCount, weights),
e865c9
    prev(prev ? &prev->back() : nullptr),
e865c9
    next(),
e865c9
    layout(layout),
e865c9
    neurons(),
e865c9
    neuronsCount(layout.getCount()),
e865c9
    ownWeights(!weights && weightsCount),
b579b3
    skipTrain()
e865c9
  {
e865c9
    assert(layout);
e865c9
    assert(neuronsCount > 0);
e865c9
    assert(weightsCount >= 0);
15c502
    assert(prev || !weightsCount);
e865c9
e865c9
    if (this->prev) this->prev->next = this;
e865c9
    if (neuronsCount) {
e865c9
      neurons = new Neuron[neuronsCount];
e865c9
      memset(neurons, 0, sizeof(*neurons)*neuronsCount);
e865c9
    }
e865c9
    if (ownWeights) {
e865c9
      this->weights = new Weight[weightsCount];
e865c9
      memset(this->weights, 0, sizeof(*this->weights)*weightsCount);
e865c9
    }
e865c9
e865c9
    stat.neurons = neuronsCount;
e865c9
    stat.activeNeurons = layout.getActiveCount();
e865c9
    stat.weights = weightsCount;
e865c9
    stat.links = weightsCount;
e865c9
    stat.memsize = neuronsCount*sizeof(*neurons);
e865c9
    if (ownWeights) stat.memsize += weightsCount*sizeof(*weights);
e865c9
  }
e865c9
e865c9
b579b3
  ~Layer() {
e865c9
    if (next) delete next;
e865c9
    if (neurons) delete[] neurons;
e865c9
    if (ownWeights) delete[] weights;
e865c9
  }
e865c9
e865c9
e865c9
  inline Layer& front()
e865c9
    { Layer *l = this; while(l->prev) l = l->prev; return *l; }
e865c9
  inline Layer& back()
e865c9
    { Layer *l = this; while(l->next) l = l->next; return *l; }
e865c9
  inline Stat sumStat() const
e865c9
    { Stat s; for(const Layer *l = this; l; l = l->next) s += l->stat; return s; }
e865c9
b579b3
  bool save(bool demoOnly = false)
b579b3
    { return WeightHolder::save(demoOnly) && (!next || next->save(demoOnly)); }
b579b3
  bool load()
b579b3
    { return WeightHolder::load() && (!next || next->load()); }
e865c9
b579b3
 
e865c9
  void clearAccum() {
e865c9
    Accum a = {};
e865c9
    for(Neuron *in = neurons, *e = in + neuronsCount; in < e; ++in)
e865c9
      in->a = a;
e865c9
  }
e865c9
e865c9
  
e865c9
  void fillWeights(WeightReal wmin, WeightReal wmax) {
e865c9
    WeightReal k = (wmax - wmin)/RAND_MAX;
e865c9
    for(Weight *iw = weights, *e = iw + weightsCount; iw < e; ++iw)
e865c9
      iw->w = rand()*k + wmin;
e865c9
  }
e865c9
 
b579b3
036a8f
  virtual void split(int threadsCount) {
036a8f
    layout.split(mtLayouts, threadsCount);
036a8f
    if (prev) prev->layout.split(mtPrevLayouts, threadsCount);
036a8f
  }
e865c9
  virtual void pass(Barrier &barrier) { }
e865c9
  virtual void backpassWeights(Barrier &barrier) { }
e865c9
  virtual void backpassDeltas(Barrier &barrier) { }
e865c9
  
e865c9
  virtual void testPass() { }
e865c9
  virtual void testBackpass() { }
036a8f
  
036a8f
  
b579b3
  void passFull(const Layer *last = nullptr, int threadsCount = 1) {
b579b3
    struct H {
b579b3
      Layer &layer;
b579b3
      const Layer *last;
b579b3
      std::atomic<unsigned int=""> barrierCounter;</unsigned>
b579b3
      std::vector<std::thread*> threads;</std::thread*>
b579b3
      
b579b3
      H(Layer &layer, const Layer *last, int threadsCount): layer(layer), last(last), barrierCounter(0), threads(threadsCount) { }
b579b3
      
b579b3
      void func(int tid, unsigned int seed) {
b579b3
        Barrier barrier(barrierCounter, tid, threads.size(), seed);
b579b3
        for(Layer *l = layer.next; l; l = l->next) {
b579b3
          l->pass(barrier);
b579b3
          if (l == last || !l->next) break;
b579b3
          barrier.wait();
b579b3
        }
b579b3
      }
b579b3
    } h(*this, last, threadsCount);
b579b3
    
b579b3
    for(Layer *l = this; l; l = l->next)
b579b3
      l->split(threadsCount);
b579b3
    for(int i = 1; i < threadsCount; ++i)
b579b3
      h.threads[i] = new std::thread(&H::func, &h, i, rand());
b579b3
    h.func(0, rand());
b579b3
    for(int i = 1; i < threadsCount; ++i)
b579b3
      { h.threads[i]->join(); delete h.threads[i]; }
b579b3
  }
e865c9
};


#endif