#ifndef SEGMENT_CONV_INC_CPP
#define SEGMENT_CONV_INC_CPP


#include "segment.inc.cpp"
#include "func.inc.cpp"
#include "layer.conv.inc.cpp"


// Convolutional segment: a KSX x KSY kernel applied with stride 2 maps the front layer
// (sx*sy*sz) onto a mid layer (msx*msy*msz), projects it back through the same weights,
// and trains the weights on the reconstruction error against the front values.
class SegmentConv: public Segment {
public:
  enum {
    KSX = 4,
    KSY = 4,
  };
  
  const int msx, msy, msz;  // mid-layer dimensions: (sx - KSX)/2 + 1 etc., e.g. sx = 12 gives msx = 5
  
  NeuronReal *m_values;     // mid-layer activations
  NeuronReal *b_values;     // back-projected front-layer values, reused to hold deltas in testPass()
  
  // weight count passed to Segment: msz kernels of KSY*KSX*sz weights each (one kernel per mid channel)
  SegmentConv(int sx, int sy, int sz, int msz, Weight *weights = nullptr):
    Segment(sx, sy, sz, msz*KSY*KSX*sz, weights), msx((sx - KSX)/2 + 1), msy((sy - KSY)/2 + 1), msz(msz)
  {
    assert(msx > 0);
    assert(msy > 0);
    assert(msz > 0);
    m_values = new NeuronReal[msx*msy*msz + sx*sy*sz];
    b_values = m_values + msx*msy*msz;
    clear();
  }
  ~SegmentConv()
    { delete[] m_values; }  
  
  
  void clear() override
    { memset(m_values, 0, sizeof(*m_values)*(msx*msy*msz + sx*sy*sz)); }

    
  inline void check(int x, int y, int z) {
    Segment::check(x, y, z);
    assert(layout.getD() == sz);
  }


  
  Quality pass(Barrier &barrier, int x, int y, int z, NeuronReal trainRatio) override {
    check(x, y, z);
    Layout l = layout;
    NeuronReal *f_values = this->f_values + (y*l.sx + x)*l.sz + z;
    return pass(barrier, f_values, trainRatio);
  }
  
  // per-thread worker; f_values points at this segment's origin inside the front layout
  __attribute__((always_inline))
  inline Quality pass(Barrier &barrier, NeuronReal *f_values, NeuronReal trainRatio) {
    Layout l = layout;
    int tid = barrier.tid;
    int threads = barrier.threads;
    
    int sx = this->sx;
    //int sy = this->sy;
    int sz = this->sz;
    int msx = this->msx;
    int msy = this->msy;
    int msz = this->msz;
    int msxz = msx*msz;
    
    int ksxz = KSX*sz;
    int ksyxz = KSY*ksxz;
    
    
    // stage 1: pass from front to mid
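    // each thread takes a contiguous slice [mi0, mi1) of the msx*msy*msz mid neurons and
    // computes a ReLU activation from its KSX x KSY x sz receptive field in f_values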
    
    int f_sxz = l.sx*l.sz;
    int f_sz2 = l.sz*2;
    int f_sxz2 = l.sx*f_sz2;
    
    int m_cnt = msx*msy*msz;
    int mi0 = m_cnt*tid/threads;
    int mi1 = m_cnt*(tid+1)/threads;
    
    for(int i = mi0; i < mi1; ++i) {
      int my = i/msxz;
      int mx = i%msxz/msz;
      int mz = i%msz;
      
      AccumReal a = 0;
      int wi = i*ksyxz;
      int fvi = my*f_sxz2 + mx*f_sz2 + mz;
      for(int ky = 0; ky < KSY; ++ky, fvi += f_sxz, wi += ksxz) {
        Weight *iw = &weights[wi];
        NeuronReal *ifv = &f_values[fvi];
        for(int j = 0; j < ksxz; ++j)
          a += ifv[j]*iw[j].w;
      }
      
      m_values[i] = a > 0 ? a : 0;
    }
    
    barrier.wait();
    
    // stage 2: pass from mid to back and verify
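    // project the mid layer back through the kernels, ReLU the result and compare it with
    // the front values: the squared error goes into qa, the scaled delta stays in bn.d.
    // Note: this stage works on m_neurons/b_neurons (per-neuron value v and delta d), which
    // are assumed to be provided elsewhere (e.g. by the Segment base), not on the flat
    // m_values/b_values buffers owned by this class.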
    
    AccumReal qa = 0;
    for(int by = 2 + tid; by < 10; by += threads)
    for(int bx = 2; bx < 10; ++bx)
    for(int bz = 0; bz < sz; ++bz) {
      AccumReal a = 0;
      Neuron &bn = b_neurons[ (by*sx + bx)*sz + bz ];
      
      for(int ky = by%2; ky < KSY; ky += 2)
      for(int kx = bx%2; kx < KSX; kx += 2) {
        int mx = (bx - kx)/2;
        int my = (by - ky)/2;
        assert(mx >= 0 && mx < msx && (bx - kx)%2 == 0);
        assert(my >= 0 && my < msy && (by - ky)%2 == 0);
        for(int mz = 0; mz < msz; ++mz) {
          Neuron &mn = m_neurons[ (my*msx + mx)*msz + mz ];
          Weight &w = weights[ ((mz*KSY + ky)*KSX + kx)*sz + bz ];
          a += mn.v * w.w;
        }
      }
      
      if (a > 0) bn.v = a, bn.d = 1; else bn.v = bn.d = 0;
      
      NeuronReal fn = f_values[ (by*l.sx + bx)*l.sz + bz ];
      NeuronReal d = fn - bn.v;
      bn.d *= d*trainRatio;
      qa += d*d;
    }
    Quality q(qa/(64*sz));
    
    if (trainRatio <= 0) return q;
    
    barrier.wait();
    
    // stage 3: backpass deltas
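    // propagate the back-layer deltas to the mid layer through the same weights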
    
    for(int mz = tid; mz < msz; mz += threads)
    for(int my = 1; my < 4; ++my)
    for(int mx = 1; mx < 4; ++mx) {
      AccumReal a = 0;
      Neuron &mn = m_neurons[ (my*msx + mx)*msz + mz ];
      
      for(int ky = 0; ky < KSY; ++ky)
      for(int kx = 0; kx < KSX; ++kx)
      for(int kz = 0; kz < sz;  ++kz) {
        int bx = mx*2 + kx;
        int by = my*2 + ky;
        Neuron &bn = b_neurons[ (by*sx + bx)*sz + kz ];
        Weight &w = weights[ ((mz*KSY + ky)*KSX + kx)*sz + kz ];
        a += bn.d * w.w;
      }
      mn.d *= a;
    }
    
    barrier.wait();
    
    // stage 4: update weights
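    // symmetric update: each weight gets back-delta * mid-activation plus mid-delta * front-value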

    for(int mz = tid; mz < msz; mz += threads)
    for(int by = 4; by <  8; ++by)
    for(int bx = 4; bx <  8; ++bx)
    for(int bz = 0; bz < sz; ++bz) {
      Neuron &bn = b_neurons[ (by*sx + bx)*sz + bz ];
      NeuronReal fv = f_values[ (by*l.sx + bx)*l.sz + bz ];
      
      for(int ky = by%2; ky < KSY; ky += 2)
      for(int kx = bx%2; kx < KSX; kx += 2) {
        int mx = (bx - kx)/2;
        int my = (by - ky)/2;
        assert(mx >= 1 && mx < 4 && (bx - kx)%2 == 0);
        assert(my >= 1 && my < 4 && (by - ky)%2 == 0);
        Neuron &mn = m_neurons[ (my*msx + mx)*msz + mz ];
        Weight &w = weights[ ((mz*KSY + ky)*KSX + kx)*sz + bz ];
        w.w += bn.d*mn.v + mn.d*fv;
      }
    }
    
    return q;
  }
  
  
  
  Quality testPass(int x, int y, int z, NeuronReal trainRatio) override {
    check(x, y, z);
    
    Layout l = layout;
    
    // stage 1: pass
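    // single-threaded reference path: compute every mid activation and immediately
    // accumulate its back-projection into b_values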
    
    clear();
    
    for(int my = 0; my < msy; ++my)
    for(int mx = 0; mx < msx; ++mx)
    for(int mz = 0; mz < msz; ++mz) {
      AccumReal a = 0;
      for(int ky = 0; ky < KSY; ++ky)
      for(int kx = 0; kx < KSX; ++kx)
      for(int kz = 0; kz < sz;  ++kz) {
        int fx = x + mx*2 + kx;
        int fy = y + my*2 + ky;
        int fz = z + kz;
        NeuronReal fv = f_values[ (fy*l.sx + fx)*l.sz + fz ];
        Weight &w = weights[ ((mz*KSY + ky)*KSX + kx)*sz + kz ];
        a += fv * w.w;
      }
      
      NeuronReal &mv = m_values[ (my*msx + mx)*msz + mz ];
      if (a < 0) { mv = 0; continue; }
      mv = a;
      
      for(int ky = 0; ky < KSY; ++ky)
      for(int kx = 0; kx < KSX; ++kx)
      for(int kz = 0; kz < sz;  ++kz) {
        int bx = mx*2 + kx;
        int by = my*2 + ky;
        int bz = kz;
        NeuronReal &bv = b_values[ (by*sx + bx)*sz + bz ];
        Weight &w = weights[ ((mz*KSY + ky)*KSX + kx)*sz + kz ];
        bv += a * w.w;
      }
    }
    
    // stage 2: finalize values and verify
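    // ReLU the back-projection, replace it in-place with the scaled delta, and accumulate
    // the squared reconstruction error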
    
    AccumReal qa = 0;
    for(int by = 0; by < sy; ++by)
    for(int bx = 0; bx < sx; ++bx)
    for(int bz = 0; bz < sz; ++bz) {
      NeuronReal fn = f_values[ ((y + by)*l.sx + x + bx)*l.sz + z + bz ];
      NeuronReal &bv = b_values[ (by*sx + bx)*sz + bz ];
      if (bv > 0) {
        NeuronReal d = fn - bv;
        bv = d*trainRatio;
        qa += d*d;
      } else {
        bv = 0;
        qa += fn*fn;
      }
    }
    Quality q(qa/(sx*sy*sz)); // mean squared error over the whole verified back layer
    
    if (trainRatio <= 0) return q;
    
    // stage 3: backpass deltas and update weights
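    // for every active mid neuron: gather the back deltas through its kernel into a,
    // then apply the same symmetric weight update as the threaded pass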
    
    for(int my = 0; my < msy; ++my)
    for(int mx = 0; mx < msx; ++mx)
    for(int mz = 0; mz < msz; ++mz) {
      NeuronReal mv = m_values[ (my*msx + mx)*msz + mz ];
      if (!mv) continue;
      
      AccumReal a = 0;
      for(int ky = 0; ky < KSY; ++ky)
      for(int kx = 0; kx < KSX; ++kx)
      for(int kz = 0; kz < sz;  ++kz) {
        int bx = mx*2 + kx;
        int by = my*2 + ky;
        int bz = kz;
        NeuronReal bv = b_values[ (by*sx + bx)*sz + bz ];
        Weight &w = weights[ ((mz*KSY + ky)*KSX + kx)*sz + kz ];
        a += bv * w.w;
      }

      for(int ky = 0; ky < KSY; ++ky)
      for(int kx = 0; kx < KSX; ++kx)
      for(int kz = 0; kz < sz;  ++kz) {
        int bx = mx*2 + kx;
        int by = my*2 + ky;
        int bz = kz;
        NeuronReal fv = f_values[ ((y + by)*l.sx + x + bx)*l.sz + z + bz ];
        NeuronReal bv = b_values[ (by*sx + bx)*sz + bz ];
        Weight &w = weights[ ((mz*KSY + ky)*KSX + kx)*sz + kz ];
        w.w += bv*mv + fv*a;
      }
    }
    
    return q;
  }

  
  bool saveDemo() override
    { return !filename || saveConvDemoImage(filename, msz, KSX, KSY, sz, weights); }
};
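

// Minimal usage sketch, kept out of the build with #if 0. It relies only on the SegmentConv
// interface declared above; the layer sizes are illustrative, the default weights argument
// (nullptr) is left to whatever segment.inc.cpp does with it, and the front-layer values and
// layout are assumed to have been prepared by the Segment base before testPass() is called.
#if 0
static void segmentConvDemo() {
  SegmentConv seg(16, 16, 3, 8);              // 16x16x3 front layer, 8 mid channels (msx = msy = 7)
  Quality q0 = seg.testPass(0, 0, 0, 0);      // trainRatio <= 0: forward + verification only
  Quality q1 = seg.testPass(0, 0, 0, 0.001);  // trainRatio > 0: also updates the weights
  (void)q0; (void)q1;
}
#endif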




#endif