// simple/neural/nnlayer.inc.c
#ifndef NNLAYER_INC_C
#define NNLAYER_INC_C
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
typedef struct NeuralLayer {
ec0d7e
  struct NeuralLayer *prev, *next;
ec0d7e
  int size;
ec0d7e
  double trainRatio;
56d550
  double *a, *b, *d, *da, *w;
ec0d7e
} NeuralLayer;
NeuralLayer* nlNew(NeuralLayer *prev, int size, double trainRatio) {
ec0d7e
  assert(size > 0);
ec0d7e
  while(prev && prev->next) prev = prev->next;
ec0d7e
  NeuralLayer *nl = calloc(sizeof(NeuralLayer), 1);
ec0d7e
  nl->size = size;
ec0d7e
  nl->prev = prev;
ec0d7e
  nl->trainRatio = trainRatio;
56d550
  nl->a = calloc(sizeof(*nl->a), size*4);
ec0d7e
  nl->d = nl->a + size;
ec0d7e
  nl->da = nl->d + size;
ec0d7e
  if (prev) {
ec0d7e
    assert(prev->size > 0);
56d550
    nl->b = calloc(sizeof(*nl->b), size*(1 + prev->size));
56d550
    nl->w = nl->b + size;
56d550
    for(double *p = nl->b, *e = p + size*(1 + prev->size); p < e; ++p)
ec0d7e
      *p = rand()/(double)RAND_MAX*2 - 1;
ec0d7e
    prev->next = nl;
ec0d7e
  }
ec0d7e
  return nl;
ec0d7e
}
void nlFree(NeuralLayer *nl) {
ec0d7e
  if (nl->next) nlFree(nl->next);
ec0d7e
  if (nl->prev) nl->prev->next = NULL;
ec0d7e
  free(nl->a);
56d550
  if (nl->prev) free(nl->b);
ec0d7e
  free(nl);
ec0d7e
}
NeuralLayer* nlFront(NeuralLayer *nl)
ec0d7e
  { while(nl->prev) nl = nl->prev; return nl; }
NeuralLayer* nlBack(NeuralLayer *nl)
ec0d7e
  { while(nl->next) nl = nl->next; return nl; }
int nlToStream(NeuralLayer *nl, FILE *f) {
ec0d7e
  if (nl->prev)
56d550
    if (!fwrite(nl->b, sizeof(double) * nl->size * (1 + nl->prev->size), 1, f))
ec0d7e
      return 0;
ec0d7e
  return nl->next ? nlToStream(nl->next, f) : 1;
ec0d7e
}
int nlFromStream(NeuralLayer *nl, FILE *f) {
ec0d7e
  if (nl->prev)
56d550
    if (!fread(nl->b, sizeof(double) * nl->size * (1 + nl->prev->size), 1, f))
ec0d7e
      return 0;
ec0d7e
  return nl->next ? nlFromStream(nl->next, f) : 1;
ec0d7e
}
int nlSave(NeuralLayer *nl, const char *filename) {
ec0d7e
  FILE *f = fopen(filename, "wb");
ec0d7e
  if (!f)
ec0d7e
    return printf("cannot open file '%s' for write\n", filename), 0;
ec0d7e
  if (!nlToStream(nl, f))
ec0d7e
    return printf("cannot write to file '%s'\n", filename), fclose(f), 0;
ec0d7e
  fclose(f);
ec0d7e
  return 1;
ec0d7e
}
int nlLoad(NeuralLayer *nl, const char *filename) {
ec0d7e
  FILE *f = fopen(filename, "rb");
ec0d7e
  if (!f)
ec0d7e
    return printf("cannot open file '%s' for read\n", filename), 0;
ec0d7e
  if (!nlFromStream(nl, f))
ec0d7e
    return printf("cannot read from file '%s'\n", filename), fclose(f), 0;
ec0d7e
  fclose(f);
ec0d7e
  return 1;
ec0d7e
}
NeuralLayer* nlPass(NeuralLayer *nl) {
ec0d7e
  NeuralLayer *nlp = nl->prev;
ec0d7e
  if (nlp) {
ec0d7e
    double *nlpa = nlp->a, *ee = nlpa + nlp->size;
ec0d7e
    double *w = nl->w;
56d550
    for(double *a = nl->a, *b = nl->b, *d = nl->d, *e = a + nl->size; a < e; ++a, ++b, ++d) {
56d550
      double s = *b;
ec0d7e
      for(double *pa = nlpa; pa < ee; ++pa, ++w)
ec0d7e
        s += *w * *pa;
ec0d7e
      double ex = exp(-s);
ec0d7e
      double ex1 = ex + 1;
ec0d7e
      double ex2 = 1/ex1;
ec0d7e
      *a = ex2;        // sigmoid
ec0d7e
      *d = ex*ex2*ex2; // sigmoid derivation
ec0d7e
    }
ec0d7e
  }
ec0d7e
  return nl->next ? nlPass(nl->next) : nl;
ec0d7e
}
NeuralLayer* nlBackpass(NeuralLayer *nl) {
ec0d7e
  NeuralLayer *nlp = nl->prev;
ec0d7e
  if (nlp) {
ec0d7e
    double tr = nl->trainRatio;
ec0d7e
    double *nlpa = nlp->a, *nlpda = nlp->da, *ee = nlpa + nlp->size;
ec0d7e
    double *w = nl->w;
ec0d7e
    if (nlp->prev) {
ec0d7e
      memset(nlp->da, 0, sizeof(*nlp->da) * nlp->size);
56d550
      for(double *b = nl->b, *d = nl->d, *da = nl->da, *e = b + nl->size; b < e; ++b, ++d, ++da) {
ec0d7e
        double ds = *d * *da;
ec0d7e
        double dst = ds*tr;
56d550
        *b -= dst;
ec0d7e
        for(double *pa = nlpa, *pda = nlpda; pa < ee; ++pa, ++pda, ++w) {
ec0d7e
          *pda += ds * *w;
ec0d7e
          *w += dst * *pa;
ec0d7e
        }
ec0d7e
      }
ec0d7e
    } else {
56d550
      for(double *b = nl->b, *d = nl->d, *da = nl->da, *e = b + nl->size; b < e; ++b, ++d, ++da) {
ec0d7e
        double dst = *d * *da * tr;
56d550
        *b -= dst;
ec0d7e
        for(double *pa = nlpa; pa < ee; ++pa, ++w)
ec0d7e
          *w += dst * *pa;
ec0d7e
      }
ec0d7e
    }
ec0d7e
    return nlBackpass(nlp);
ec0d7e
  }
ec0d7e
  return nl;
ec0d7e
}
double nlTrainPass(NeuralLayer *nl, double *x, double *y, double qmin) {
ec0d7e
  assert(!nl->prev);
ec0d7e
  double *fa = nl->a;
ec0d7e
  nl->a = x;
ec0d7e
  NeuralLayer *nlb = nlPass(nl);
ec0d7e
ec0d7e
  double qmax = 0;
ec0d7e
  for(double *a = nlb->a, *da = nlb->da, *e = a + nlb->size; a < e; ++a, ++da, ++y) {
ec0d7e
    double d = *y - *a;
ec0d7e
    *da = d;
ec0d7e
    double q = fabs(d);
ec0d7e
    if (qmax < q) qmax = q;
ec0d7e
  }
ec0d7e
  if (qmax > qmin) nlBackpass(nlb);
ec0d7e
ec0d7e
  nl->a = fa;
ec0d7e
  return qmax;
ec0d7e
}

#endif