#ifndef NNLAYER_INC_C
#define NNLAYER_INC_C
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/* One layer of a fully-connected feed-forward network. Layers form a
 * doubly linked chain; the first (input) layer has no parameters
 * (b and w stay NULL from calloc). */
typedef struct NeuralLayer {
struct NeuralLayer *prev, *next; /* neighbouring layers in the chain */
int size; /* neuron count in this layer */
double trainRatio; /* learning-rate factor used by nlBackpass */
double *a, *b, *d, *da, *w; /* a: activations[size] (aliased to the input
                             * vector on the first layer during training);
                             * b: biases[size]; d: cached sigmoid
                             * derivative[size]; da: error signal[size];
                             * w: weights[size * prev->size], one row of
                             * prev->size weights per neuron, stored
                             * contiguously after b in the same slab */
} NeuralLayer;
/* Append a new layer of `size` neurons behind `prev` (or create the chain's
 * first, input layer when prev is NULL). `prev` may point at any layer of an
 * existing chain; the new layer is always attached at the back. Biases and
 * weights are initialised uniformly in [-1, 1] via rand().
 * Returns the new layer, or NULL on allocation failure (the original
 * dereferenced the unchecked calloc result and crashed). */
NeuralLayer* nlNew(NeuralLayer *prev, int size, double trainRatio) {
    assert(size > 0);
    while (prev && prev->next) prev = prev->next; /* walk to the back */
    NeuralLayer *nl = calloc(1, sizeof(NeuralLayer));
    if (!nl) return NULL;
    nl->size = size;
    nl->prev = prev;
    nl->trainRatio = trainRatio;
    /* One zeroed slab sliced into per-neuron vectors: a (activations),
     * d (sigmoid derivative), da (error signal).
     * NOTE(review): size*4 doubles are allocated but only three slices are
     * referenced in this file; the 4th is kept for layout compatibility —
     * TODO confirm nothing outside relies on it. */
    nl->a = calloc((size_t)size * 4, sizeof(*nl->a));
    if (!nl->a) { free(nl); return NULL; }
    nl->d = nl->a + size;
    nl->da = nl->d + size;
    if (prev) {
        assert(prev->size > 0);
        /* size biases followed by size*prev->size weights in one slab. */
        size_t params = (size_t)size * (1 + prev->size);
        nl->b = calloc(params, sizeof(*nl->b));
        if (!nl->b) { free(nl->a); free(nl); return NULL; }
        nl->w = nl->b + size;
        for (double *p = nl->b, *e = p + params; p < e; ++p)
            *p = rand()/(double)RAND_MAX*2 - 1; /* uniform in [-1, 1] */
        prev->next = nl;
    }
    return nl;
}
/* Free layer nl and every layer after it, detaching the freed tail from any
 * layers that remain in front of it. Iterative: the original recursed once
 * per layer, risking stack exhaustion on very deep chains, and guarded
 * free(nl->b) with `if (nl->prev)` although b is NULL for the input layer
 * (calloc) and free(NULL) is a no-op. */
void nlFree(NeuralLayer *nl) {
    if (nl->prev) nl->prev->next = NULL; /* detach from surviving front part */
    while (nl) {
        NeuralLayer *next = nl->next;
        free(nl->a);
        free(nl->b); /* NULL for the input layer; free(NULL) is a no-op */
        free(nl);
        nl = next;
    }
}
/* Return the first (input) layer of the chain containing nl. */
NeuralLayer* nlFront(NeuralLayer *nl) {
    for (; nl->prev; nl = nl->prev)
        ;
    return nl;
}
/* Return the last (output) layer of the chain containing nl. */
NeuralLayer* nlBack(NeuralLayer *nl) {
    for (; nl->next; nl = nl->next)
        ;
    return nl;
}
/* Serialise the biases+weights of layer nl and every layer after it to f
 * (the input layer carries no parameters). Returns 1 on success, 0 on the
 * first short write. */
int nlToStream(NeuralLayer *nl, FILE *f) {
    for (; nl; nl = nl->next) {
        if (!nl->prev)
            continue; /* input layer: nothing to write */
        size_t bytes = sizeof(double) * nl->size * (1 + nl->prev->size);
        if (fwrite(nl->b, bytes, 1, f) != 1)
            return 0;
    }
    return 1;
}
/* Load the biases+weights of layer nl and every layer after it from f,
 * mirroring nlToStream's layout. Returns 1 on success, 0 on a short read. */
int nlFromStream(NeuralLayer *nl, FILE *f) {
    for (; nl; nl = nl->next) {
        if (!nl->prev)
            continue; /* input layer: nothing to read */
        size_t bytes = sizeof(double) * nl->size * (1 + nl->prev->size);
        if (fread(nl->b, bytes, 1, f) != 1)
            return 0;
    }
    return 1;
}
/* Write the chain's parameters to `filename`. Returns 1 on success, 0 on
 * failure (with a diagnostic printed, matching the file's style).
 * Fix: the success path previously ignored fclose's return value; for a
 * write stream fclose flushes buffered data, so a failure there means the
 * file is incomplete even though every fwrite succeeded. */
int nlSave(NeuralLayer *nl, const char *filename) {
    FILE *f = fopen(filename, "wb");
    if (!f)
        return printf("cannot open file '%s' for write\n", filename), 0;
    if (!nlToStream(nl, f))
        return printf("cannot write to file '%s'\n", filename), fclose(f), 0;
    if (fclose(f) != 0) /* flush failure => data not fully on disk */
        return printf("cannot write to file '%s'\n", filename), 0;
    return 1;
}
/* Read the chain's parameters back from `filename`. Returns 1 on success,
 * 0 on failure (with a diagnostic printed). */
int nlLoad(NeuralLayer *nl, const char *filename) {
    FILE *f = fopen(filename, "rb");
    if (!f) {
        printf("cannot open file '%s' for read\n", filename);
        return 0;
    }
    int ok = nlFromStream(nl, f);
    if (!ok)
        printf("cannot read from file '%s'\n", filename);
    fclose(f);
    return ok;
}
/* Forward pass: starting at nl, compute activations layer by layer and
 * return the last (output) layer. Per neuron: s = b + w . prev->a,
 * a = sigmoid(s), d = sigmoid'(s) (cached for nlBackpass). */
NeuralLayer* nlPass(NeuralLayer *nl) {
    NeuralLayer *nlp = nl->prev;
    if (nlp) {
        double *nlpa = nlp->a, *ee = nlpa + nlp->size;
        double *w = nl->w;
        for (double *a = nl->a, *b = nl->b, *d = nl->d, *e = a + nl->size; a < e; ++a, ++b, ++d) {
            double s = *b;
            for (double *pa = nlpa; pa < ee; ++pa, ++w)
                s += *w * *pa;
            /* Numerically stable sigmoid: always exponentiate a
             * non-positive argument. The old unconditional exp(-s)
             * overflowed to +inf for s < ~-709, making
             * d = inf*0*0 = NaN and poisoning all later training. */
            if (s >= 0) {
                double ex = exp(-s);       /* in (0, 1] */
                double inv = 1 / (1 + ex);
                *a = inv;                  /* sigmoid(s) = 1/(1+e^-s) */
                *d = ex * inv * inv;       /* sigmoid'(s) = ex/(1+ex)^2 */
            } else {
                double ex = exp(s);        /* in (0, 1) */
                double inv = 1 / (1 + ex);
                *a = ex * inv;             /* sigmoid(s) = e^s/(1+e^s) */
                *d = ex * inv * inv;       /* same closed form */
            }
        }
    }
    return nl->next ? nlPass(nl->next) : nl;
}
/* Backward pass: nl->da must hold the error signal (y - a at the output
 * layer, set by nlTrainPass). Updates biases and weights by one gradient
 * step scaled by trainRatio and propagates the error signal to earlier
 * layers. Returns the input layer. */
NeuralLayer* nlBackpass(NeuralLayer *nl) {
    NeuralLayer *nlp = nl->prev;
    if (nlp) {
        double tr = nl->trainRatio;
        double *nlpa = nlp->a, *nlpda = nlp->da, *ee = nlpa + nlp->size;
        double *w = nl->w;
        if (nlp->prev) {
            /* Previous layer is hidden: also accumulate its error signal. */
            memset(nlp->da, 0, sizeof(*nlp->da) * nlp->size);
            for (double *b = nl->b, *d = nl->d, *da = nl->da, *e = b + nl->size; b < e; ++b, ++d, ++da) {
                double ds = *d * *da; /* delta = sigmoid' * error */
                double dst = ds * tr;
                /* BUGFIX: since s = b + sum(w*pa), ds/db == 1, so the bias
                 * must move in the same direction as the weights. The old
                 * code did `*b -= dst` while doing `*w += dst * *pa`,
                 * pushing the bias against the gradient. */
                *b += dst;
                for (double *pa = nlpa, *pda = nlpda; pa < ee; ++pa, ++pda, ++w) {
                    *pda += ds * *w; /* propagate error to previous layer */
                    *w += dst * *pa; /* weight step */
                }
            }
        } else {
            /* Previous layer is the input: no error signal needed there. */
            for (double *b = nl->b, *d = nl->d, *da = nl->da, *e = b + nl->size; b < e; ++b, ++d, ++da) {
                double dst = *d * *da * tr;
                *b += dst; /* BUGFIX: was -=, inconsistent with the weight step */
                for (double *pa = nlpa; pa < ee; ++pa, ++w)
                    *w += dst * *pa;
            }
        }
        return nlBackpass(nlp);
    }
    return nl;
}
/* One training step on the chain whose input layer is nl: forward-pass x,
 * compare with target y (length = output layer size), and backpropagate if
 * the worst absolute output error exceeds qmin. Returns that worst error. */
double nlTrainPass(NeuralLayer *nl, double *x, double *y, double qmin) {
    assert(!nl->prev); /* must be called on the input layer */
    /* Present the input without copying: temporarily alias the input
     * layer's activation vector to x, restore it before returning. */
    double *saved = nl->a;
    nl->a = x;
    NeuralLayer *out = nlPass(nl);
    /* Store the per-output error signal and track the worst |error|. */
    double worst = 0;
    for (int i = 0; i < out->size; ++i) {
        double diff = y[i] - out->a[i];
        out->da[i] = diff;
        if (fabs(diff) > worst)
            worst = fabs(diff);
    }
    if (worst > qmin)
        nlBackpass(out);
    nl->a = saved;
    return worst;
}
#endif