patterncMinor
Neural Networks in C
Viewed 0 times
neuralnetworksstackoverflow
Problem
This block of code is one of my first C header files I have made; it's ported from a Python program I made a few months ago for a project.
I was just looking for advice on how to increase the efficiency of the program. I tried to make everything as efficient as possible, but am I missing anything? Also, any suggestions to improve my programming in general are greatly appreciated!
```
/*
Neural networks library
By Ben Jones
1/13/2017
*/
#ifndef NEURAL_NETWORKS_INCLUDED
#define NEURAL_NETWORKS_INCLUDED
#include
#include
#include
#include
#include
/* A single sigmoid neuron with per-connection state for backpropagation
   with momentum. Arrays are heap-allocated (see create_neuron/free_neuron). */
typedef struct neuron_struct neuron;
struct neuron_struct {
float *weights;            /* one weight per input (length numinputs) */
float *dw;                 /* last weight delta per input — momentum term for training */
float db;                  /* last bias delta — momentum term for the bias */
float neuron_error;        /* backpropagated error signal used by neuron_teach */
unsigned int numinputs;    /* number of inputs = length of weights/dw/inputs */
unsigned int numoutputs;   /* fan-out count; presumably used by network code not shown — verify */
unsigned int inputindex;   /* NOTE(review): meaning not visible here; likely a fill cursor used when wiring a network — confirm */
float lastout;             /* cached output of the most recent neuron_eval call */
float bias;                /* additive bias applied before the sigmoid */
float error;               /* NOTE(review): distinct from neuron_error; used by network layers not shown — confirm */
unsigned int *inpneurons;  /* indices of upstream neurons — assumed, used by network code not shown */
unsigned int *outneurons;  /* indices of downstream neurons — assumed, used by network code not shown */
float *inputs;             /* copy of the last input vector, cached by neuron_eval for training */
};
/*
 * Builds a neuron with numweights inputs, copying the caller's initial
 * weight vector. Returns the neuron by value; the caller owns the three
 * malloc'd arrays inside it and must release them with free_neuron.
 *
 * weights:    initial weight values (length numweights); the original
 *             signature lost its '*' in extraction — it must be a pointer,
 *             since a lone float cannot supply numweights values.
 * numweights: number of inputs this neuron accepts.
 *
 * NOTE(review): the original declared `output` as `static`, which is
 * pointless for a by-value return and makes the function non-reentrant;
 * a plain local is used instead. The copy loop body was lost in
 * extraction — copying weights[] and zeroing dw[] is the reconstruction
 * consistent with how neuron_teach uses them; confirm against the
 * original post.
 */
neuron create_neuron(float *weights, unsigned int numweights){
	neuron output;
	output.dw = malloc(sizeof(float)*numweights);
	output.weights = malloc(sizeof(float)*numweights);
	output.inputs = malloc(sizeof(float)*numweights);
	output.numinputs = numweights;
	unsigned int i;
	for(i = 0; i < numweights; i++){
		output.weights[i] = weights[i];
		output.dw[i] = 0; /* no momentum history before the first teach step */
	}
	return output;
}

/* Releases the arrays owned by a neuron. Does not free n itself. */
void free_neuron(neuron *n){
	free(n->dw);
	free(n->weights);
	free(n->inputs);
}
/*
 * Forward pass: computes sigmoid(dot(weights, inputs) + bias).
 *
 * The extracted signature lost its pointer tokens — the body uses `n->`,
 * so both parameters must be pointers. Also caches the input vector in
 * n->inputs and the activation in n->lastout, both of which
 * neuron_teach reads during training.
 *
 * n:      neuron to evaluate (mutated: inputs[], lastout).
 * inputs: input vector of length n->numinputs.
 * Returns the neuron's activation in (0, 1).
 */
float neuron_eval(neuron *n, float *inputs){
	float sum = 0;
	unsigned int i;
	for(i = 0; i < n->numinputs; i++){
		sum += inputs[i]*n->weights[i];
		n->inputs[i] = inputs[i]; /* remember inputs for the backprop step */
	}
	sum += n->bias;
	/* logistic sigmoid; expf keeps the computation in float like the rest */
	n->lastout = 1.0f/(1.0f+expf(-sum));
	return n->lastout;
}
/*
 * One gradient-descent-with-momentum update of the neuron's weights and
 * bias, using the error signal stored in n->neuron_error and the inputs
 * cached by the last neuron_eval call.
 *
 * The extraction ate the '*' multiplication operators; the restored
 * expressions are the standard momentum update:
 *   dw = input * error * u + m * dw_prev
 *
 * n: neuron to update (mutated: dw[], weights[], db, bias).
 * u: learning rate — assumed from usage; confirm against the original post.
 * m: momentum coefficient — assumed from usage; confirm.
 */
void neuron_teach(neuron *n, float u, float m){
	unsigned int i;
	for(i = 0; i < n->numinputs; i++){
		n->dw[i] = n->inputs[i]*n->neuron_error*u + m*n->dw[i];
		n->weights[i] -= n->dw[i];
	}
	n->db = n->neuron_error*u + m*n->db;
	n->bias -= n->db;
}
void neuron_random_weights(neuron *n, float lower, float upper){
float rand_flt;
int i;
for(i = 0; i numinputs; i++){
rand_flt = ((float) rand())/RAND_MAX;
ran
I was just looking for advice on how to increase the efficiency of the program. I tried to make everything as efficient as possible, but am I missing anything? Also, any suggestions to improve my programming in general are greatly appreciated!
```
/*
Neural networks library
By Ben Jones
1/13/2017
*/
#ifndef NEURAL_NETWORKS_INCLUDED
#define NEURAL_NETWORKS_INCLUDED
#include
#include
#include
#include
#include
/* A single sigmoid neuron with per-connection state for backpropagation
   with momentum. Arrays are heap-allocated (see create_neuron/free_neuron). */
typedef struct neuron_struct neuron;
struct neuron_struct {
float *weights;            /* one weight per input (length numinputs) */
float *dw;                 /* last weight delta per input — momentum term for training */
float db;                  /* last bias delta — momentum term for the bias */
float neuron_error;        /* backpropagated error signal used by neuron_teach */
unsigned int numinputs;    /* number of inputs = length of weights/dw/inputs */
unsigned int numoutputs;   /* fan-out count; presumably used by network code not shown — verify */
unsigned int inputindex;   /* NOTE(review): meaning not visible here; likely a fill cursor used when wiring a network — confirm */
float lastout;             /* cached output of the most recent neuron_eval call */
float bias;                /* additive bias applied before the sigmoid */
float error;               /* NOTE(review): distinct from neuron_error; used by network layers not shown — confirm */
unsigned int *inpneurons;  /* indices of upstream neurons — assumed, used by network code not shown */
unsigned int *outneurons;  /* indices of downstream neurons — assumed, used by network code not shown */
float *inputs;             /* copy of the last input vector, cached by neuron_eval for training */
};
/*
 * Builds a neuron with numweights inputs, copying the caller's initial
 * weight vector. Returns the neuron by value; the caller owns the three
 * malloc'd arrays inside it and must release them with free_neuron.
 *
 * weights:    initial weight values (length numweights); the original
 *             signature lost its '*' in extraction — it must be a pointer,
 *             since a lone float cannot supply numweights values.
 * numweights: number of inputs this neuron accepts.
 *
 * NOTE(review): the original declared `output` as `static`, which is
 * pointless for a by-value return and makes the function non-reentrant;
 * a plain local is used instead. The copy loop body was lost in
 * extraction — copying weights[] and zeroing dw[] is the reconstruction
 * consistent with how neuron_teach uses them; confirm against the
 * original post.
 */
neuron create_neuron(float *weights, unsigned int numweights){
	neuron output;
	output.dw = malloc(sizeof(float)*numweights);
	output.weights = malloc(sizeof(float)*numweights);
	output.inputs = malloc(sizeof(float)*numweights);
	output.numinputs = numweights;
	unsigned int i;
	for(i = 0; i < numweights; i++){
		output.weights[i] = weights[i];
		output.dw[i] = 0; /* no momentum history before the first teach step */
	}
	return output;
}

/* Releases the arrays owned by a neuron. Does not free n itself. */
void free_neuron(neuron *n){
	free(n->dw);
	free(n->weights);
	free(n->inputs);
}
/*
 * Forward pass: computes sigmoid(dot(weights, inputs) + bias).
 *
 * The extracted signature lost its pointer tokens — the body uses `n->`,
 * so both parameters must be pointers. Also caches the input vector in
 * n->inputs and the activation in n->lastout, both of which
 * neuron_teach reads during training.
 *
 * n:      neuron to evaluate (mutated: inputs[], lastout).
 * inputs: input vector of length n->numinputs.
 * Returns the neuron's activation in (0, 1).
 */
float neuron_eval(neuron *n, float *inputs){
	float sum = 0;
	unsigned int i;
	for(i = 0; i < n->numinputs; i++){
		sum += inputs[i]*n->weights[i];
		n->inputs[i] = inputs[i]; /* remember inputs for the backprop step */
	}
	sum += n->bias;
	/* logistic sigmoid; expf keeps the computation in float like the rest */
	n->lastout = 1.0f/(1.0f+expf(-sum));
	return n->lastout;
}
/*
 * One gradient-descent-with-momentum update of the neuron's weights and
 * bias, using the error signal stored in n->neuron_error and the inputs
 * cached by the last neuron_eval call.
 *
 * The extraction ate the '*' multiplication operators; the restored
 * expressions are the standard momentum update:
 *   dw = input * error * u + m * dw_prev
 *
 * n: neuron to update (mutated: dw[], weights[], db, bias).
 * u: learning rate — assumed from usage; confirm against the original post.
 * m: momentum coefficient — assumed from usage; confirm.
 */
void neuron_teach(neuron *n, float u, float m){
	unsigned int i;
	for(i = 0; i < n->numinputs; i++){
		n->dw[i] = n->inputs[i]*n->neuron_error*u + m*n->dw[i];
		n->weights[i] -= n->dw[i];
	}
	n->db = n->neuron_error*u + m*n->db;
	n->bias -= n->db;
}
void neuron_random_weights(neuron *n, float lower, float upper){
float rand_flt;
int i;
for(i = 0; i numinputs; i++){
rand_flt = ((float) rand())/RAND_MAX;
ran
Solution
I must say your code is really clean and easy to read. The naming is very good and coherent, you should consider answering questions here.
Enough with the compliments — on to my suggestions:
in
in
Performance-wise, maybe the optimizer could do it, but it's more readable, and cannot be slower:
in
in
Aside from that the code could benefit from some comments. There are currently none in your code.
Of course, use
Enough with the compliments — on to my suggestions:
in
create_feed_forward you could use calloc, which zeroes the memory far faster than you could with a loop: float *weights = calloc(output.maxlayersize,sizeof(float)); in
feed_forward_teach you'd make core more readable by declaring a pointer on ff->layers[i] and use that in the inner loop.Performance-wise, maybe the optimizer could do it, but it's more readable, and cannot be slower:
for(i = ff->numlayers-1; i >= 0; i--){
layer *lay_i = ff->layers+i;
if(i == ff->numlayers-1){
for(j = 0; j layers[i].numneurons; j++){
lay_i->neurons[j].neuron_error = ff->errors[j]*(lay_i->neurons[j].lastout)*(1-lay_i->neurons[j].lastout);
}
} else {
for(j = 0; j layers[i].numneurons; j++){
lay_i->neurons[j].neuron_error = ff->layers[i+1].errors[j]*(lay_i->neurons[j].lastout)*(1-lay_i->neurons[j].lastout);
}
}in
create_network you could initialize output.neuron[i] in one line instead of two:output.neurons[i]=(neuron){.lastout = 0, .outneurons = malloc(sizeof(unsigned int)*numoutputs[i]), .numinputs = 0, .inputindex = 0, .numoutputs = numoutputs[i]};in
network_step, you allocate the block for new_nextupdates, same remark as create_feed_forward: use calloc, and remove the static qualifier, which is useless.bool *new_nextupdates = calloc(n->numneurons,sizeof(bool));Aside from that the code could benefit from some comments. There are currently none in your code.
Of course, use
-O2 or -O3 optimization flags when compiling, and consider running your code through a profiler to identify the parts to hammer on in priority.Code Snippets
float *weights = calloc(output.maxlayersize,sizeof(float));for(i = ff->numlayers-1; i >= 0; i--){
layer *lay_i = ff->layers+i;
if(i == ff->numlayers-1){
for(j = 0; j < ff->layers[i].numneurons; j++){
lay_i->neurons[j].neuron_error = ff->errors[j]*(lay_i->neurons[j].lastout)*(1-lay_i->neurons[j].lastout);
}
} else {
for(j = 0; j < ff->layers[i].numneurons; j++){
lay_i->neurons[j].neuron_error = ff->layers[i+1].errors[j]*(lay_i->neurons[j].lastout)*(1-lay_i->neurons[j].lastout);
}
}output.neurons[i]=(neuron){.lastout = 0, .outneurons = malloc(sizeof(unsigned int)*numoutputs[i]), .numinputs = 0, .inputindex = 0, .numoutputs = numoutputs[i]};bool *new_nextupdates = calloc(n->numneurons,sizeof(bool));Context
StackExchange Code Review Q#152611, answer score: 2
Revisions (0)
No revisions yet.