[libfann] 189/242: *** empty log message ***
Christian Kastner
chrisk-guest at moszumanska.debian.org
Sat Oct 4 21:10:42 UTC 2014
This is an automated email from the git hooks/post-receive script.
chrisk-guest pushed a commit to tag Version2_0_0
in repository libfann.
commit e82854f1eabee87fd44bd81552e272d0c2cf8ce0
Author: Steffen Nissen <lukesky at diku.dk>
Date: Mon Aug 1 20:47:08 2005 +0000
*** empty log message ***
---
benchmarks/benchmark.sh | 20 +-
benchmarks/quality.cc | 20 +-
examples/cascade_train.c | 107 +++---
fann.prj | 5 +-
src/fann.c | 825 +++++++++++++++++++++++++++--------------------
src/fann_cascade.c | 35 +-
src/fann_train.c | 63 ----
src/fann_train_data.c | 535 ++++++++++++++++++------------
src/include/fann_data.h | 13 -
9 files changed, 914 insertions(+), 709 deletions(-)
diff --git a/benchmarks/benchmark.sh b/benchmarks/benchmark.sh
index d3d6162..0a57a58 100755
--- a/benchmarks/benchmark.sh
+++ b/benchmarks/benchmark.sh
@@ -15,17 +15,19 @@ function benchmark_problem() {
algo="fann_cascade"; benchmark_algorithm;
algo="fann_rprop"; benchmark_algorithm;
#./quality_fixed $prob.$algo.train.out_fixed_train $prob.$algo.train.out_fixed_test $prob.$algo.fixed_train.out $prob.$algo.fixed_test.out *_fixed.net
- algo="fann_rprop_stepwise"; benchmark_algorithm;
+
+
+# algo="fann_rprop_stepwise"; benchmark_algorithm;
algo="fann_quickprop"; benchmark_algorithm;
#algo="fann_quickprop_stepwise"; benchmark_algorithm;
- algo="fann_batch"; benchmark_algorithm;
+# algo="fann_batch"; benchmark_algorithm;
#algo="fann_batch_stepwise"; benchmark_algorithm;
algo="fann_incremental"; benchmark_algorithm;
#algo="fann_incremental_stepwise"; benchmark_algorithm;
#comment out two following lines if the libraries are not available
- algo="lwnn"; benchmark_algorithm;
- algo="jneural"; benchmark_algorithm;
+# algo="lwnn"; benchmark_algorithm;
+# algo="jneural"; benchmark_algorithm;
}
#comment out some of the lines below if some of the problems should not be benchmarked
@@ -67,10 +69,10 @@ benchmark_problem;
prob="two-spiral"; n1=20; n2=10; sec_train=$max_seconds_training;
benchmark_problem;
-./performance fann fann_performance.out 1 2048 2 20
-./performance fann_stepwise fann_stepwise_performance.out 1 2048 2 20
-./performance_fixed fann fann_fixed_performance.out 1 2048 2 20
-./performance lwnn lwnn_performance.out 1 2048 2 20
-./performance jneural jneural_performance.out 1 256 2 20
+# ./performance fann fann_performance.out 1 2048 2 20
+# ./performance fann_stepwise fann_stepwise_performance.out 1 2048 2 20
+# ./performance_fixed fann fann_fixed_performance.out 1 2048 2 20
+# ./performance lwnn lwnn_performance.out 1 2048 2 20
+# ./performance jneural jneural_performance.out 1 256 2 20
gnuplot < gnuplot
diff --git a/benchmarks/quality.cc b/benchmarks/quality.cc
index 0e433af..be5558d 100644
--- a/benchmarks/quality.cc
+++ b/benchmarks/quality.cc
@@ -254,10 +254,24 @@ void quality_benchmark_cascade(
struct fann *ann;
ann = fann_create_shortcut(0.7, 2, num_input, num_output);
- fann_set_activation_steepness_hidden(ann, 1);
- fann_set_activation_steepness_output(ann, 1);
+
+ fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
- fann_set_activation_function_output(ann, FANN_SIGMOID);
+ fann_set_activation_function_output(ann, FANN_LINEAR);
+ fann_set_activation_steepness_hidden(ann, 0.5);
+ fann_set_activation_steepness_output(ann, 0.5);
+
+ fann_set_train_error_function(ann, FANN_ERRORFUNC_LINEAR);
+ fann_set_rprop_increase_factor(ann, 1.2);
+ fann_set_rprop_decrease_factor(ann, 0.5);
+ fann_set_rprop_delta_min(ann, 0.0);
+ fann_set_rprop_delta_max(ann, 50.0);
+
+ ann->cascade_change_fraction = 0.01;
+ ann->cascade_stagnation_epochs = 12;
+ ann->cascade_num_candidates = 16;
+ ann->cascade_weight_multiplier = 0.5;
+
calibrate_timer();
while(total_elapsed < (double)seconds_of_training){
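The four fann_set_rprop_* calls above expose the step-size schedule used by RPROP training. As a rough sketch of what those numbers control (illustrative only, with invented names like rprop_step; the library's real implementation is fann_update_weights_irpropm): each weight keeps a private step size that grows by the increase factor (1.2) while the error slope keeps its sign, shrinks by the decrease factor (0.5) when the sign flips, and stays clamped to [delta_min, delta_max] = [0.0, 50.0].

    /* Illustrative per-weight RPROP step-size update; only the parameter
     * values come from the hunk above, the names are this sketch's own. */
    float rprop_step(float *delta, float slope, float prev_slope)
    {
        float sign = slope * prev_slope;

        if(sign > 0)
            *delta = *delta * 1.2f;     /* slope kept its sign: speed up */
        else if(sign < 0)
            *delta = *delta * 0.5f;     /* sign flip: overshot, slow down */
        if(*delta > 50.0f)
            *delta = 50.0f;             /* rprop_delta_max */
        if(*delta < 0.0f)
            *delta = 0.0f;              /* rprop_delta_min */
        return (slope > 0) ? *delta : -*delta; /* step along the slope */
    }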
diff --git a/examples/cascade_train.c b/examples/cascade_train.c
index 22b07d5..ce5bcf2 100644
--- a/examples/cascade_train.c
+++ b/examples/cascade_train.c
@@ -33,10 +33,10 @@ int print_callback(unsigned int epochs, float error)
int main()
{
- const float learning_rate = (const float)0.7;
+ const float learning_rate = (const float)1.7;
const float desired_error = (const float)0.00001;
- unsigned int max_out_epochs = 150;
- unsigned int max_cand_epochs = 150;
+ unsigned int max_out_epochs = 1500;
+ unsigned int max_cand_epochs = 1500;
unsigned int max_neurons = 40;
unsigned int neurons_between_reports = 1;
/*int i;
@@ -51,12 +51,6 @@ int main()
/*
*/
- train_data = fann_read_train_from_file("xor.data");
- test_data = fann_read_train_from_file("xor.data");
-
- train_data = fann_read_train_from_file("../benchmarks/datasets/two-spiral2.train");
- test_data = fann_read_train_from_file("../benchmarks/datasets/two-spiral2.test");
-
train_data = fann_read_train_from_file("../benchmarks/datasets/parity8.train");
test_data = fann_read_train_from_file("../benchmarks/datasets/parity8.test");
@@ -78,11 +72,20 @@ int main()
train_data = fann_read_train_from_file("../benchmarks/datasets/soybean.train");
test_data = fann_read_train_from_file("../benchmarks/datasets/soybean.test");
+ train_data = fann_read_train_from_file("../benchmarks/datasets/thyroid.train");
+ test_data = fann_read_train_from_file("../benchmarks/datasets/thyroid.test");
+
train_data = fann_read_train_from_file("../benchmarks/datasets/robot.train");
test_data = fann_read_train_from_file("../benchmarks/datasets/robot.test");
- train_data = fann_read_train_from_file("../benchmarks/datasets/thyroid.train");
- test_data = fann_read_train_from_file("../benchmarks/datasets/thyroid.test");
+ train_data = fann_read_train_from_file("xor.data");
+ test_data = fann_read_train_from_file("xor.data");
+
+ train_data = fann_read_train_from_file("../benchmarks/datasets/two-spiral2.train");
+ test_data = fann_read_train_from_file("../benchmarks/datasets/two-spiral2.test");
+
+ train_data = fann_read_train_from_file("../benchmarks/datasets/building.train");
+ test_data = fann_read_train_from_file("../benchmarks/datasets/building.test");
fann_scale_train_data(train_data, -1, 1);
fann_scale_train_data(test_data, -1, 1);
@@ -91,12 +94,56 @@ int main()
ann = fann_create_shortcut(learning_rate, 2, train_data->num_input, train_data->num_output);
+ fann_set_training_algorithm(ann, FANN_TRAIN_BATCH);
+ fann_set_training_algorithm(ann, FANN_TRAIN_QUICKPROP);
fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
+
fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
fann_set_activation_function_output(ann, FANN_LINEAR);
fann_set_activation_steepness_hidden(ann, 1);
fann_set_activation_steepness_output(ann, 1);
+
+ fann_set_train_error_function(ann, FANN_ERRORFUNC_LINEAR);
+
+ fann_set_rprop_increase_factor(ann, 1.2);
+ fann_set_rprop_decrease_factor(ann, 0.5);
+ fann_set_rprop_delta_min(ann, 0.0);
+ fann_set_rprop_delta_max(ann, 50.0);
+
+ ann->cascade_change_fraction = 0.001;
+ ann->cascade_stagnation_epochs = 120;
+ ann->cascade_num_candidates = 16;
+ ann->cascade_weight_multiplier = 0.7;
+
+ fann_print_parameters(ann);
+ /*fann_print_connections(ann);*/
+
+ printf("Training network.\n");
+
+ fann_cascadetrain_on_data_callback(ann, train_data, desired_error, print_callback, max_out_epochs, max_cand_epochs, max_neurons, neurons_between_reports);
+
+ /*fann_train_on_data(ann, train_data, 300, 1, desired_error);*/
+ /*printf("\nTrain error: %f, Test error: %f\n\n", fann_test_data(ann, train_data), fann_test_data(ann, test_data));*/
+
+ fann_print_connections(ann);
+ /*fann_print_parameters(ann);*/
+
+ printf("\nTrain error: %f, Test error: %f\n\n", fann_test_data(ann, train_data), fann_test_data(ann, test_data));
+
+ printf("Saving network.\n");
+
+ fann_save(ann, "xor_float.net");
+
+ printf("Cleaning up.\n");
+ fann_destroy_train(train_data);
+ fann_destroy_train(test_data);
+ fann_destroy(ann);
+
+ return 0;
+}
+
+
/*
for(i = 0; i < 6; i++){
printf("%.20e, ", ann->activation_values_hidden[i]);
@@ -140,41 +187,3 @@ int main()
exit(0);
*/
-
- fann_set_train_error_function(ann, FANN_ERRORFUNC_LINEAR);
- fann_set_rprop_increase_factor(ann, 1.2);
- fann_set_rprop_decrease_factor(ann, 0.5);
- fann_set_rprop_delta_min(ann, 0.0);
- fann_set_rprop_delta_max(ann, 50.0);
-
- ann->cascade_change_fraction = 0.01;
- ann->cascade_stagnation_epochs = 12;
- ann->cascade_num_candidates = 16;
- ann->cascade_weight_multiplier = 0.5;
-
- fann_print_parameters(ann);
- /*fann_print_connections(ann);*/
-
- printf("Training network.\n");
-
- fann_cascadetrain_on_data_callback(ann, train_data, desired_error, print_callback, max_out_epochs, max_cand_epochs, max_neurons, neurons_between_reports);
-
- /*fann_train_on_data(ann, train_data, 300, 1, desired_error);*/
- /*printf("\nTrain error: %f, Test error: %f\n\n", fann_test_data(ann, train_data), fann_test_data(ann, test_data));*/
-
- fann_print_connections(ann);
- /*fann_print_parameters(ann);*/
-
- printf("\nTrain error: %f, Test error: %f\n\n", fann_test_data(ann, train_data), fann_test_data(ann, test_data));
-
- printf("Saving network.\n");
-
- fann_save(ann, "xor_float.net");
-
- printf("Cleaning up.\n");
- fann_destroy_train(train_data);
- fann_destroy_train(test_data);
- fann_destroy(ann);
-
- return 0;
-}
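Pieced together from the hunks above (and ignoring the stack of alternative dataset loads, of which only the last pair takes effect), the reworked example boils down to the following skeleton. This is a condensed sketch, not the file verbatim; the print_callback body is illustrative, every other call appears in the diff:

    #include <stdio.h>
    #include "fann.h"

    int print_callback(unsigned int epochs, float error)
    {
        printf("Epochs %8u. Current error: %.10f\n", epochs, error);
        return 0;
    }

    int main()
    {
        struct fann *ann;
        struct fann_train_data *train_data, *test_data;
        const float desired_error = (const float) 0.00001;

        train_data = fann_read_train_from_file("../benchmarks/datasets/building.train");
        test_data = fann_read_train_from_file("../benchmarks/datasets/building.test");
        fann_scale_train_data(train_data, -1, 1);
        fann_scale_train_data(test_data, -1, 1);

        ann = fann_create_shortcut(1.7, 2, train_data->num_input, train_data->num_output);
        fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
        fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
        fann_set_activation_function_output(ann, FANN_LINEAR);

        /* max_out_epochs = 1500, max_cand_epochs = 1500, max_neurons = 40,
         * neurons_between_reports = 1, as set at the top of the example */
        fann_cascadetrain_on_data_callback(ann, train_data, desired_error,
                                           print_callback, 1500, 1500, 40, 1);

        printf("\nTrain error: %f, Test error: %f\n\n",
               fann_test_data(ann, train_data), fann_test_data(ann, test_data));

        fann_save(ann, "xor_float.net");
        fann_destroy_train(train_data);
        fann_destroy_train(test_data);
        fann_destroy(ann);
        return 0;
    }

(The last dataset loaded wins because each fann_read_train_from_file call simply overwrites the previous pointer; the earlier results are leaked, which is presumably acceptable in a scratch example.)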
diff --git a/fann.prj b/fann.prj
index bbeeb43..6039018 100644
--- a/fann.prj
+++ b/fann.prj
@@ -30,7 +30,7 @@ project.type=GENERIC
project.target.type=EXECUTABLE
project.version=1.3.0
project.author=Steffen Nissen
-project.source.target=examples/xor_test_fixed_debug
+project.source.target=examples/cascade_train_debug
project.has.gettext=0
project.gui.command=
project.programming.language=C
@@ -107,7 +107,8 @@ module.source.files=\
src/fann_train_data.c\
src/fixedfann.c\
src/floatfann.c\
- src/fann_cascade.c
+ src/fann_cascade.c\
+ benchmarks/benchmark.sh
module.pixmap.name=.
module.pixmap.type=
diff --git a/src/fann.c b/src/fann.c
index 71d5fba..26e6545 100644
--- a/src/fann.c
+++ b/src/fann.c
@@ -16,7 +16,7 @@
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-
+
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
@@ -30,23 +30,23 @@
/* create a neural network.
*/
-FANN_EXTERNAL struct fann * FANN_API fann_create(float connection_rate, float learning_rate,
- unsigned int num_layers, /* the number of layers, including the input and output layer */
-
-
- ...) /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
+FANN_EXTERNAL struct fann *FANN_API fann_create(float connection_rate, float learning_rate, unsigned int num_layers, /* the number of layers, including the input and output layer */
+ ...) /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
{
struct fann *ann;
va_list layer_sizes;
int i;
- unsigned int *layers = (unsigned int *)calloc(num_layers, sizeof(unsigned int));
- if(layers == NULL){
+ unsigned int *layers = (unsigned int *) calloc(num_layers, sizeof(unsigned int));
+
+ if(layers == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
va_start(layer_sizes, num_layers);
- for ( i=0 ; i<(int)num_layers ; i++ ) {
+ for(i = 0; i < (int) num_layers; i++)
+ {
layers[i] = va_arg(layer_sizes, unsigned int);
}
va_end(layer_sizes);
@@ -60,7 +60,8 @@ FANN_EXTERNAL struct fann * FANN_API fann_create(float connection_rate, float le
/* create a neural network.
*/
-FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, float learning_rate, unsigned int num_layers, unsigned int * layers)
+FANN_EXTERNAL struct fann *FANN_API fann_create_array(float connection_rate, float learning_rate,
+ unsigned int num_layers, unsigned int *layers)
{
struct fann_layer *layer_it, *last_layer, *prev_layer;
struct fann *ann;
@@ -70,23 +71,25 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
unsigned int min_connections, max_connections, num_connections;
unsigned int connections_per_neuron, allocated_connections;
unsigned int random_number, found_connection;
-
+
#ifdef FIXEDFANN
unsigned int decimal_point;
unsigned int multiplier;
#endif
- if(connection_rate > 1){
+ if(connection_rate > 1)
+ {
connection_rate = 1;
}
-
+
/* seed random */
-#ifndef FANN_NO_SEED
+#ifndef FANN_NO_SEED
fann_seed_rand();
#endif
-
+
/* allocate the general structure */
ann = fann_allocate_structure(learning_rate, num_layers);
- if(ann == NULL){
+ if(ann == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
@@ -100,224 +103,249 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
/* determine how many neurons there should be in each layer */
i = 0;
- for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
+ {
/* we do not allocate room here, but we make sure that
- last_neuron - first_neuron is the number of neurons */
+ * last_neuron - first_neuron is the number of neurons */
layer_it->first_neuron = NULL;
- layer_it->last_neuron = layer_it->first_neuron + layers[i++] +1; /* +1 for bias */
+ layer_it->last_neuron = layer_it->first_neuron + layers[i++] + 1; /* +1 for bias */
ann->total_neurons += layer_it->last_neuron - layer_it->first_neuron;
}
-
- ann->num_output = (ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron -1;
- ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron -1;
-
+
+ ann->num_output = (ann->last_layer - 1)->last_neuron - (ann->last_layer - 1)->first_neuron - 1;
+ ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
+
/* allocate room for the actual neurons */
fann_allocate_neurons(ann);
- if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
+ if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+ {
fann_destroy(ann);
return NULL;
}
-
+
#ifdef DEBUG
- printf("creating network with learning rate %f and connection rate %f\n", learning_rate, connection_rate);
+ printf("creating network with learning rate %f and connection rate %f\n", learning_rate,
+ connection_rate);
printf("input\n");
- printf(" layer : %d neurons, 1 bias\n", ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
+ printf(" layer : %d neurons, 1 bias\n",
+ ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
#endif
-
+
num_neurons_in = ann->num_input;
- for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
+ for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
+ {
num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
/* if all neurons in each layer should be connected to at least one neuron
- in the previous layer, and one neuron in the next layer.
- and the bias node should be connected to the all neurons in the next layer.
- Then this is the minimum amount of neurons */
+ * in the previous layer, and one neuron in the next layer.
+ * and the bias node should be connected to the all neurons in the next layer.
+ * Then this is the minimum amount of neurons */
min_connections = fann_max(num_neurons_in, num_neurons_out) + num_neurons_out;
- max_connections = num_neurons_in * num_neurons_out; /* not calculating bias */
+ max_connections = num_neurons_in * num_neurons_out; /* not calculating bias */
num_connections = fann_max(min_connections,
- (unsigned int)(0.5+(connection_rate * max_connections)) + num_neurons_out);
-
- connections_per_neuron = num_connections/num_neurons_out;
+ (unsigned int) (0.5 + (connection_rate * max_connections)) +
+ num_neurons_out);
+
+ connections_per_neuron = num_connections / num_neurons_out;
allocated_connections = 0;
/* Now split out the connections on the different neurons */
- for(i = 0; i != num_neurons_out; i++){
+ for(i = 0; i != num_neurons_out; i++)
+ {
layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
allocated_connections += connections_per_neuron;
layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;
layer_it->first_neuron[i].activation_function = FANN_SIGMOID_STEPWISE;
#ifdef FIXEDFANN
- layer_it->first_neuron[i].activation_steepness = ann->multiplier/2;
+ layer_it->first_neuron[i].activation_steepness = ann->multiplier / 2;
#else
layer_it->first_neuron[i].activation_steepness = 0.5;
#endif
-
- if(allocated_connections < (num_connections*(i+1))/num_neurons_out){
+
+ if(allocated_connections < (num_connections * (i + 1)) / num_neurons_out)
+ {
layer_it->first_neuron[i].last_con++;
allocated_connections++;
}
}
-
+
/* bias neuron also gets stuff */
layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;
ann->total_connections += num_connections;
-
+
/* used in the next run of the loop */
num_neurons_in = num_neurons_out;
}
-
+
fann_allocate_connections(ann);
- if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
+ if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+ {
fann_destroy(ann);
return NULL;
}
first_neuron = ann->first_layer->first_neuron;
-
- if(connection_rate >= 1){
- prev_layer_size = ann->num_input+1;
+
+ if(connection_rate >= 1)
+ {
+ prev_layer_size = ann->num_input + 1;
prev_layer = ann->first_layer;
last_layer = ann->last_layer;
- for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
- last_neuron = layer_it->last_neuron-1;
- for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++){
- for(i = neuron_it->first_con; i != neuron_it->last_con; i++){
- ann->weights[i] = (fann_type)fann_random_weight();
+ for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
+ {
+ last_neuron = layer_it->last_neuron - 1;
+ for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+ {
+ for(i = neuron_it->first_con; i != neuron_it->last_con; i++)
+ {
+ ann->weights[i] = (fann_type) fann_random_weight();
/* these connections are still initialized for fully connected networks, to allow
- operations to work, that are not optimized for fully connected networks.
- */
+ * operations to work, that are not optimized for fully connected networks.
+ */
ann->connections[i] = prev_layer->first_neuron + (i - neuron_it->first_con);
}
}
prev_layer_size = layer_it->last_neuron - layer_it->first_neuron;
prev_layer = layer_it;
#ifdef DEBUG
- printf(" layer : %d neurons, 1 bias\n", prev_layer_size-1);
+ printf(" layer : %d neurons, 1 bias\n", prev_layer_size - 1);
#endif
}
- } else {
+ }
+ else
+ {
/* make connections for a network, that are not fully connected */
-
+
/* generally, what we do is first to connect all the input
- neurons to a output neuron, respecting the number of
- available input neurons for each output neuron. Then
- we go through all the output neurons, and connect the
- rest of the connections to input neurons, that they are
- not allready connected to.
- */
-
+ * neurons to a output neuron, respecting the number of
+ * available input neurons for each output neuron. Then
+ * we go through all the output neurons, and connect the
+ * rest of the connections to input neurons, that they are
+ * not allready connected to.
+ */
+
/* All the connections are cleared by calloc, because we want to
- be able to see which connections are allready connected */
-
- for(layer_it = ann->first_layer+1;
- layer_it != ann->last_layer; layer_it++){
-
+ * be able to see which connections are allready connected */
+
+ for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
+ {
+
num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
- num_neurons_in = (layer_it-1)->last_neuron - (layer_it-1)->first_neuron - 1;
-
+ num_neurons_in = (layer_it - 1)->last_neuron - (layer_it - 1)->first_neuron - 1;
+
/* first connect the bias neuron */
- bias_neuron = (layer_it-1)->last_neuron-1;
- last_neuron = layer_it->last_neuron-1;
- for(neuron_it = layer_it->first_neuron;
- neuron_it != last_neuron; neuron_it++){
+ bias_neuron = (layer_it - 1)->last_neuron - 1;
+ last_neuron = layer_it->last_neuron - 1;
+ for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+ {
ann->connections[neuron_it->first_con] = bias_neuron;
- ann->weights[neuron_it->first_con] = (fann_type)fann_random_weight();
+ ann->weights[neuron_it->first_con] = (fann_type) fann_random_weight();
}
-
+
/* then connect all neurons in the input layer */
- last_neuron = (layer_it-1)->last_neuron - 1;
- for(neuron_it = (layer_it-1)->first_neuron;
- neuron_it != last_neuron; neuron_it++){
-
+ last_neuron = (layer_it - 1)->last_neuron - 1;
+ for(neuron_it = (layer_it - 1)->first_neuron; neuron_it != last_neuron; neuron_it++)
+ {
+
/* random neuron in the output layer that has space
- for more connections */
- do {
- random_number = (int) (0.5+fann_rand(0, num_neurons_out-1));
+ * for more connections */
+ do
+ {
+ random_number = (int) (0.5 + fann_rand(0, num_neurons_out - 1));
random_neuron = layer_it->first_neuron + random_number;
/* checks the last space in the connections array for room */
- }while(ann->connections[random_neuron->last_con-1]);
-
+ }
+ while(ann->connections[random_neuron->last_con - 1]);
+
/* find an empty space in the connection array and connect */
- for(i = random_neuron->first_con; i < random_neuron->last_con; i++){
- if(ann->connections[i] == NULL){
+ for(i = random_neuron->first_con; i < random_neuron->last_con; i++)
+ {
+ if(ann->connections[i] == NULL)
+ {
ann->connections[i] = neuron_it;
- ann->weights[i] = (fann_type)fann_random_weight();
+ ann->weights[i] = (fann_type) fann_random_weight();
break;
}
}
}
-
+
/* then connect the rest of the unconnected neurons */
last_neuron = layer_it->last_neuron - 1;
- for(neuron_it = layer_it->first_neuron;
- neuron_it != last_neuron; neuron_it++){
+ for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+ {
/* find empty space in the connection array and connect */
- for(i = neuron_it->first_con; i < neuron_it->last_con; i++){
+ for(i = neuron_it->first_con; i < neuron_it->last_con; i++)
+ {
/* continue if allready connected */
- if(ann->connections[i] != NULL) continue;
-
- do {
+ if(ann->connections[i] != NULL)
+ continue;
+
+ do
+ {
found_connection = 0;
- random_number = (int) (0.5+fann_rand(0, num_neurons_in-1));
- random_neuron = (layer_it-1)->first_neuron + random_number;
-
+ random_number = (int) (0.5 + fann_rand(0, num_neurons_in - 1));
+ random_neuron = (layer_it - 1)->first_neuron + random_number;
+
/* check to see if this connection is allready there */
- for(j = neuron_it->first_con; j < i; j++){
- if(random_neuron == ann->connections[j]){
+ for(j = neuron_it->first_con; j < i; j++)
+ {
+ if(random_neuron == ann->connections[j])
+ {
found_connection = 1;
break;
}
}
-
- }while(found_connection);
-
+
+ }
+ while(found_connection);
+
/* we have found a neuron that is not allready
- connected to us, connect it */
+ * connected to us, connect it */
ann->connections[i] = random_neuron;
- ann->weights[i] = (fann_type)fann_random_weight();
+ ann->weights[i] = (fann_type) fann_random_weight();
}
}
-
+
#ifdef DEBUG
printf(" layer : %d neurons, 1 bias\n", num_neurons_out);
#endif
}
-
+
/* TODO it would be nice to have the randomly created
- connections sorted for smoother memory access.
- */
+ * connections sorted for smoother memory access.
+ */
}
-
+
#ifdef DEBUG
printf("output\n");
#endif
-
+
return ann;
}
-
+
/* create a neural network with shortcut connections.
*/
-FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut(float learning_rate,
- unsigned int num_layers, /* the number of layers, including the input and output layer */
-
-
- ...) /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
+FANN_EXTERNAL struct fann *FANN_API fann_create_shortcut(float learning_rate, unsigned int num_layers, /* the number of layers, including the input and output layer */
+ ...) /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
{
struct fann *ann;
int i;
va_list layer_sizes;
- unsigned int *layers = (unsigned int *)calloc(num_layers, sizeof(unsigned int));
- if(layers == NULL){
+ unsigned int *layers = (unsigned int *) calloc(num_layers, sizeof(unsigned int));
+
+ if(layers == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
-
+
va_start(layer_sizes, num_layers);
- for ( i=0 ; i<(int)num_layers ; i++ ) {
+ for(i = 0; i < (int) num_layers; i++)
+ {
layers[i] = va_arg(layer_sizes, unsigned int);
}
va_end(layer_sizes);
@@ -331,26 +359,29 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut(float learning_rate,
/* create a neural network with shortcut connections.
*/
-FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_rate, unsigned int num_layers, unsigned int * layers)
+FANN_EXTERNAL struct fann *FANN_API fann_create_shortcut_array(float learning_rate,
+ unsigned int num_layers,
+ unsigned int *layers)
{
struct fann_layer *layer_it, *layer_it2, *last_layer;
struct fann *ann;
struct fann_neuron *neuron_it, *neuron_it2 = 0;
unsigned int i;
unsigned int num_neurons_in, num_neurons_out;
-
+
#ifdef FIXEDFANN
unsigned int decimal_point;
unsigned int multiplier;
#endif
/* seed random */
-#ifndef FANN_NO_SEED
+#ifndef FANN_NO_SEED
fann_seed_rand();
#endif
-
+
/* allocate the general structure */
ann = fann_allocate_structure(learning_rate, num_layers);
- if(ann == NULL){
+ if(ann == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
@@ -365,78 +396,90 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_r
/* determine how many neurons there should be in each layer */
i = 0;
- for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
+ {
/* we do not allocate room here, but we make sure that
- last_neuron - first_neuron is the number of neurons */
+ * last_neuron - first_neuron is the number of neurons */
layer_it->first_neuron = NULL;
layer_it->last_neuron = layer_it->first_neuron + layers[i++];
- if(layer_it == ann->first_layer){
+ if(layer_it == ann->first_layer)
+ {
/* there is a bias neuron in the first layer */
layer_it->last_neuron++;
}
-
+
ann->total_neurons += layer_it->last_neuron - layer_it->first_neuron;
}
-
- ann->num_output = (ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron;
- ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron -1;
-
+
+ ann->num_output = (ann->last_layer - 1)->last_neuron - (ann->last_layer - 1)->first_neuron;
+ ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
+
/* allocate room for the actual neurons */
fann_allocate_neurons(ann);
- if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
+ if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+ {
fann_destroy(ann);
return NULL;
}
-
+
#ifdef DEBUG
printf("creating fully shortcut connected network with learning rate %f.\n", learning_rate);
printf("input\n");
- printf(" layer : %d neurons, 1 bias\n", ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
+ printf(" layer : %d neurons, 1 bias\n",
+ ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
#endif
-
+
num_neurons_in = ann->num_input;
last_layer = ann->last_layer;
- for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
+ for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
+ {
num_neurons_out = layer_it->last_neuron - layer_it->first_neuron;
-
+
/* Now split out the connections on the different neurons */
- for(i = 0; i != num_neurons_out; i++){
+ for(i = 0; i != num_neurons_out; i++)
+ {
layer_it->first_neuron[i].first_con = ann->total_connections;
- ann->total_connections += num_neurons_in+1;
+ ann->total_connections += num_neurons_in + 1;
layer_it->first_neuron[i].last_con = ann->total_connections;
layer_it->first_neuron[i].activation_function = FANN_SIGMOID_STEPWISE;
#ifdef FIXEDFANN
- layer_it->first_neuron[i].activation_steepness = ann->multiplier/2;
+ layer_it->first_neuron[i].activation_steepness = ann->multiplier / 2;
#else
layer_it->first_neuron[i].activation_steepness = 0.5;
#endif
}
-
+
#ifdef DEBUG
printf(" layer : %d neurons, 0 bias\n", num_neurons_out);
#endif
/* used in the next run of the loop */
num_neurons_in += num_neurons_out;
}
-
+
fann_allocate_connections(ann);
- if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
+ if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM)
+ {
fann_destroy(ann);
return NULL;
}
/* Connections are created from all neurons to all neurons in later layers
*/
- num_neurons_in = ann->num_input+1;
- for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
- for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
+ num_neurons_in = ann->num_input + 1;
+ for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
+ {
+ for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
+ {
i = neuron_it->first_con;
- for(layer_it2 = ann->first_layer; layer_it2 != layer_it; layer_it2++){
- for(neuron_it2 = layer_it2->first_neuron; neuron_it2 != layer_it2->last_neuron; neuron_it2++){
-
- ann->weights[i] = (fann_type)fann_random_weight();
+ for(layer_it2 = ann->first_layer; layer_it2 != layer_it; layer_it2++)
+ {
+ for(neuron_it2 = layer_it2->first_neuron; neuron_it2 != layer_it2->last_neuron;
+ neuron_it2++)
+ {
+
+ ann->weights[i] = (fann_type) fann_random_weight();
ann->connections[i] = neuron_it2;
i++;
}
@@ -448,13 +491,13 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_r
#ifdef DEBUG
printf("output\n");
#endif
-
+
return ann;
}
/* runs the network.
*/
-FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
+FANN_EXTERNAL fann_type *FANN_API fann_run(struct fann * ann, fann_type * input)
{
struct fann_neuron *neuron_it, *last_neuron, *neurons, **neuron_pointers;
unsigned int i, num_connections, num_input, num_output;
@@ -463,14 +506,14 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
struct fann_layer *layer_it, *last_layer;
unsigned int activation_function;
fann_type steepness;
-
+
/* store some variables locally for fast access */
struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
#ifdef FIXEDFANN
int multiplier = ann->multiplier;
unsigned int decimal_point = ann->decimal_point;
-
+
/* values used for the stepwise linear sigmoid function */
fann_type r1 = 0, r2 = 0, r3 = 0, r4 = 0, r5 = 0, r6 = 0;
fann_type v1 = 0, v2 = 0, v3 = 0, v4 = 0, v5 = 0, v6 = 0;
@@ -478,31 +521,36 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
fann_type last_steepness = 0;
unsigned int last_activation_function = 0;
#endif
-
+
/* first set the input */
num_input = ann->num_input;
- for(i = 0; i != num_input; i++){
+ for(i = 0; i != num_input; i++)
+ {
#ifdef FIXEDFANN
- if(fann_abs(input[i]) > multiplier){
- printf("Warning input number %d is out of range -%d - %d with value %d, integer overflow may occur.\n", i, multiplier, multiplier, input[i]);
+ if(fann_abs(input[i]) > multiplier)
+ {
+ printf
+ ("Warning input number %d is out of range -%d - %d with value %d, integer overflow may occur.\n",
+ i, multiplier, multiplier, input[i]);
}
#endif
first_neuron[i].value = input[i];
}
/* Set the bias neuron in the input layer */
#ifdef FIXEDFANN
- (ann->first_layer->last_neuron-1)->value = multiplier;
+ (ann->first_layer->last_neuron - 1)->value = multiplier;
#else
- (ann->first_layer->last_neuron-1)->value = 1;
+ (ann->first_layer->last_neuron - 1)->value = 1;
#endif
-
+
last_layer = ann->last_layer;
- for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++)
+ for(layer_it = ann->first_layer + 1; layer_it != last_layer; layer_it++)
{
last_neuron = layer_it->last_neuron;
for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
{
- if(neuron_it->first_con == neuron_it->last_con){
+ if(neuron_it->first_con == neuron_it->last_con)
+ {
/* bias neurons */
#ifdef FIXEDFANN
neuron_it->value = multiplier;
@@ -511,146 +559,159 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
#endif
continue;
}
-
- activation_function = neuron_it->activation_function;
+
+ activation_function = neuron_it->activation_function;
steepness = neuron_it->activation_steepness;
/* TODO REMOVE BEGIN
- if(layer_it == last_layer-1)
- activation_function = FANN_SIGMOID_SYMMETRIC;
- else
- activation_function = FANN_SIGMOID;
-#ifdef FIXEDFANN
- steepness = multiplier/2.0;
-#else
- steepness = 0.5;
-#endif
- TODO REMOVE END */
-
+ * if(layer_it == last_layer-1)
+ * activation_function = FANN_SIGMOID_SYMMETRIC;
+ * else
+ * activation_function = FANN_SIGMOID;
+ * #ifdef FIXEDFANN
+ * steepness = multiplier/2.0;
+ * #else
+ * steepness = 0.5;
+ * #endif
+ * TODO REMOVE END */
+
neuron_sum = 0;
num_connections = neuron_it->last_con - neuron_it->first_con;
weights = ann->weights + neuron_it->first_con;
-
- if(ann->connection_rate >= 1){
- if(ann->shortcut_connections){
+
+ if(ann->connection_rate >= 1)
+ {
+ if(ann->shortcut_connections)
+ {
neurons = ann->first_layer->first_neuron;
- } else {
- neurons = (layer_it-1)->first_neuron;
+ }
+ else
+ {
+ neurons = (layer_it - 1)->first_neuron;
}
-
+
/* unrolled loop start */
- i = num_connections & 3; /* same as modulo 4 */
- switch(i) {
- case 3:
- neuron_sum += fann_mult(weights[2], neurons[2].value);
- case 2:
- neuron_sum += fann_mult(weights[1], neurons[1].value);
- case 1:
- neuron_sum += fann_mult(weights[0], neurons[0].value);
- case 0:
- break;
+ i = num_connections & 3; /* same as modulo 4 */
+ switch (i)
+ {
+ case 3:
+ neuron_sum += fann_mult(weights[2], neurons[2].value);
+ case 2:
+ neuron_sum += fann_mult(weights[1], neurons[1].value);
+ case 1:
+ neuron_sum += fann_mult(weights[0], neurons[0].value);
+ case 0:
+ break;
}
-
- for(;i != num_connections; i += 4){
+
+ for(; i != num_connections; i += 4)
+ {
neuron_sum +=
fann_mult(weights[i], neurons[i].value) +
- fann_mult(weights[i+1], neurons[i+1].value) +
- fann_mult(weights[i+2], neurons[i+2].value) +
- fann_mult(weights[i+3], neurons[i+3].value);
+ fann_mult(weights[i + 1], neurons[i + 1].value) +
+ fann_mult(weights[i + 2], neurons[i + 2].value) +
+ fann_mult(weights[i + 3], neurons[i + 3].value);
}
/* unrolled loop end */
/*
- for(i = 0;i != num_connections; i++){
- printf("%f += %f*%f, ", neuron_sum, weights[i], neurons[i].value);
- neuron_sum += fann_mult(weights[i], neurons[i].value);
- }
- */
- } else {
+ * for(i = 0;i != num_connections; i++){
+ * printf("%f += %f*%f, ", neuron_sum, weights[i], neurons[i].value);
+ * neuron_sum += fann_mult(weights[i], neurons[i].value);
+ * }
+ */
+ }
+ else
+ {
neuron_pointers = ann->connections + neuron_it->first_con;
-
- i = num_connections & 3; /* same as modulo 4 */
- switch(i) {
- case 3:
- neuron_sum += fann_mult(weights[2], neuron_pointers[2]->value);
- case 2:
- neuron_sum += fann_mult(weights[1], neuron_pointers[1]->value);
- case 1:
- neuron_sum += fann_mult(weights[0], neuron_pointers[0]->value);
- case 0:
- break;
+
+ i = num_connections & 3; /* same as modulo 4 */
+ switch (i)
+ {
+ case 3:
+ neuron_sum += fann_mult(weights[2], neuron_pointers[2]->value);
+ case 2:
+ neuron_sum += fann_mult(weights[1], neuron_pointers[1]->value);
+ case 1:
+ neuron_sum += fann_mult(weights[0], neuron_pointers[0]->value);
+ case 0:
+ break;
}
-
- for(;i != num_connections; i += 4){
+
+ for(; i != num_connections; i += 4)
+ {
neuron_sum +=
fann_mult(weights[i], neuron_pointers[i]->value) +
- fann_mult(weights[i+1], neuron_pointers[i+1]->value) +
- fann_mult(weights[i+2], neuron_pointers[i+2]->value) +
- fann_mult(weights[i+3], neuron_pointers[i+3]->value);
+ fann_mult(weights[i + 1], neuron_pointers[i + 1]->value) +
+ fann_mult(weights[i + 2], neuron_pointers[i + 2]->value) +
+ fann_mult(weights[i + 3], neuron_pointers[i + 3]->value);
}
}
-
+
#ifdef FIXEDFANN
neuron_it->sum = fann_mult(steepness, neuron_sum);
- if(activation_function != last_activation_function ||
- steepness != last_steepness)
+ if(activation_function != last_activation_function || steepness != last_steepness)
{
- switch(activation_function)
+ switch (activation_function)
{
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- r1 = ann->sigmoid_results[0];
- r2 = ann->sigmoid_results[1];
- r3 = ann->sigmoid_results[2];
- r4 = ann->sigmoid_results[3];
- r5 = ann->sigmoid_results[4];
- r6 = ann->sigmoid_results[5];
- v1 = ann->sigmoid_values[0]/steepness;
- v2 = ann->sigmoid_values[1]/steepness;
- v3 = ann->sigmoid_values[2]/steepness;
- v4 = ann->sigmoid_values[3]/steepness;
- v5 = ann->sigmoid_values[4]/steepness;
- v6 = ann->sigmoid_values[5]/steepness;
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- r1 = ann->sigmoid_symmetric_results[0];
- r2 = ann->sigmoid_symmetric_results[1];
- r3 = ann->sigmoid_symmetric_results[2];
- r4 = ann->sigmoid_symmetric_results[3];
- r5 = ann->sigmoid_symmetric_results[4];
- r6 = ann->sigmoid_symmetric_results[5];
- v1 = ann->sigmoid_symmetric_values[0]/steepness;
- v2 = ann->sigmoid_symmetric_values[1]/steepness;
- v3 = ann->sigmoid_symmetric_values[2]/steepness;
- v4 = ann->sigmoid_symmetric_values[3]/steepness;
- v5 = ann->sigmoid_symmetric_values[4]/steepness;
- v6 = ann->sigmoid_symmetric_values[5]/steepness;
- break;
- default:
- break;
- }
- }
-
- switch(activation_function)
- {
case FANN_SIGMOID:
case FANN_SIGMOID_STEPWISE:
- neuron_it->value = (fann_type)fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6, 0, multiplier, neuron_sum);
+ r1 = ann->sigmoid_results[0];
+ r2 = ann->sigmoid_results[1];
+ r3 = ann->sigmoid_results[2];
+ r4 = ann->sigmoid_results[3];
+ r5 = ann->sigmoid_results[4];
+ r6 = ann->sigmoid_results[5];
+ v1 = ann->sigmoid_values[0] / steepness;
+ v2 = ann->sigmoid_values[1] / steepness;
+ v3 = ann->sigmoid_values[2] / steepness;
+ v4 = ann->sigmoid_values[3] / steepness;
+ v5 = ann->sigmoid_values[4] / steepness;
+ v6 = ann->sigmoid_values[5] / steepness;
break;
case FANN_SIGMOID_SYMMETRIC:
case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- neuron_it->value = (fann_type)fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6, -multiplier, multiplier, neuron_sum);
- break;
- case FANN_THRESHOLD:
- neuron_it->value = (fann_type)((neuron_sum < 0) ? 0 : 1);
- break;
- case FANN_THRESHOLD_SYMMETRIC:
- neuron_it->value = (fann_type)((neuron_sum < 0) ? -1 : 1);
+ r1 = ann->sigmoid_symmetric_results[0];
+ r2 = ann->sigmoid_symmetric_results[1];
+ r3 = ann->sigmoid_symmetric_results[2];
+ r4 = ann->sigmoid_symmetric_results[3];
+ r5 = ann->sigmoid_symmetric_results[4];
+ r6 = ann->sigmoid_symmetric_results[5];
+ v1 = ann->sigmoid_symmetric_values[0] / steepness;
+ v2 = ann->sigmoid_symmetric_values[1] / steepness;
+ v3 = ann->sigmoid_symmetric_values[2] / steepness;
+ v4 = ann->sigmoid_symmetric_values[3] / steepness;
+ v5 = ann->sigmoid_symmetric_values[4] / steepness;
+ v6 = ann->sigmoid_symmetric_values[5] / steepness;
break;
default:
- fann_error((struct fann_error *)ann, FANN_E_CANT_USE_ACTIVATION);
+ break;
+ }
+ }
+
+ switch (activation_function)
+ {
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ neuron_it->value =
+ (fann_type) fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6, 0,
+ multiplier, neuron_sum);
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ neuron_it->value =
+ (fann_type) fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6,
+ -multiplier, multiplier, neuron_sum);
+ break;
+ case FANN_THRESHOLD:
+ neuron_it->value = (fann_type) ((neuron_sum < 0) ? 0 : 1);
+ break;
+ case FANN_THRESHOLD_SYMMETRIC:
+ neuron_it->value = (fann_type) ((neuron_sum < 0) ? -1 : 1);
+ break;
+ default:
+ fann_error((struct fann_error *) ann, FANN_E_CANT_USE_ACTIVATION);
}
last_steepness = steepness;
last_activation_function = activation_function;
@@ -662,12 +723,13 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
#endif
}
}
-
+
/* set the output */
output = ann->output;
num_output = ann->num_output;
- neurons = (ann->last_layer-1)->first_neuron;
- for(i = 0; i != num_output; i++){
+ neurons = (ann->last_layer - 1)->first_neuron;
+ for(i = 0; i != num_output; i++)
+ {
output[i] = neurons[i].value;
}
return ann->output;
@@ -677,7 +739,8 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
*/
FANN_EXTERNAL void FANN_API fann_destroy(struct fann *ann)
{
- if(ann == NULL) return;
+ if(ann == NULL)
+ return;
fann_safe_free(ann->weights);
fann_safe_free(ann->connections);
fann_safe_free(ann->first_layer->first_neuron);
@@ -691,17 +754,21 @@ FANN_EXTERNAL void FANN_API fann_destroy(struct fann *ann)
fann_safe_free(ann);
}
-FANN_EXTERNAL void FANN_API fann_randomize_weights(struct fann *ann, fann_type min_weight, fann_type max_weight)
+FANN_EXTERNAL void FANN_API fann_randomize_weights(struct fann *ann, fann_type min_weight,
+ fann_type max_weight)
{
fann_type *last_weight;
fann_type *weights = ann->weights;
+
last_weight = weights + ann->total_connections;
- for(;weights != last_weight; weights++){
- *weights = (fann_type)(fann_rand(min_weight, max_weight));
+ for(; weights != last_weight; weights++)
+ {
+ *weights = (fann_type) (fann_rand(min_weight, max_weight));
}
#ifndef FIXEDFANN
- if(ann->prev_train_slopes != NULL){
+ if(ann->prev_train_slopes != NULL)
+ {
fann_clear_train_arrays(ann);
}
#endif
@@ -715,45 +782,55 @@ FANN_EXTERNAL void FANN_API fann_print_connections(struct fann *ann)
int value;
char *neurons;
unsigned int num_neurons = fann_get_total_neurons(ann) - fann_get_num_output(ann);
- neurons = (char *)malloc(num_neurons+1);
- if(neurons == NULL){
+
+ neurons = (char *) malloc(num_neurons + 1);
+ if(neurons == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return;
}
neurons[num_neurons] = 0;
printf("Layer / Neuron ");
- for(i = 0; i < num_neurons; i++){
- printf("%d", i%10);
+ for(i = 0; i < num_neurons; i++)
+ {
+ printf("%d", i % 10);
}
printf("\n");
-
- for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
- for(neuron_it = layer_it->first_neuron;
- neuron_it != layer_it->last_neuron; neuron_it++){
-
- memset(neurons, (int)'.', num_neurons);
- for(i = neuron_it->first_con; i < neuron_it->last_con; i++){
- if(ann->weights[i] < 0){
+
+ for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
+ {
+ for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++)
+ {
+
+ memset(neurons, (int) '.', num_neurons);
+ for(i = neuron_it->first_con; i < neuron_it->last_con; i++)
+ {
+ if(ann->weights[i] < 0)
+ {
#ifdef FIXEDFANN
- value = (int)((ann->weights[i]/(double)ann->multiplier)-0.5);
+ value = (int) ((ann->weights[i] / (double) ann->multiplier) - 0.5);
#else
- value = (int)((ann->weights[i])-0.5);
+ value = (int) ((ann->weights[i]) - 0.5);
#endif
- if(value < -25) value = -25;
+ if(value < -25)
+ value = -25;
neurons[ann->connections[i] - ann->first_layer->first_neuron] = 'a' - value;
- }else{
+ }
+ else
+ {
#ifdef FIXEDFANN
- value = (int)((ann->weights[i]/(double)ann->multiplier)+0.5);
+ value = (int) ((ann->weights[i] / (double) ann->multiplier) + 0.5);
#else
- value = (int)((ann->weights[i])+0.5);
+ value = (int) ((ann->weights[i]) + 0.5);
#endif
- if(value > 25) value = 25;
+ if(value > 25)
+ value = 25;
neurons[ann->connections[i] - ann->first_layer->first_neuron] = 'A' + value;
}
}
printf("L %3d / N %4d %s\n", layer_it - ann->first_layer,
- neuron_it - ann->first_layer->first_neuron, neurons);
+ neuron_it - ann->first_layer->first_neuron, neurons);
}
}
@@ -768,48 +845,65 @@ FANN_EXTERNAL void FANN_API fann_init_weights(struct fann *ann, struct fann_trai
unsigned int dat = 0, elem, num_connect, num_hidden_neurons;
struct fann_layer *layer_it;
struct fann_neuron *neuron_it, *last_neuron, *bias_neuron;
+
#ifdef FIXEDFANN
unsigned int multiplier = ann->multiplier;
#endif
float scale_factor;
- for ( smallest_inp = largest_inp = train_data->input[0][0] ; dat < train_data->num_data ; dat++ ) {
- for ( elem = 0 ; elem < train_data->num_input ; elem++ ) {
- if ( train_data->input[dat][elem] < smallest_inp )
+ for(smallest_inp = largest_inp = train_data->input[0][0]; dat < train_data->num_data; dat++)
+ {
+ for(elem = 0; elem < train_data->num_input; elem++)
+ {
+ if(train_data->input[dat][elem] < smallest_inp)
smallest_inp = train_data->input[dat][elem];
- if ( train_data->input[dat][elem] > largest_inp )
+ if(train_data->input[dat][elem] > largest_inp)
largest_inp = train_data->input[dat][elem];
}
}
- num_hidden_neurons = ann->total_neurons - (ann->num_input + ann->num_output + (ann->last_layer - ann->first_layer));
- scale_factor = (float)(pow((double)(0.7f * (double)num_hidden_neurons),
- (double)(1.0f / (double)ann->num_input)) / (double)(largest_inp - smallest_inp));
+ num_hidden_neurons =
+ ann->total_neurons - (ann->num_input + ann->num_output +
+ (ann->last_layer - ann->first_layer));
+ scale_factor =
+ (float) (pow
+ ((double) (0.7f * (double) num_hidden_neurons),
+ (double) (1.0f / (double) ann->num_input)) / (double) (largest_inp -
+ smallest_inp));
#ifdef DEBUG
printf("Initializing weights with scale factor %f\n", scale_factor);
#endif
- bias_neuron = ann->first_layer->last_neuron-1;
- for ( layer_it = ann->first_layer+1; layer_it != ann->last_layer ; layer_it++) {
+ bias_neuron = ann->first_layer->last_neuron - 1;
+ for(layer_it = ann->first_layer + 1; layer_it != ann->last_layer; layer_it++)
+ {
last_neuron = layer_it->last_neuron;
- if(!ann->shortcut_connections){
- bias_neuron = (layer_it-1)->last_neuron-1;
+ if(!ann->shortcut_connections)
+ {
+ bias_neuron = (layer_it - 1)->last_neuron - 1;
}
- for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++) {
- for ( num_connect = neuron_it->first_con; num_connect < neuron_it->last_con ; num_connect++ ) {
- if ( bias_neuron == ann->connections[num_connect] ) {
+ for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++)
+ {
+ for(num_connect = neuron_it->first_con; num_connect < neuron_it->last_con;
+ num_connect++)
+ {
+ if(bias_neuron == ann->connections[num_connect])
+ {
#ifdef FIXEDFANN
- ann->weights[num_connect] = (fann_type)fann_rand(-scale_factor, scale_factor * multiplier);
+ ann->weights[num_connect] =
+ (fann_type) fann_rand(-scale_factor, scale_factor * multiplier);
#else
- ann->weights[num_connect] = (fann_type)fann_rand(-scale_factor, scale_factor);
+ ann->weights[num_connect] = (fann_type) fann_rand(-scale_factor, scale_factor);
#endif
- } else {
+ }
+ else
+ {
#ifdef FIXEDFANN
- ann->weights[num_connect] = (fann_type)fann_rand(0, scale_factor * multiplier);
+ ann->weights[num_connect] = (fann_type) fann_rand(0, scale_factor * multiplier);
#else
- ann->weights[num_connect] = (fann_type)fann_rand(0, scale_factor);
+ ann->weights[num_connect] = (fann_type) fann_rand(0, scale_factor);
#endif
}
}
@@ -817,7 +911,8 @@ FANN_EXTERNAL void FANN_API fann_init_weights(struct fann *ann, struct fann_trai
}
#ifndef FIXEDFANN
- if(ann->prev_train_slopes != NULL){
+ if(ann->prev_train_slopes != NULL)
+ {
fann_clear_train_arrays(ann);
}
#endif
@@ -826,11 +921,12 @@ FANN_EXTERNAL void FANN_API fann_init_weights(struct fann *ann, struct fann_trai
/* INTERNAL FUNCTION
Allocates the main structure and sets some default values.
*/
-struct fann * fann_allocate_structure(float learning_rate, unsigned int num_layers)
+struct fann *fann_allocate_structure(float learning_rate, unsigned int num_layers)
{
struct fann *ann;
-
- if(num_layers < 2){
+
+ if(num_layers < 2)
+ {
#ifdef DEBUG
printf("less than 2 layers - ABORTING.\n");
#endif
@@ -838,8 +934,9 @@ struct fann * fann_allocate_structure(float learning_rate, unsigned int num_laye
}
/* allocate and initialize the main network structure */
- ann = (struct fann *)malloc(sizeof(struct fann));
- if(ann == NULL){
+ ann = (struct fann *) malloc(sizeof(struct fann));
+ if(ann == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
@@ -872,33 +969,34 @@ struct fann * fann_allocate_structure(float learning_rate, unsigned int num_laye
ann->cascade_candidate_scores = NULL;
/* Variables for use with with Quickprop training (reasonable defaults) */
- ann->quickprop_decay = (float)-0.0001;
+ ann->quickprop_decay = (float) -0.0001;
ann->quickprop_mu = 1.75;
/* Variables for use with with RPROP training (reasonable defaults) */
- ann->rprop_increase_factor = (float)1.2;
+ ann->rprop_increase_factor = (float) 1.2;
ann->rprop_decrease_factor = 0.5;
ann->rprop_delta_min = 0.0;
ann->rprop_delta_max = 50.0;
ann->rprop_delta_zero = 0.5;
- fann_init_error_data((struct fann_error *)ann);
+ fann_init_error_data((struct fann_error *) ann);
#ifdef FIXEDFANN
/* these values are only boring defaults, and should really
- never be used, since the real values are always loaded from a file. */
+ * never be used, since the real values are always loaded from a file. */
ann->decimal_point = 8;
ann->multiplier = 256;
#endif
-
+
/* allocate room for the layers */
- ann->first_layer = (struct fann_layer *)calloc(num_layers, sizeof(struct fann_layer));
- if(ann->first_layer == NULL){
+ ann->first_layer = (struct fann_layer *) calloc(num_layers, sizeof(struct fann_layer));
+ if(ann->first_layer == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
free(ann);
return NULL;
}
-
+
ann->last_layer = ann->first_layer + num_layers;
return ann;
@@ -915,24 +1013,27 @@ void fann_allocate_neurons(struct fann *ann)
unsigned int num_neurons = 0;
/* all the neurons is allocated in one long array (calloc clears mem) */
- neurons = (struct fann_neuron *)calloc(ann->total_neurons, sizeof(struct fann_neuron));
+ neurons = (struct fann_neuron *) calloc(ann->total_neurons, sizeof(struct fann_neuron));
ann->total_neurons_allocated = ann->total_neurons;
-
- if(neurons == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+
+ if(neurons == NULL)
+ {
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
-
- for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++)
+ {
num_neurons = layer_it->last_neuron - layer_it->first_neuron;
- layer_it->first_neuron = neurons+num_neurons_so_far;
- layer_it->last_neuron = layer_it->first_neuron+num_neurons;
+ layer_it->first_neuron = neurons + num_neurons_so_far;
+ layer_it->last_neuron = layer_it->first_neuron + num_neurons;
num_neurons_so_far += num_neurons;
}
- ann->output = (fann_type *)calloc(num_neurons, sizeof(fann_type));
- if(ann->output == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ ann->output = (fann_type *) calloc(num_neurons, sizeof(fann_type));
+ if(ann->output == NULL)
+ {
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
}
@@ -942,19 +1043,23 @@ void fann_allocate_neurons(struct fann *ann)
*/
void fann_allocate_connections(struct fann *ann)
{
- ann->weights = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
- if(ann->weights == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ ann->weights = (fann_type *) calloc(ann->total_connections, sizeof(fann_type));
+ if(ann->weights == NULL)
+ {
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
ann->total_connections_allocated = ann->total_connections;
-
+
/* TODO make special cases for all places where the connections
- is used, so that it is not needed for fully connected networks.
- */
- ann->connections = (struct fann_neuron **) calloc(ann->total_connections_allocated, sizeof(struct fann_neuron*));
- if(ann->connections == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ * is used, so that it is not needed for fully connected networks.
+ */
+ ann->connections =
+ (struct fann_neuron **) calloc(ann->total_connections_allocated,
+ sizeof(struct fann_neuron *));
+ if(ann->connections == NULL)
+ {
+ fann_error((struct fann_error *) ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
}
@@ -968,19 +1073,23 @@ void fann_seed_rand()
FILE *fp = fopen("/dev/urandom", "r");
unsigned int foo;
struct timeval t;
- if(!fp){
+
+ if(!fp)
+ {
gettimeofday(&t, NULL);
foo = t.tv_usec;
#ifdef DEBUG
printf("unable to open /dev/urandom\n");
#endif
- }else{
+ }
+ else
+ {
fread(&foo, sizeof(foo), 1, fp);
fclose(fp);
}
srand(foo);
#else
- /* COMPAT_TIME REPLACEMENT */
- srand(GetTickCount());
+ /* COMPAT_TIME REPLACEMENT */
+ srand(GetTickCount());
#endif
}
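Most of the fann.c churn above is pure reindentation (GNU-style braces, spaced casts), but it also reflows the one genuinely tricky piece of fann_run(): the four-way unrolled weighted sum, where a switch on num_connections & 3 consumes the remainder terms via fall-through before the main loop strides by four. The technique in isolation, as a self-contained sketch (plain float arrays stand in for the network's weight/connection arrays and fann_mult):

    /* Four-way unrolled dot product in the style of fann_run(). */
    float dot4(const float *w, const float *v, unsigned int n)
    {
        float sum = 0;
        unsigned int i = n & 3;            /* same as n % 4 */

        switch(i)
        {
            case 3: sum += w[2] * v[2];    /* fall through */
            case 2: sum += w[1] * v[1];    /* fall through */
            case 1: sum += w[0] * v[0];
            case 0: break;
        }
        for(; i != n; i += 4)
            sum += w[i] * v[i] + w[i + 1] * v[i + 1]
                 + w[i + 2] * v[i + 2] + w[i + 3] * v[i + 3];
        return sum;
    }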
diff --git a/src/fann_cascade.c b/src/fann_cascade.c
index 10cdf03..fb76f13 100644
--- a/src/fann_cascade.c
+++ b/src/fann_cascade.c
@@ -283,13 +283,17 @@ float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data)
fann_compute_MSE(ann, data->output[i]);
fann_update_slopes_batch(ann, ann->last_layer-1, ann->last_layer-1);
}
- /* TODO this should actually use the algorithm selected by
- ann->training_algorithm
- */
- if(ann->training_algorithm == FANN_TRAIN_RPROP){
- fann_update_weights_irpropm(ann, (ann->last_layer-1)->first_neuron->first_con, ann->total_connections);
- } else {
- fann_update_weights_quickprop(ann, data->num_data, (ann->last_layer-1)->first_neuron->first_con, ann->total_connections);
+
+ switch(ann->training_algorithm)
+ {
+ case FANN_TRAIN_RPROP:
+ fann_update_weights_irpropm(ann, (ann->last_layer-1)->first_neuron->first_con, ann->total_connections);
+ break;
+ case FANN_TRAIN_QUICKPROP:
+ fann_update_weights_quickprop(ann, data->num_data, (ann->last_layer-1)->first_neuron->first_con, ann->total_connections);
+ break;
+ default:
+ fann_error((struct fann_error *)ann, FANN_E_CANT_USE_ACTIVATION);
}
return fann_get_MSE(ann);
@@ -632,11 +636,16 @@ void fann_update_candidate_weights(struct fann *ann, unsigned int num_data)
struct fann_neuron *first_cand = (ann->last_layer-1)->last_neuron + 1; /* there is an empty neuron between the actual neurons and the candidate neuron */
struct fann_neuron *last_cand = first_cand + ann->cascade_num_candidates-1;
- /**/
- if(ann->training_algorithm == FANN_TRAIN_RPROP){
- fann_update_weights_irpropm(ann, first_cand->first_con, last_cand->last_con+ann->num_output);
- }else{
- fann_update_weights_quickprop(ann, num_data, first_cand->first_con, last_cand->last_con+ann->num_output);
+ switch(ann->training_algorithm)
+ {
+ case FANN_TRAIN_RPROP:
+ fann_update_weights_irpropm(ann, first_cand->first_con, last_cand->last_con+ann->num_output);
+ break;
+ case FANN_TRAIN_QUICKPROP:
+ fann_update_weights_quickprop(ann, num_data, first_cand->first_con, last_cand->last_con+ann->num_output);
+ break;
+ default:
+ fann_error((struct fann_error *)ann, FANN_E_CANT_USE_ACTIVATION);
}
}
@@ -886,6 +895,4 @@ void fann_install_candidate(struct fann *ann)
return;
}
-
-
#endif /* FIXEDFANN */
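Both cascade hunks make the same change: the old RPROP-or-else-quickprop if/else becomes an explicit switch on ann->training_algorithm, so FANN_TRAIN_BATCH and FANN_TRAIN_INCREMENTAL now raise an error instead of silently running quickprop. (The reused error code, FANN_E_CANT_USE_ACTIVATION, is really about activation functions; a dedicated training-algorithm error code would read better.) The shared dispatch pattern, sketched with placeholder bounds first_con/last_con for the weight slice being updated:

    switch(ann->training_algorithm)
    {
        case FANN_TRAIN_RPROP:
            fann_update_weights_irpropm(ann, first_con, last_con);
            break;
        case FANN_TRAIN_QUICKPROP:
            fann_update_weights_quickprop(ann, num_data, first_con, last_con);
            break;
        default:
            /* batch/incremental are not wired into cascade training yet */
            fann_error((struct fann_error *) ann, FANN_E_CANT_USE_ACTIVATION);
    }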
diff --git a/src/fann_train.c b/src/fann_train.c
index 993137c..86ea5d6 100644
--- a/src/fann_train.c
+++ b/src/fann_train.c
@@ -74,35 +74,6 @@ fann_type fann_activation(struct fann *ann, unsigned int activation_function, fa
value = fann_mult(steepness, value);
fann_activation_switch(ann, activation_function, value, value);
return value;
- /*
- switch(activation_function){
- case FANN_LINEAR:
- return value;
- case FANN_SIGMOID:
- return (fann_type)fann_sigmoid_real(value);
- case FANN_SIGMOID_SYMMETRIC:
- return (fann_type)fann_sigmoid_symmetric_real(value);
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- return (fann_type)fann_stepwise(-2.64665293693542480469e+00, -1.47221934795379638672e+00, -5.49306154251098632812e-01, 5.49306154251098632812e-01, 1.47221934795379638672e+00, 2.64665293693542480469e+00, -9.90000009536743164062e-01, -8.99999976158142089844e-01, -5.00000000000000000000e-01, 5.00000000000000000000e-01, 8.99999976158142089844e-01, 9.90000009536743164062e-01, -1, 1, value);
- case FANN_SIGMOID_STEPWISE:
- return (fann_type)fann_stepwise(-2.64665246009826660156e+00, -1.47221946716308593750e+00, -5.49306154251098632812e-01, 5.49306154251098632812e-01, 1.47221934795379638672e+00, 2.64665293693542480469e+00, 4.99999988824129104614e-03, 5.00000007450580596924e-02, 2.50000000000000000000e-01, 7.50000000000000000000e-01, 9.49999988079071044922e-01, 9.95000004768371582031e-01, 0, 1, value);
- case FANN_THRESHOLD:
- return (fann_type)((value < 0) ? 0 : 1);
- case FANN_THRESHOLD_SYMMETRIC:
- return (fann_type)((value < 0) ? -1 : 1);
- case FANN_GAUSSIAN:
- return (fann_type)fann_gaussian_real(value);
- case FANN_GAUSSIAN_SYMMETRIC:
- return (fann_type)fann_gaussian_symmetric_real(value);
- case FANN_ELLIOT:
- return (fann_type)fann_elliot_real(value);
- case FANN_ELLIOT_SYMMETRIC:
- return (fann_type)fann_elliot_symmetric_real(value);
- default:
- fann_error((struct fann_error *)ann, FANN_E_CANT_USE_ACTIVATION);
- return 0;
- }
- */
}
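The deleted switch documents what the stepwise activation variants were: each FANN_*_STEPWISE case handed six x breakpoints, six y values and the two saturation limits to fann_stepwise, i.e. a clamped piecewise-linear approximation of the real function. A hedged reimplementation of that idea, with made-up names:

    /* Sketch of the stepwise idea: saturate outside [x[0], x[5]],
     * interpolate linearly between consecutive points inside.
     * Not FANN's actual macro. */
    static float stepwise6(const float x[6], const float y[6],
                           float lo, float hi, float v)
    {
        int i;

        if(v <= x[0]) return lo;
        if(v >= x[5]) return hi;
        for(i = 1; i < 6; i++)
            if(v < x[i])
                return y[i - 1] + (y[i] - y[i - 1]) * (v - x[i - 1]) / (x[i] - x[i - 1]);
        return hi;    /* not reached */
    }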
/* Trains the network with the backpropagation algorithm.
@@ -347,40 +318,6 @@ void fann_backpropagate_MSE(struct fann *ann)
/* *error_prev_layer *= fann_activation(ann, 0, neuron_value); */
*error_prev_layer *= fann_activation(ann, neuron_it->activation_function, neuron_it->activation_steepness, neuron_value);
}
-
- /*
- switch(ann->activation_function_hidden){
- case FANN_LINEAR:
- for(neuron_it = (layer_it-1)->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- neuron_value = neuron_it->value;
- *error_prev_layer *= (fann_type)fann_linear_derive(activation_steepness_hidden, neuron_value);
- error_prev_layer++;
- }
- break;
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- for(neuron_it = (layer_it-1)->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- neuron_value = neuron_it->value;
- *error_prev_layer *= (fann_type)fann_sigmoid_derive(activation_steepness_hidden, neuron_value);
- error_prev_layer++;
- }
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- for(neuron_it = (layer_it-1)->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- neuron_value = neuron_it->value;
- *error_prev_layer *= (fann_type)fann_sigmoid_symmetric_derive(activation_steepness_hidden, neuron_value);
- error_prev_layer++;
- }
- break;
- default:
- fann_error((struct fann_error *)ann, FANN_E_CANT_TRAIN_ACTIVATION);
- return;
- }
- */
}
}
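The block deleted here was the long-commented per-activation derivative switch; the error is now scaled through a single per-neuron call instead. For reference, the derivatives that switch applied are cheap functions of the already-computed output y and the steepness s. A hedged summary, assuming FANN's sigmoid is 1/(1+e^(-2sx)) and the symmetric variant is tanh(sx):

    /* Hedged derivative table, in terms of the neuron output y:
     *   linear:            d = s
     *   sigmoid:           d = 2*s*y*(1 - y)
     *   sigmoid symmetric: d = s*(1 - y*y)
     * y is typically clamped slightly away from the saturation limits
     * first, so the factors never vanish outright. */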
diff --git a/src/fann_train_data.c b/src/fann_train_data.c
index e05f83f..6073985 100644
--- a/src/fann_train_data.c
+++ b/src/fann_train_data.c
@@ -1,21 +1,21 @@
/*
- Fast Artificial Neural Network Library (fann)
- Copyright (C) 2003 Steffen Nissen (lukesky at diku.dk)
-
- This library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- This library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with this library; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
+ * Fast Artificial Neural Network Library (fann) Copyright (C) 2003
+ * Steffen Nissen (lukesky at diku.dk)
+ *
+ * This library is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
#include <stdio.h>
#include <stdlib.h>
@@ -26,14 +26,16 @@
#include "fann.h"
#include "fann_errno.h"
-/* Reads training data from a file.
+/*
+ * Reads training data from a file.
*/
-FANN_EXTERNAL struct fann_train_data* FANN_API fann_read_train_from_file(char *configuration_file)
+FANN_EXTERNAL struct fann_train_data *FANN_API fann_read_train_from_file(char *configuration_file)
{
- struct fann_train_data* data;
+ struct fann_train_data *data;
FILE *file = fopen(configuration_file, "r");
- if(!file){
+ if(!file)
+ {
fann_error(NULL, FANN_E_CANT_OPEN_CONFIG_R, configuration_file);
return NULL;
}
@@ -43,26 +45,31 @@ FANN_EXTERNAL struct fann_train_data* FANN_API fann_read_train_from_file(char *c
return data;
}
-/* Save training data to a file
+/*
+ * Save training data to a file
*/
-FANN_EXTERNAL void FANN_API fann_save_train(struct fann_train_data* data, char *filename)
+FANN_EXTERNAL void FANN_API fann_save_train(struct fann_train_data *data, char *filename)
{
fann_save_train_internal(data, filename, 0, 0);
}
-/* Save training data to a file in fixed point algebra.
- (Good for testing a network in fixed point)
-*/
-FANN_EXTERNAL void FANN_API fann_save_train_to_fixed(struct fann_train_data* data, char *filename, unsigned int decimal_point)
+/*
+ * Save training data to a file in fixed point algebra. (Good for testing
+ * a network in fixed point)
+ */
+FANN_EXTERNAL void FANN_API fann_save_train_to_fixed(struct fann_train_data *data, char *filename,
+ unsigned int decimal_point)
{
fann_save_train_internal(data, filename, 1, decimal_point);
}
-/* deallocate the train data structure.
+/*
+ * deallocate the train data structure.
*/
FANN_EXTERNAL void FANN_API fann_destroy_train(struct fann_train_data *data)
{
- if(data == NULL) return;
+ if(data == NULL)
+ return;
fann_safe_free(data->input[0]);
fann_safe_free(data->output[0]);
fann_safe_free(data->input);
@@ -72,44 +79,52 @@ FANN_EXTERNAL void FANN_API fann_destroy_train(struct fann_train_data *data)
#ifndef FIXEDFANN
-/* Internal train function */
+/*
+ * Internal train function
+ */
float fann_train_epoch_quickprop(struct fann *ann, struct fann_train_data *data)
{
unsigned int i;
- if(ann->prev_train_slopes == NULL){
+ if(ann->prev_train_slopes == NULL)
+ {
fann_clear_train_arrays(ann);
}
-
+
fann_reset_MSE(ann);
-
- for(i = 0; i < data->num_data; i++){
+
+ for(i = 0; i < data->num_data; i++)
+ {
fann_run(ann, data->input[i]);
fann_compute_MSE(ann, data->output[i]);
fann_backpropagate_MSE(ann);
- fann_update_slopes_batch(ann, ann->first_layer+1, ann->last_layer-1);
+ fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
}
fann_update_weights_quickprop(ann, data->num_data, 0, ann->total_connections);
return fann_get_MSE(ann);
}
-/* Internal train function */
+/*
+ * Internal train function
+ */
float fann_train_epoch_irpropm(struct fann *ann, struct fann_train_data *data)
{
unsigned int i;
- if(ann->prev_train_slopes == NULL){
+ if(ann->prev_train_slopes == NULL)
+ {
fann_clear_train_arrays(ann);
}
-
+
fann_reset_MSE(ann);
-
- for(i = 0; i < data->num_data; i++){
+
+ for(i = 0; i < data->num_data; i++)
+ {
fann_run(ann, data->input[i]);
fann_compute_MSE(ann, data->output[i]);
fann_backpropagate_MSE(ann);
- fann_update_slopes_batch(ann, ann->first_layer+1, ann->last_layer-1);
+ fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
}
fann_update_weights_irpropm(ann, 0, ann->total_connections);
@@ -117,167 +132,223 @@ float fann_train_epoch_irpropm(struct fann *ann, struct fann_train_data *data)
return fann_get_MSE(ann);
}
-/* Internal train function */
+/*
+ * Internal train function
+ */
float fann_train_epoch_batch(struct fann *ann, struct fann_train_data *data)
{
unsigned int i;
+
fann_reset_MSE(ann);
-
- for(i = 0; i < data->num_data; i++){
+
+ for(i = 0; i < data->num_data; i++)
+ {
fann_run(ann, data->input[i]);
fann_compute_MSE(ann, data->output[i]);
fann_backpropagate_MSE(ann);
- fann_update_slopes_batch(ann, ann->first_layer+1, ann->last_layer-1);
+ fann_update_slopes_batch(ann, ann->first_layer + 1, ann->last_layer - 1);
}
-
+
fann_update_weights_batch(ann, data->num_data, 0, ann->total_connections);
return fann_get_MSE(ann);
}
-/* Internal train function */
+/*
+ * Internal train function
+ */
float fann_train_epoch_incremental(struct fann *ann, struct fann_train_data *data)
{
unsigned int i;
+
fann_reset_MSE(ann);
-
- for(i = 0; i != data->num_data; i++){
+
+ for(i = 0; i != data->num_data; i++)
+ {
fann_train(ann, data->input[i], data->output[i]);
}
return fann_get_MSE(ann);
}
-/* Train for one epoch with the selected training algorithm
+/*
+ * Train for one epoch with the selected training algorithm
*/
FANN_EXTERNAL float FANN_API fann_train_epoch(struct fann *ann, struct fann_train_data *data)
{
- switch(ann->training_algorithm){
- case FANN_TRAIN_QUICKPROP:
- return fann_train_epoch_quickprop(ann, data);
- break;
- case FANN_TRAIN_RPROP:
- return fann_train_epoch_irpropm(ann, data);
- break;
- case FANN_TRAIN_BATCH:
- return fann_train_epoch_batch(ann, data);
- break;
- case FANN_TRAIN_INCREMENTAL:
- return fann_train_epoch_incremental(ann, data);
- break;
- default:
- return 0.0;
+ switch (ann->training_algorithm)
+ {
+ case FANN_TRAIN_QUICKPROP:
+ return fann_train_epoch_quickprop(ann, data);
+ break;
+ case FANN_TRAIN_RPROP:
+ return fann_train_epoch_irpropm(ann, data);
+ break;
+ case FANN_TRAIN_BATCH:
+ return fann_train_epoch_batch(ann, data);
+ break;
+ case FANN_TRAIN_INCREMENTAL:
+ return fann_train_epoch_incremental(ann, data);
+ break;
+ default:
+ return 0.0;
}
}
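fann_train_epoch is the public entry point for the four internal epoch functions above. A minimal driver, assuming the 2.0-era fann_create_shortcut signature used in this commit's benchmarks (learning rate and layer count first) and a hypothetical "train.data" file:

    #include "fann.h"

    int main(void)
    {
        struct fann *ann = fann_create_shortcut(0.7, 2, 2, 1);
        struct fann_train_data *data = fann_read_train_from_file("train.data");
        unsigned int i;

        if(ann == NULL || data == NULL)
            return 1;
        fann_set_training_algorithm(ann, FANN_TRAIN_RPROP);
        for(i = 0; i < 500; i++)
        {
            /* dispatches through the switch above */
            if(fann_train_epoch(ann, data) < 0.001f)
                break;
        }
        fann_destroy_train(data);
        fann_destroy(ann);
        return 0;
    }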
-/* Test a set of training data and calculate the MSE
+/*
+ * Test a set of training data and calculate the MSE
*/
FANN_EXTERNAL float FANN_API fann_test_data(struct fann *ann, struct fann_train_data *data)
{
unsigned int i;
+
fann_reset_MSE(ann);
-
- for(i = 0; i != data->num_data; i++){
+
+ for(i = 0; i != data->num_data; i++)
+ {
fann_test(ann, data->input[i], data->output[i]);
}
return fann_get_MSE(ann);
}
-/* Train directly on the training data.
+/*
+ * Train directly on the training data.
*/
-FANN_EXTERNAL void FANN_API fann_train_on_data_callback(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (FANN_API *callback)(unsigned int epochs, float error))
+FANN_EXTERNAL void FANN_API fann_train_on_data_callback(struct fann *ann,
+ struct fann_train_data *data,
+ unsigned int max_epochs,
+ unsigned int epochs_between_reports,
+ float desired_error,
+                                                        int (FANN_API * callback) (unsigned int epochs, float error))
{
float error;
unsigned int i;
#ifdef DEBUG
printf("Training with ");
- switch(ann->training_algorithm){
- case FANN_TRAIN_QUICKPROP:
- printf("FANN_TRAIN_QUICKPROP");
- break;
- case FANN_TRAIN_RPROP:
- printf("FANN_TRAIN_RPROP");
- break;
- case FANN_TRAIN_BATCH:
- printf("FANN_TRAIN_BATCH");
- break;
- case FANN_TRAIN_INCREMENTAL:
- printf("FANN_TRAIN_INCREMENTAL");
- break;
+ switch (ann->training_algorithm)
+ {
+ case FANN_TRAIN_QUICKPROP:
+ printf("FANN_TRAIN_QUICKPROP");
+ break;
+ case FANN_TRAIN_RPROP:
+ printf("FANN_TRAIN_RPROP");
+ break;
+ case FANN_TRAIN_BATCH:
+ printf("FANN_TRAIN_BATCH");
+ break;
+ case FANN_TRAIN_INCREMENTAL:
+ printf("FANN_TRAIN_INCREMENTAL");
+ break;
}
printf("\n");
-#endif
-
- if(epochs_between_reports && callback == NULL){
+#endif
+
+ if(epochs_between_reports && callback == NULL)
+ {
printf("Max epochs %8d. Desired error: %.10f.\n", max_epochs, desired_error);
}
- for(i = 1; i <= max_epochs; i++){
- /* train */
+ for(i = 1; i <= max_epochs; i++)
+ {
+ /*
+ * train
+ */
error = fann_train_epoch(ann, data);
-
- /* print current output */
+
+ /*
+ * print current output
+ */
if(epochs_between_reports &&
- (i % epochs_between_reports == 0
- || i == max_epochs
- || i == 1
- || error < desired_error)){
- if (callback == NULL) {
- printf("Epochs %8d. Current error: %.10f. Bit fail %d.\n", i, error, ann->num_bit_fail);
- } else if((*callback)(i, error) == -1){
- /* you can break the training by returning -1 */
+ (i % epochs_between_reports == 0 || i == max_epochs || i == 1 || error < desired_error))
+ {
+ if(callback == NULL)
+ {
+ printf("Epochs %8d. Current error: %.10f. Bit fail %d.\n", i, error,
+ ann->num_bit_fail);
+ }
+ else if((*callback) (i, error) == -1)
+ {
+ /*
+ * you can break the training by returning -1
+ */
break;
}
}
-
- if(error < desired_error){
+
+ if(error < desired_error)
+ {
break;
}
}
}
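The callback contract is visible in the loop above: it runs on reporting epochs, and returning -1 stops training early. A small sketch matching that signature (names illustrative):

    static int FANN_API report(unsigned int epochs, float error)
    {
        printf("epoch %u: MSE %f\n", epochs, error);
        return (error < 0.01f) ? -1 : 0;    /* -1 aborts training */
    }

    /* ... */
    fann_train_on_data_callback(ann, data, 100000, 100, 0.0001f, report);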
-FANN_EXTERNAL void FANN_API fann_train_on_data(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error)
+FANN_EXTERNAL void FANN_API fann_train_on_data(struct fann *ann, struct fann_train_data *data,
+ unsigned int max_epochs,
+ unsigned int epochs_between_reports,
+ float desired_error)
{
fann_train_on_data_callback(ann, data, max_epochs, epochs_between_reports, desired_error, NULL);
}
-/* Wrapper to make it easy to train directly on a training data file.
+/*
+ * Wrapper to make it easy to train directly on a training data file.
*/
-FANN_EXTERNAL void FANN_API fann_train_on_file_callback(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (FANN_API *callback)(unsigned int epochs, float error))
+FANN_EXTERNAL void FANN_API fann_train_on_file_callback(struct fann *ann, char *filename,
+ unsigned int max_epochs,
+ unsigned int epochs_between_reports,
+ float desired_error,
+                                                        int (FANN_API * callback) (unsigned int epochs, float error))
{
struct fann_train_data *data = fann_read_train_from_file(filename);
- if(data == NULL){
+
+ if(data == NULL)
+ {
return;
}
- fann_train_on_data_callback(ann, data, max_epochs, epochs_between_reports, desired_error, callback);
+ fann_train_on_data_callback(ann, data, max_epochs, epochs_between_reports, desired_error,
+ callback);
fann_destroy_train(data);
}
-FANN_EXTERNAL void FANN_API fann_train_on_file(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error)
+FANN_EXTERNAL void FANN_API fann_train_on_file(struct fann *ann, char *filename,
+ unsigned int max_epochs,
+ unsigned int epochs_between_reports,
+ float desired_error)
{
- fann_train_on_file_callback(ann, filename, max_epochs, epochs_between_reports, desired_error, NULL);
+ fann_train_on_file_callback(ann, filename, max_epochs, epochs_between_reports, desired_error,
+ NULL);
}
#endif
-/* shuffles training data, randomizing the order
+/*
+ * shuffles training data, randomizing the order
*/
-FANN_EXTERNAL void FANN_API fann_shuffle_train_data(struct fann_train_data *train_data) {
+FANN_EXTERNAL void FANN_API fann_shuffle_train_data(struct fann_train_data *train_data)
+{
unsigned int dat = 0, elem, swap;
fann_type temp;
- for ( ; dat < train_data->num_data ; dat++ ) {
- swap = (unsigned int)(rand() % train_data->num_data);
- if ( swap != dat ) {
- for ( elem = 0 ; elem < train_data->num_input ; elem++ ) {
+ for(; dat < train_data->num_data; dat++)
+ {
+ swap = (unsigned int) (rand() % train_data->num_data);
+ if(swap != dat)
+ {
+ for(elem = 0; elem < train_data->num_input; elem++)
+ {
temp = train_data->input[dat][elem];
train_data->input[dat][elem] = train_data->input[swap][elem];
train_data->input[swap][elem] = temp;
}
- for ( elem = 0 ; elem < train_data->num_output ; elem++ ) {
+ for(elem = 0; elem < train_data->num_output; elem++)
+ {
temp = train_data->output[dat][elem];
train_data->output[dat][elem] = train_data->output[swap][elem];
train_data->output[swap][elem] = temp;
@@ -286,17 +357,24 @@ FANN_EXTERNAL void FANN_API fann_shuffle_train_data(struct fann_train_data *trai
}
}
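A side note on the reindented shuffle: swapping every position with rand() % num_data picks one of N^N equally likely swap sequences, which cannot map uniformly onto the N! permutations, so the result is close to but not exactly uniform. A Fisher-Yates pass would be; hedged sketch, reusing the loop's own swap bodies:

    /* Sketch: uniform shuffle (swap code as in the loop above). */
    for(dat = train_data->num_data - 1; dat > 0; dat--)
    {
        swap = (unsigned int) (rand() % (dat + 1));
        /* swap input[dat] <-> input[swap], output[dat] <-> output[swap] */
    }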
-/* INTERNAL FUNCTION
- Scales data to a specific range
+/*
+ * INTERNAL FUNCTION Scales data to a specific range
*/
-void fann_scale_data(fann_type **data, unsigned int num_data, unsigned int num_elem, fann_type new_min, fann_type new_max) {
+void fann_scale_data(fann_type ** data, unsigned int num_data, unsigned int num_elem,
+ fann_type new_min, fann_type new_max)
+{
unsigned int dat, elem;
fann_type old_min, old_max, temp, old_span, new_span, factor;
+
old_min = old_max = data[0][0];
- /* first calculate min and max */
- for (dat = 0 ; dat < num_data ; dat++ ) {
- for ( elem = 0 ; elem < num_elem ; elem++ ) {
+ /*
+ * first calculate min and max
+ */
+ for(dat = 0; dat < num_data; dat++)
+ {
+ for(elem = 0; elem < num_elem; elem++)
+ {
temp = data[dat][elem];
if(temp < old_min)
old_min = temp;
@@ -309,134 +387,183 @@ void fann_scale_data(fann_type **data, unsigned int num_data, unsigned int num_e
new_span = new_max - new_min;
factor = new_span / old_span;
- for (dat = 0 ; dat < num_data ; dat++ ) {
- for ( elem = 0 ; elem < num_elem ; elem++ ) {
+ for(dat = 0; dat < num_data; dat++)
+ {
+ for(elem = 0; elem < num_elem; elem++)
+ {
temp = (data[dat][elem] - old_min) * factor + new_min;
- if(temp < new_min){
+ if(temp < new_min)
+ {
data[dat][elem] = new_min;
- /*printf("error %f < %f\n", temp, new_min);*/
- } else if(temp > new_max){
+ /*
+ * printf("error %f < %f\n", temp, new_min);
+ */
+ }
+ else if(temp > new_max)
+ {
data[dat][elem] = new_max;
- /*printf("error %f > %f\n", temp, new_max);*/
- } else {
+ /*
+ * printf("error %f > %f\n", temp, new_max);
+ */
+ }
+ else
+ {
data[dat][elem] = temp;
}
}
}
}
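The scaling above is the usual affine map: factor = (new_max - new_min) / (old_max - old_min), then scaled = (x - old_min) * factor + new_min, with the clamp only guarding floating-point rounding at the ends. Worked example: mapping x = 5 from [0, 10] onto [-1, 1] gives factor = 0.2 and scaled = (5 - 0) * 0.2 + (-1) = 0. Typical use, via the wrapper defined just below:

    /* squash all inputs into [-1, 1] before training */
    fann_scale_input_train_data(data, -1, 1);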
-/* Scales the inputs in the training data to the specified range
+/*
+ * Scales the inputs in the training data to the specified range
*/
-FANN_EXTERNAL void FANN_API fann_scale_input_train_data(struct fann_train_data *train_data, fann_type new_min, fann_type new_max) {
- fann_scale_data(train_data->input, train_data->num_data, train_data->num_input, new_min, new_max);
+FANN_EXTERNAL void FANN_API fann_scale_input_train_data(struct fann_train_data *train_data,
+ fann_type new_min, fann_type new_max)
+{
+ fann_scale_data(train_data->input, train_data->num_data, train_data->num_input, new_min,
+ new_max);
}
-/* Scales the inputs in the training data to the specified range
+/*
+ * Scales the outputs in the training data to the specified range
*/
-FANN_EXTERNAL void FANN_API fann_scale_output_train_data(struct fann_train_data *train_data, fann_type new_min, fann_type new_max) {
- fann_scale_data(train_data->output, train_data->num_data, train_data->num_output, new_min, new_max);
+FANN_EXTERNAL void FANN_API fann_scale_output_train_data(struct fann_train_data *train_data,
+ fann_type new_min, fann_type new_max)
+{
+ fann_scale_data(train_data->output, train_data->num_data, train_data->num_output, new_min,
+ new_max);
}
-/* Scales the inputs in the training data to the specified range
+/*
+ * Scales the inputs and outputs in the training data to the specified range
*/
-FANN_EXTERNAL void FANN_API fann_scale_train_data(struct fann_train_data *train_data, fann_type new_min, fann_type new_max) {
- fann_scale_data(train_data->input, train_data->num_data, train_data->num_input, new_min, new_max);
- fann_scale_data(train_data->output, train_data->num_data, train_data->num_output, new_min, new_max);
+FANN_EXTERNAL void FANN_API fann_scale_train_data(struct fann_train_data *train_data,
+ fann_type new_min, fann_type new_max)
+{
+ fann_scale_data(train_data->input, train_data->num_data, train_data->num_input, new_min,
+ new_max);
+ fann_scale_data(train_data->output, train_data->num_data, train_data->num_output, new_min,
+ new_max);
}
-/* merges training data into a single struct.
+/*
+ * merges training data into a single struct.
+ * TODO: this function leaks memory
*/
-FANN_EXTERNAL struct fann_train_data * FANN_API fann_merge_train_data(struct fann_train_data *data1, struct fann_train_data *data2) {
- struct fann_train_data * train_data;
+FANN_EXTERNAL struct fann_train_data *FANN_API fann_merge_train_data(struct fann_train_data *data1,
+ struct fann_train_data *data2)
+{
+ struct fann_train_data *train_data;
unsigned int x;
- if ( (data1->num_input != data2->num_input) ||
- (data1->num_output != data2->num_output) ) {
+ if((data1->num_input != data2->num_input) || (data1->num_output != data2->num_output))
+ {
fann_error(NULL, FANN_E_TRAIN_DATA_MISMATCH);
return NULL;
}
- train_data = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
+ train_data = (struct fann_train_data *) malloc(sizeof(struct fann_train_data));
- fann_init_error_data((struct fann_error *)train_data);
+ fann_init_error_data((struct fann_error *) train_data);
train_data->num_data = data1->num_data + data2->num_data;
train_data->num_input = data1->num_input;
train_data->num_output = data1->num_output;
- if ( ((train_data->input = (fann_type **)calloc(train_data->num_data, sizeof(fann_type *))) == NULL) ||
- ((train_data->output = (fann_type **)calloc(train_data->num_data, sizeof(fann_type *))) == NULL) ) {
+ if(((train_data->input =
+ (fann_type **) calloc(train_data->num_data, sizeof(fann_type *))) == NULL)
+ || ((train_data->output = (fann_type **) calloc(train_data->num_data, sizeof(fann_type *)))
+ == NULL))
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(train_data);
return NULL;
}
- for ( x = 0 ; x < train_data->num_data ; x++ ) {
- if ( ((train_data->input[x] = (fann_type *)calloc(train_data->num_input, sizeof(fann_type))) == NULL) ||
- ((train_data->output[x] = (fann_type *)calloc(train_data->num_output, sizeof(fann_type))) == NULL) ) {
+
+ for(x = 0; x < train_data->num_data; x++)
+ {
+ if(((train_data->input[x] = (fann_type *) calloc(train_data->num_input,
+ sizeof(fann_type))) == NULL)
+ ||
+ ((train_data->output[x] =
+ (fann_type *) calloc(train_data->num_output, sizeof(fann_type))) == NULL))
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(train_data);
return NULL;
}
memcpy(train_data->input[x],
- ( x < data1->num_data ) ? data1->input[x] : data2->input[x - data1->num_data],
- train_data->num_input * sizeof(fann_type));
+	       (x < data1->num_data) ? data1->input[x] : data2->input[x - data1->num_data],
+	       train_data->num_input * sizeof(fann_type));
memcpy(train_data->output[x],
- ( x < data1->num_data ) ? data1->output[x] : data2->output[x - data1->num_data],
- train_data->num_output * sizeof(fann_type));
+ (x < data1->num_data) ? data1->output[x] : data2->output[x - data1->num_data],
+ train_data->num_output * sizeof(fann_type));
}
return train_data;
}
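The TODO above follows from fann_destroy_train earlier in this patch: it frees input[0] and output[0] plus the two pointer arrays, which matches the single-contiguous-block layout used by fann_read_train_from_fd and fann_duplicate_train_data. fann_merge_train_data instead callocs every row separately, so every row after the first is never freed. A hedged fix sketch using the same contiguous layout (variable names as in the rest of this file):

    /* Sketch: one block per side, rows point into it, so freeing
     * input[0] in fann_destroy_train releases everything. */
    data_input = (fann_type *) calloc(train_data->num_input * train_data->num_data,
                                      sizeof(fann_type));
    for(x = 0; x < train_data->num_data; x++)
        train_data->input[x] = data_input + x * train_data->num_input;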
-/* return a copy of a fann_train_data struct
+/*
+ * return a copy of a fann_train_data struct
*/
-FANN_EXTERNAL struct fann_train_data * FANN_API fann_duplicate_train_data(struct fann_train_data *data)
+FANN_EXTERNAL struct fann_train_data *FANN_API fann_duplicate_train_data(struct fann_train_data
+ *data)
{
unsigned int i;
fann_type *data_input, *data_output;
- struct fann_train_data* dest = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
+ struct fann_train_data *dest =
+ (struct fann_train_data *) malloc(sizeof(struct fann_train_data));
- if(dest == NULL){
+ if(dest == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
- fann_init_error_data((struct fann_error *)dest);
+ fann_init_error_data((struct fann_error *) dest);
dest->num_data = data->num_data;
dest->num_input = data->num_input;
dest->num_output = data->num_output;
- dest->input = (fann_type **)calloc(dest->num_data, sizeof(fann_type *));
- if(dest->input == NULL){
+ dest->input = (fann_type **) calloc(dest->num_data, sizeof(fann_type *));
+ if(dest->input == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(dest);
return NULL;
}
-
- dest->output = (fann_type **)calloc(dest->num_data, sizeof(fann_type *));
- if(dest->output == NULL){
+
+ dest->output = (fann_type **) calloc(dest->num_data, sizeof(fann_type *));
+ if(dest->output == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(dest);
return NULL;
}
-
- data_input = (fann_type *)calloc(dest->num_input*dest->num_data, sizeof(fann_type));
- if(data_input == NULL){
+
+ data_input = (fann_type *) calloc(dest->num_input * dest->num_data, sizeof(fann_type));
+ if(data_input == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(dest);
return NULL;
}
- memcpy(data_input, data->input[0], dest->num_input*dest->num_data*sizeof(fann_type));
+ memcpy(data_input, data->input[0], dest->num_input * dest->num_data * sizeof(fann_type));
- data_output = (fann_type *)calloc(dest->num_output*dest->num_data, sizeof(fann_type));
- if(data_output == NULL){
+ data_output = (fann_type *) calloc(dest->num_output * dest->num_data, sizeof(fann_type));
+ if(data_output == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(dest);
return NULL;
}
- memcpy(data_output, data->output[0], dest->num_output*dest->num_data*sizeof(fann_type));
+ memcpy(data_output, data->output[0], dest->num_output * dest->num_data * sizeof(fann_type));
- for(i = 0; i != dest->num_data; i++){
+ for(i = 0; i != dest->num_data; i++)
+ {
dest->input[i] = data_input;
data_input += dest->num_input;
dest->output[i] = data_output;
@@ -445,79 +572,91 @@ FANN_EXTERNAL struct fann_train_data * FANN_API fann_duplicate_train_data(struct
return dest;
}
-/* INTERNAL FUNCTION
- Reads training data from a file descriptor.
+/*
+ * INTERNAL FUNCTION Reads training data from a file descriptor.
*/
-struct fann_train_data* fann_read_train_from_fd(FILE *file, char *filename)
+struct fann_train_data *fann_read_train_from_fd(FILE * file, char *filename)
{
unsigned int num_input, num_output, num_data, i, j;
unsigned int line = 1;
fann_type *data_input, *data_output;
- struct fann_train_data* data = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
+ struct fann_train_data *data =
+ (struct fann_train_data *) malloc(sizeof(struct fann_train_data));
- if(data == NULL){
+ if(data == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
-
- if(fscanf(file, "%u %u %u\n", &num_data, &num_input, &num_output) != 3){
+
+ if(fscanf(file, "%u %u %u\n", &num_data, &num_input, &num_output) != 3)
+ {
fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
fann_destroy_train(data);
return NULL;
}
line++;
- fann_init_error_data((struct fann_error *)data);
+ fann_init_error_data((struct fann_error *) data);
data->num_data = num_data;
data->num_input = num_input;
data->num_output = num_output;
- data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
- if(data->input == NULL){
+ data->input = (fann_type **) calloc(num_data, sizeof(fann_type *));
+ if(data->input == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(data);
return NULL;
}
-
- data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
- if(data->output == NULL){
+
+ data->output = (fann_type **) calloc(num_data, sizeof(fann_type *));
+ if(data->output == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(data);
return NULL;
}
-
- data_input = (fann_type *)calloc(num_input*num_data, sizeof(fann_type));
- if(data_input == NULL){
+
+ data_input = (fann_type *) calloc(num_input * num_data, sizeof(fann_type));
+ if(data_input == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(data);
return NULL;
}
- data_output = (fann_type *)calloc(num_output*num_data, sizeof(fann_type));
- if(data_output == NULL){
+ data_output = (fann_type *) calloc(num_output * num_data, sizeof(fann_type));
+ if(data_output == NULL)
+ {
fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
fann_destroy_train(data);
return NULL;
}
-
- for(i = 0; i != num_data; i++){
+
+ for(i = 0; i != num_data; i++)
+ {
data->input[i] = data_input;
data_input += num_input;
-
- for(j = 0; j != num_input; j++){
- if(fscanf(file, FANNSCANF" ", &data->input[i][j]) != 1){
+
+ for(j = 0; j != num_input; j++)
+ {
+ if(fscanf(file, FANNSCANF " ", &data->input[i][j]) != 1)
+ {
fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
fann_destroy_train(data);
return NULL;
}
}
line++;
-
+
data->output[i] = data_output;
data_output += num_output;
-
- for(j = 0; j != num_output; j++){
- if(fscanf(file, FANNSCANF" ", &data->output[i][j]) != 1){
+
+ for(j = 0; j != num_output; j++)
+ {
+ if(fscanf(file, FANNSCANF " ", &data->output[i][j]) != 1)
+ {
fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
fann_destroy_train(data);
return NULL;
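The fscanf sequence above pins down the training-file layout: one header line "num_data num_input num_output", then num_data pairs of lines, inputs first and outputs second, values separated by whitespace. A plausible XOR file in that format (the symmetric -1/1 encoding is just one choice):

    4 2 1
    -1 -1
    -1
    -1 1
    1
    1 -1
    1
    1 1
    -1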
diff --git a/src/include/fann_data.h b/src/include/fann_data.h
index c625e17..5572732 100644
--- a/src/include/fann_data.h
+++ b/src/include/fann_data.h
@@ -124,19 +124,6 @@ struct fann
*/
fann_type *train_errors;
- /* Used to choose which activation function to use
-
- Sometimes it can be smart, to set the activation function for the hidden neurons
- to FANN_THRESHOLD and the activation function for the output neurons to FANN_SIGMOID,
-   in this way you get a very fast network, that is still capable of
- producing real valued output.
- unsigned int activation_function_hidden, activation_function_output; */
-
- /* Parameters for the activation function
- fann_type activation_steepness_hidden;
- fann_type activation_steepness_output;
- */
-
/* Training algorithm used when calling fann_train_on_..
*/
unsigned int training_algorithm;
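The comment removed from fann_data.h (threshold hidden units plus sigmoid outputs, for a very fast network that still produces real-valued output) survives as advice even though the global fields are gone; the same trick is now expressed through the per-layer setters that appear elsewhere in this commit:

    /* Fast mixed setup from the removed comment. Thresholds have no
     * usable derivative, so set this for running a trained net, not
     * for gradient training. */
    fann_set_activation_function_hidden(ann, FANN_THRESHOLD);
    fann_set_activation_function_output(ann, FANN_SIGMOID);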
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/libfann.git