[libfann] 164/242: merge from cascade branch
Christian Kastner
chrisk-guest at moszumanska.debian.org
Sat Oct 4 21:10:38 UTC 2014
This is an automated email from the git hooks/post-receive script.
chrisk-guest pushed a commit to tag Version2_0_0
in repository libfann.
commit 731f7fe38408e4e5c689e31d641a12ef75a30c23
Author: Steffen Nissen <lukesky at diku.dk>
Date: Sun Oct 10 07:51:31 2004 +0000
merge from cascade branch
---
TODO | 2 +
examples/Makefile | 12 +-
examples/{xor_train.c => cascade_train.c} | 75 +++--
examples/xor_test.c | 3 +
examples/xor_train.c | 2 +
src/doublefann.c | 1 +
src/fann.c | 358 +++++++++-----------
src/fann_cascade.c | 520 ++++++++++++++++++++++++++++--
src/fann_io.c | 34 +-
src/fann_options.c | 30 +-
src/fann_train.c | 284 ++++++++++++----
src/fann_train_data.c | 12 +-
src/fixedfann.c | 1 +
src/floatfann.c | 1 +
src/include/fann.h | 4 +
src/include/fann_data.h | 48 ++-
src/include/fann_internal.h | 20 +-
17 files changed, 1016 insertions(+), 391 deletions(-)
diff --git a/TODO b/TODO
index c6a0df7..8d061e7 100644
--- a/TODO
+++ b/TODO
@@ -48,6 +48,8 @@ Things TODO when releasing a new version (mostly for fann developers)
* Changelog should be updated and timestamp should be set
+* CVS branch for release
+
* PHP Extension should be up-to-date (including documentation)
* TGZ/BZ/ZIP packages should be created
diff --git a/examples/Makefile b/examples/Makefile
index b51d025..9e011c6 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -4,7 +4,7 @@
GCC=gcc
TARGETS = xor_train xor_test xor_test_fixed simple_train steepness_train simple_test robot mushroom
-DEBUG_TARGETS = xor_train_debug xor_test_debug xor_test_fixed_debug
+DEBUG_TARGETS = xor_train_debug xor_test_debug xor_test_fixed_debug cascade_train_debug
all: $(TARGETS)
@@ -54,20 +54,20 @@ compiletest:
debug: $(DEBUG_TARGETS)
%_debug: %.c Makefile ../src/*c ../src/include/*h
- $(GCC) -O3 -ggdb -lm -DDEBUG -Wall -ansi -I../src/ -I../src/include/ ../src/floatfann.c $< -o $@
+ $(GCC) -ggdb -lm -DDEBUG -Wall -ansi -I../src/ -I../src/include/ ../src/floatfann.c $< -o $@
-%_fixed_debug: %.c Makefile
+%_fixed_debug: %.c Makefile ../src/*c ../src/include/*h
$(GCC) -O3 -ggdb -lm -DDEBUG -Wall -ansi -DFIXEDFANN -I../src/ -I../src/include/ ../src/fixedfann.c $< -o $@
rundebug: $(DEBUG_TARGETS)
@echo
@echo Training network
- valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_train_debug
+ valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high --db-attach=yes ./xor_train_debug
@echo
@echo Testing network with floats
- valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_test_debug
+ valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high --db-attach=yes ./xor_test_debug
@echo
@echo Testing network with fixed points
- valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_test_fixed_debug
+ valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high --db-attach=yes ./xor_test_fixed_debug
diff --git a/examples/xor_train.c b/examples/cascade_train.c
similarity index 50%
copy from examples/xor_train.c
copy to examples/cascade_train.c
index 5279611..097b3d9 100644
--- a/examples/xor_train.c
+++ b/examples/cascade_train.c
@@ -21,41 +21,27 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include "fann.h"
-int print_callback(unsigned int epochs, float error)
-{
- printf("Epochs %8d. Current MSE-Error: %.10f\n", epochs, error);
- return 0;
-}
-
int main()
{
- fann_type *calc_out;
- const float connection_rate = 1;
const float learning_rate = (const float)0.7;
- const unsigned int num_input = 2;
- const unsigned int num_output = 1;
- const unsigned int num_layers = 3;
- const unsigned int num_neurons_hidden = 3;
const float desired_error = (const float)0.001;
- const unsigned int max_iterations = 300000;
- const unsigned int iterations_between_reports = 1000;
+ unsigned int max_out_epochs = 1000;
+ unsigned int max_cand_epochs = 200;
+ unsigned int max_neurons = 32;
+ unsigned int neurons_between_reports = 1;
+ unsigned int i = 0;
+ fann_type *calc_out;
struct fann *ann;
- struct fann_train_data *data;
+ struct fann_train_data *train_data, *test_data;
- unsigned int i = 0;
- unsigned int decimal_point;
-
- printf("Creating network.\n");
-
- ann = fann_create(connection_rate, learning_rate, num_layers,
- num_input,
- num_neurons_hidden,
- num_output);
+ printf("Reading data.\n");
- printf("Training network.\n");
+ train_data = fann_read_train_from_file("xor.data");
+ test_data = fann_read_train_from_file("xor.data");
- data = fann_read_train_from_file("xor.data");
+ printf("Creating network.\n");
+ ann = fann_create_shortcut(learning_rate, 2, train_data->num_input, train_data->num_output);
fann_set_activation_steepness_hidden(ann, 1.0);
fann_set_activation_steepness_output(ann, 1.0);
@@ -63,31 +49,44 @@ int main()
fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
- fann_init_weights(ann, data);
+ fann_print_connections(ann);
+ fann_print_parameters(ann);
- /*fann_set_training_algorithm(ann, FANN_TRAIN_QUICKPROP);*/
- fann_train_on_data(ann, data, max_iterations, iterations_between_reports, desired_error);
+ printf("Training network.\n");
- /*fann_train_on_data_callback(ann, data, max_iterations, iterations_between_reports, desired_error, print_callback);*/
+ /*fann_train_on_data(ann, train_data, 300, 1, desired_error);*/
+ printf("\nTrain error: %f, Test error: %f\n\n", fann_test_data(ann, train_data), fann_test_data(ann, test_data));
+ fann_cascadetrain_on_data_callback(ann, train_data, desired_error, NULL, max_out_epochs, max_cand_epochs, max_neurons, neurons_between_reports);
- printf("Testing network.\n");
+ printf("\nTrain error: %f, Test error: %f\n\n", fann_test_data(ann, train_data), fann_test_data(ann, test_data));
+
+ fann_print_connections(ann);
+ fann_print_parameters(ann);
- for(i = 0; i < data->num_data; i++){
- calc_out = fann_run(ann, data->input[i]);
+ printf("Testing network.\n");
+
+ for(i = 0; i < test_data->num_data; i++){
+ calc_out = fann_run(ann, test_data->input[i]);
printf("XOR test (%f,%f) -> %f, should be %f, difference=%f\n",
- data->input[i][0], data->input[i][1], *calc_out, data->output[i][0], fann_abs(*calc_out - data->output[i][0]));
+ test_data->input[i][0], test_data->input[i][1], *calc_out, test_data->output[i][0], fann_abs(*calc_out - test_data->output[i][0]));
}
printf("Saving network.\n");
fann_save(ann, "xor_float.net");
-
- decimal_point = fann_save_to_fixed(ann, "xor_fixed.net");
- fann_save_train_to_fixed(data, "xor_fixed.data", decimal_point);
+ /*fann_randomize_weights(ann, -0.1, 0.1);
+ fann_train_on_data(ann, train_data, max_out_epochs, 1, desired_error);
+
+ printf("\nTrain error: %f, Test error: %f\n\n", fann_test_data(ann, train_data), fann_test_data(ann, test_data));
+
+ fann_print_connections(ann);
+ fann_print_parameters(ann);*/
+
printf("Cleaning up.\n");
- fann_destroy_train(data);
+ fann_destroy_train(train_data);
+ fann_destroy_train(test_data);
fann_destroy(ann);
return 0;
diff --git a/examples/xor_test.c b/examples/xor_test.c
index b4c9540..7cece1e 100644
--- a/examples/xor_test.c
+++ b/examples/xor_test.c
@@ -43,6 +43,9 @@ int main()
return 0;
}
+ fann_print_connections(ann);
+ fann_print_parameters(ann);
+
printf("Testing network.\n");
#ifdef FIXEDFANN
diff --git a/examples/xor_train.c b/examples/xor_train.c
index 5279611..80ea0ea 100644
--- a/examples/xor_train.c
+++ b/examples/xor_train.c
@@ -70,6 +70,8 @@ int main()
/*fann_train_on_data_callback(ann, data, max_iterations, iterations_between_reports, desired_error, print_callback);*/
+ fann_print_connections(ann);
+ fann_print_parameters(ann);
printf("Testing network.\n");
diff --git a/src/doublefann.c b/src/doublefann.c
index 57d5efd..94083bf 100644
--- a/src/doublefann.c
+++ b/src/doublefann.c
@@ -27,3 +27,4 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include "fann_train_data.c"
#include "fann_options.c"
#include "fann_error.c"
+#include "fann_cascade.c"
diff --git a/src/fann.c b/src/fann.c
index 48ea685..a3abbfa 100644
--- a/src/fann.c
+++ b/src/fann.c
@@ -39,6 +39,10 @@ FANN_EXTERNAL struct fann * FANN_API fann_create(float connection_rate, float le
struct fann *ann;
va_list layer_sizes;
unsigned int *layers = (unsigned int *)calloc(num_layers, sizeof(unsigned int));
+ if(layers == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ return NULL;
+ }
int i = 0;
va_start(layer_sizes, num_layers);
@@ -60,7 +64,7 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
{
struct fann_layer *layer_it, *last_layer, *prev_layer;
struct fann *ann;
- struct fann_neuron *neuron_it, *last_neuron, *random_neuron, *bias_neuron;
+ struct fann_neuron *neuron_it, *first_neuron, *last_neuron, *random_neuron, *bias_neuron;
unsigned int prev_layer_size, i, j;
unsigned int num_neurons_in, num_neurons_out;
unsigned int min_connections, max_connections, num_connections;
@@ -81,6 +85,7 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
/* allocate the general structure */
ann = fann_allocate_structure(learning_rate, num_layers);
if(ann == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
@@ -129,22 +134,27 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
max_connections = num_neurons_in * num_neurons_out; /* not calculating bias */
num_connections = fann_max(min_connections,
(unsigned int)(0.5+(connection_rate * max_connections)) + num_neurons_out);
-
- ann->total_connections += num_connections;
-
+
connections_per_neuron = num_connections/num_neurons_out;
allocated_connections = 0;
/* Now split out the connections on the different neurons */
for(i = 0; i != num_neurons_out; i++){
- layer_it->first_neuron[i].num_connections = connections_per_neuron;
+ layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
allocated_connections += connections_per_neuron;
+ layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;
if(allocated_connections < (num_connections*(i+1))/num_neurons_out){
- layer_it->first_neuron[i].num_connections++;
+ layer_it->first_neuron[i].last_con++;
allocated_connections++;
}
}
+ /* bias neuron also gets stuff */
+ layer_it->first_neuron[i].first_con = ann->total_connections + allocated_connections;
+ layer_it->first_neuron[i].last_con = ann->total_connections + allocated_connections;
+
+ ann->total_connections += num_connections;
+
/* used in the next run of the loop */
num_neurons_in = num_neurons_out;
}
@@ -154,6 +164,8 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
fann_destroy(ann);
return NULL;
}
+
+ first_neuron = ann->first_layer->first_neuron;
if(connection_rate >= 1){
prev_layer_size = ann->num_input+1;
@@ -162,14 +174,13 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
last_neuron = layer_it->last_neuron-1;
for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++){
- for(i = 0; i != prev_layer_size; i++){
- neuron_it->weights[i] = (fann_type)fann_random_weight();
+ for(i = neuron_it->first_con; i != neuron_it->last_con; i++){
+ ann->weights[i] = (fann_type)fann_random_weight();
/* these connections are still initialized for fully connected networks, to allow
operations to work, that are not optimized for fully connected networks.
*/
- neuron_it->connected_neurons[i] = prev_layer->first_neuron+i;
+ ann->connections[i] = prev_layer->first_neuron + (i - neuron_it->first_con);
}
-
}
prev_layer_size = layer_it->last_neuron - layer_it->first_neuron;
prev_layer = layer_it;
@@ -188,9 +199,8 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
not allready connected to.
*/
- /* first clear all the connections, because we want to
+ /* All the connections are cleared by calloc, because we want to
be able to see which connections are allready connected */
- memset((ann->first_layer+1)->first_neuron->connected_neurons, 0, ann->total_connections * sizeof(struct fann_neuron*));
for(layer_it = ann->first_layer+1;
layer_it != ann->last_layer; layer_it++){
@@ -203,9 +213,9 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
last_neuron = layer_it->last_neuron-1;
for(neuron_it = layer_it->first_neuron;
neuron_it != last_neuron; neuron_it++){
-
- neuron_it->connected_neurons[0] = bias_neuron;
- neuron_it->weights[0] = (fann_type)fann_random_weight();
+
+ ann->connections[neuron_it->first_con] = bias_neuron;
+ ann->weights[neuron_it->first_con] = (fann_type)fann_random_weight();
}
/* then connect all neurons in the input layer */
@@ -219,13 +229,13 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
random_number = (int) (0.5+fann_rand(0, num_neurons_out-1));
random_neuron = layer_it->first_neuron + random_number;
/* checks the last space in the connections array for room */
- }while(random_neuron->connected_neurons[random_neuron->num_connections-1]);
+ }while(ann->connections[random_neuron->last_con-1]);
/* find an empty space in the connection array and connect */
- for(i = 0; i < random_neuron->num_connections; i++){
- if(random_neuron->connected_neurons[i] == NULL){
- random_neuron->connected_neurons[i] = neuron_it;
- random_neuron->weights[i] = (fann_type)fann_random_weight();
+ for(i = random_neuron->first_con; i < random_neuron->last_con; i++){
+ if(ann->connections[i] == NULL){
+ ann->connections[i] = neuron_it;
+ ann->weights[i] = (fann_type)fann_random_weight();
break;
}
}
@@ -236,9 +246,9 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
for(neuron_it = layer_it->first_neuron;
neuron_it != last_neuron; neuron_it++){
/* find empty space in the connection array and connect */
- for(i = 0; i < neuron_it->num_connections; i++){
+ for(i = neuron_it->first_con; i < neuron_it->last_con; i++){
/* continue if allready connected */
- if(neuron_it->connected_neurons[i] != NULL) continue;
+ if(ann->connections[i] != NULL) continue;
do {
found_connection = 0;
@@ -246,8 +256,8 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
random_neuron = (layer_it-1)->first_neuron + random_number;
/* check to see if this connection is allready there */
- for(j = 0; j < i; j++){
- if(random_neuron == neuron_it->connected_neurons[j]){
+ for(j = neuron_it->first_con; j < i; j++){
+ if(random_neuron == ann->connections[j]){
found_connection = 1;
break;
}
@@ -257,8 +267,8 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
/* we have found a neuron that is not allready
connected to us, connect it */
- neuron_it->connected_neurons[i] = random_neuron;
- neuron_it->weights[i] = (fann_type)fann_random_weight();
+ ann->connections[i] = random_neuron;
+ ann->weights[i] = (fann_type)fann_random_weight();
}
}
@@ -291,6 +301,11 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut(float learning_rate,
struct fann *ann;
va_list layer_sizes;
unsigned int *layers = (unsigned int *)calloc(num_layers, sizeof(unsigned int));
+ if(layers == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ return NULL;
+ }
+
int i = 0;
va_start(layer_sizes, num_layers);
@@ -315,7 +330,6 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_r
struct fann_neuron *neuron_it, *neuron_it2 = 0;
unsigned int i;
unsigned int num_neurons_in, num_neurons_out;
- unsigned int num_connections;
#ifdef FIXEDFANN
unsigned int decimal_point;
@@ -327,6 +341,7 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_r
/* allocate the general structure */
ann = fann_allocate_structure(learning_rate, num_layers);
if(ann == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
return NULL;
}
@@ -345,11 +360,16 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_r
/* we do not allocate room here, but we make sure that
last_neuron - first_neuron is the number of neurons */
layer_it->first_neuron = NULL;
- layer_it->last_neuron = layer_it->first_neuron + layers[i++] +1; /* +1 for bias */
+ layer_it->last_neuron = layer_it->first_neuron + layers[i++];
+ if(layer_it == ann->first_layer){
+ /* there is a bias neuron in the first layer */
+ layer_it->last_neuron++;
+ }
+
ann->total_neurons += layer_it->last_neuron - layer_it->first_neuron;
}
- ann->num_output = (ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron -1;
+ ann->num_output = (ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron;
ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron -1;
/* allocate room for the actual neurons */
@@ -368,17 +388,17 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_r
num_neurons_in = ann->num_input;
last_layer = ann->last_layer;
for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
- num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
- num_connections = num_neurons_in * num_neurons_out + num_neurons_out;
- ann->total_connections += num_connections;
+ num_neurons_out = layer_it->last_neuron - layer_it->first_neuron;
/* Now split out the connections on the different neurons */
for(i = 0; i != num_neurons_out; i++){
- layer_it->first_neuron[i].num_connections = num_neurons_in+1;
+ layer_it->first_neuron[i].first_con = ann->total_connections;
+ ann->total_connections += num_neurons_in+1;
+ layer_it->first_neuron[i].last_con = ann->total_connections;
}
#ifdef DEBUG
- printf(" layer : %d neurons, 1 bias\n", num_neurons_out);
+ printf(" layer : %d neurons, 0 bias\n", num_neurons_out);
#endif
/* used in the next run of the loop */
num_neurons_in += num_neurons_out;
@@ -394,24 +414,17 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_r
*/
num_neurons_in = ann->num_input+1;
for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
- for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron-1; neuron_it++){
+ for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
- i = 0;
+ i = neuron_it->first_con;
for(layer_it2 = ann->first_layer; layer_it2 != layer_it; layer_it2++){
- for(neuron_it2 = layer_it2->first_neuron; neuron_it2 != layer_it2->last_neuron-1; neuron_it2++){
+ for(neuron_it2 = layer_it2->first_neuron; neuron_it2 != layer_it2->last_neuron; neuron_it2++){
- neuron_it->weights[i] = (fann_type)fann_random_weight();
- /* these connections are still initialized for fully connected networks, to allow
- operations to work, that are not optimized for fully connected networks.
- */
- neuron_it->connected_neurons[i] = neuron_it2;
+ ann->weights[i] = (fann_type)fann_random_weight();
+ ann->connections[i] = neuron_it2;
i++;
}
}
-
- /* The connection to the bias neuron */
- neuron_it->weights[i] = (fann_type)fann_random_weight();
- neuron_it->connected_neurons[i] = neuron_it2;
}
num_neurons_in += layer_it->last_neuron - layer_it->first_neuron;
}
@@ -428,10 +441,10 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_r
FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
{
struct fann_neuron *neuron_it, *last_neuron, *neurons, **neuron_pointers;
- unsigned int activation_function, i, num_connections, num_neurons, num_input, num_output;
+ unsigned int activation_function, i, num_connections, num_input, num_output;
fann_type neuron_value, *output;
fann_type *weights;
- struct fann_layer *layer_it, *layer_it2, *last_layer;
+ struct fann_layer *layer_it, *last_layer;
/* store some variabels local for fast access */
@@ -451,9 +464,7 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
/* values used for the stepwise linear sigmoid function */
fann_type rh1 = 0, rh2 = 0, rh3 = 0, rh4 = 0, rh5 = 0, rh6 = 0;
- fann_type ro1 = 0, ro2 = 0, ro3 = 0, ro4 = 0, ro5 = 0, ro6 = 0;
fann_type h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;
- fann_type o1 = 0, o2 = 0, o3 = 0, o4 = 0, o5 = 0, o6 = 0;
switch(ann->activation_function_hidden){
#ifdef FIXEDFANN
@@ -482,33 +493,6 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
break;
}
- switch(ann->activation_function_output){
-#ifdef FIXEDFANN
- case FANN_SIGMOID:
- case FANN_SIGMOID_SYMMETRIC:
-#endif
- case FANN_SIGMOID_STEPWISE:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- /* the output results */
- ro1 = ann->activation_results_output[0];
- ro2 = ann->activation_results_output[1];
- ro3 = ann->activation_results_output[2];
- ro4 = ann->activation_results_output[3];
- ro5 = ann->activation_results_output[4];
- ro6 = ann->activation_results_output[5];
-
- /* the output parameters */
- o1 = ann->activation_values_output[0];
- o2 = ann->activation_values_output[1];
- o3 = ann->activation_values_output[2];
- o4 = ann->activation_values_output[3];
- o5 = ann->activation_values_output[4];
- o6 = ann->activation_values_output[5];
- break;
- default:
- break;
- }
-
/* first set the input */
num_input = ann->num_input;
for(i = 0; i != num_input; i++){
@@ -519,67 +503,72 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
#endif
first_neuron[i].value = input[i];
}
+ /* Set the bias neuron in the input layer */
+#ifdef FIXEDFANN
+ (ann->first_layer->last_neuron-1)->value = multiplier;
+#else
+ (ann->first_layer->last_neuron-1)->value = 1;
+#endif
last_layer = ann->last_layer;
for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
-#ifdef FIXEDFANN
- ((layer_it-1)->last_neuron-1)->value = multiplier;
-#else
- /* set the bias neuron */
- ((layer_it-1)->last_neuron-1)->value = 1;
-
+#ifndef FIXEDFANN
steepness = (layer_it == last_layer-1) ?
activation_steepness_output : activation_steepness_hidden;
#endif
activation_function = (layer_it == last_layer-1) ?
activation_function_output : activation_function_hidden;
+
+ if(layer_it == last_layer-1){
+ switch(ann->activation_function_output){
+#ifdef FIXEDFANN
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_SYMMETRIC:
+#endif
+ case FANN_SIGMOID_STEPWISE:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ /* the output results */
+ rh1 = ann->activation_results_output[0];
+ rh2 = ann->activation_results_output[1];
+ rh3 = ann->activation_results_output[2];
+ rh4 = ann->activation_results_output[3];
+ rh5 = ann->activation_results_output[4];
+ rh6 = ann->activation_results_output[5];
+
+ /* the output parameters */
+ h1 = ann->activation_values_output[0];
+ h2 = ann->activation_values_output[1];
+ h3 = ann->activation_values_output[2];
+ h4 = ann->activation_values_output[3];
+ h5 = ann->activation_values_output[4];
+ h6 = ann->activation_values_output[5];
+ break;
+ default:
+ break;
+ }
+ }
- last_neuron = layer_it->last_neuron-1;
+ last_neuron = layer_it->last_neuron;
for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++){
+ if(neuron_it->first_con == neuron_it->last_con){
+ /* bias neurons */
+ neuron_it->value = 1;
+ continue;
+ }
+
neuron_value = 0;
- num_connections = neuron_it->num_connections;
- weights = neuron_it->weights;
+ num_connections = neuron_it->last_con - neuron_it->first_con;
+ weights = ann->weights + neuron_it->first_con;
if(ann->connection_rate >= 1){
if(ann->shortcut_connections){
- /* first go through the connections to the previous layers,
- then let the normal operation go through the rest.
- */
-
- for(layer_it2 = ann->first_layer;
- layer_it2 != layer_it-1; layer_it2++){
-
- neurons = layer_it2->first_neuron;
- num_neurons = layer_it2->last_neuron - neurons - 1; /* don't use bias from previous layers */
- i = num_neurons & 3; /* same as modulo 4 */
- switch(i) {
- case 3:
- neuron_value += fann_mult(weights[2], neurons[2].value);
- case 2:
- neuron_value += fann_mult(weights[1], neurons[1].value);
- case 1:
- neuron_value += fann_mult(weights[0], neurons[0].value);
- case 0:
- break;
- }
-
- for(;i != num_neurons; i += 4){
- neuron_value +=
- fann_mult(weights[i], neurons[i].value) +
- fann_mult(weights[i+1], neurons[i+1].value) +
- fann_mult(weights[i+2], neurons[i+2].value) +
- fann_mult(weights[i+3], neurons[i+3].value);
- }
-
- num_connections -= num_neurons;
- weights += num_neurons;
- }
+ neurons = ann->first_layer->first_neuron;
+ } else {
+ neurons = (layer_it-1)->first_neuron;
}
- neurons = (layer_it-1)->first_neuron;
-
i = num_connections & 3; /* same as modulo 4 */
switch(i) {
case 3:
@@ -599,8 +588,8 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
fann_mult(weights[i+2], neurons[i+2].value) +
fann_mult(weights[i+3], neurons[i+3].value);
}
- }else{
- neuron_pointers = neuron_it->connected_neurons;
+ } else {
+ neuron_pointers = ann->connections + neuron_it->first_con;
i = num_connections & 3; /* same as modulo 4 */
switch(i) {
@@ -627,19 +616,11 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
#ifdef FIXEDFANN
case FANN_SIGMOID:
case FANN_SIGMOID_STEPWISE:
- if(layer_it == last_layer-1){
- neuron_it->value = (fann_type)fann_stepwise(o1, o2, o3, o4, o5, o6, ro1, ro2, ro3, ro4, ro5, ro6, 0, multiplier, neuron_value);
- }else{
- neuron_it->value = (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, 0, multiplier, neuron_value);
- }
+ neuron_it->value = (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, 0, multiplier, neuron_value);
break;
case FANN_SIGMOID_SYMMETRIC:
case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- if(layer_it == last_layer-1){
- neuron_it->value = (fann_type)fann_stepwise(o1, o2, o3, o4, o5, o6, ro1, ro2, ro3, ro4, ro5, ro6, -multiplier, multiplier, neuron_value);
- }else{
- neuron_it->value = (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, -multiplier, multiplier, neuron_value);
- }
+ neuron_it->value = (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, -multiplier, multiplier, neuron_value);
break;
#else
case FANN_LINEAR:
@@ -655,18 +636,10 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
break;
case FANN_SIGMOID_STEPWISE:
- if(layer_it == last_layer-1){
- neuron_it->value = (fann_type)fann_stepwise(o1, o2, o3, o4, o5, o6, ro1, ro2, ro3, ro4, ro5, ro6, 0, 1, neuron_value);
- }else{
- neuron_it->value = (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, 0, 1, neuron_value);
- }
+ neuron_it->value = (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, 0, 1, neuron_value);
break;
case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- if(layer_it == last_layer-1){
- neuron_it->value = (fann_type)fann_stepwise(o1, o2, o3, o4, o5, o6, ro1, ro2, ro3, ro4, ro5, ro6, -1, 1, neuron_value);
- }else{
- neuron_it->value = (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, -1, 1, neuron_value);
- }
+ neuron_it->value = (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, -1, 1, neuron_value);
break;
#endif
case FANN_THRESHOLD:
@@ -696,8 +669,8 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
FANN_EXTERNAL void FANN_API fann_destroy(struct fann *ann)
{
if(ann == NULL) return;
- fann_safe_free(fann_get_weights(ann));
- fann_safe_free(fann_get_connections(ann));
+ fann_safe_free(ann->weights);
+ fann_safe_free(ann->connections);
fann_safe_free(ann->first_layer->first_neuron);
fann_safe_free(ann->first_layer);
fann_safe_free(ann->output);
@@ -712,7 +685,7 @@ FANN_EXTERNAL void FANN_API fann_destroy(struct fann *ann)
FANN_EXTERNAL void FANN_API fann_randomize_weights(struct fann *ann, fann_type min_weight, fann_type max_weight)
{
fann_type *last_weight;
- fann_type *weights = (ann->first_layer+1)->first_neuron->weights;
+ fann_type *weights = ann->weights;
last_weight = weights + ann->total_connections;
for(;weights != last_weight; weights++){
*weights = (fann_type)(fann_rand(min_weight, max_weight));
@@ -727,6 +700,10 @@ FANN_EXTERNAL void FANN_API fann_print_connections(struct fann *ann)
char *neurons;
unsigned int num_neurons = fann_get_total_neurons(ann) - fann_get_num_output(ann);
neurons = (char *)malloc(num_neurons+1);
+ if(neurons == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ return;
+ }
neurons[num_neurons] = 0;
printf("Layer / Neuron ");
@@ -737,13 +714,18 @@ FANN_EXTERNAL void FANN_API fann_print_connections(struct fann *ann)
for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
for(neuron_it = layer_it->first_neuron;
- neuron_it != layer_it->last_neuron-1; neuron_it++){
+ neuron_it != layer_it->last_neuron; neuron_it++){
memset(neurons, (int)'.', num_neurons);
- for(i = 0; i < neuron_it->num_connections; i++){
- value = (unsigned int)(fann_abs(neuron_it->weights[i])+0.5);
+ for(i = neuron_it->first_con; i < neuron_it->last_con; i++){
+#ifdef FIXEDFANN
+ value = (unsigned int)(fann_abs(ann->weights[i]/(double)ann->multiplier)+0.5);
+#else
+ value = (unsigned int)(fann_abs(ann->weights[i])+0.5);
+#endif
+
if(value > 25) value = 25;
- neurons[neuron_it->connected_neurons[i] - ann->first_layer->first_neuron] = 'a' + value;
+ neurons[ann->connections[i] - ann->first_layer->first_neuron] = 'a' + value;
}
printf("L %3d / N %4d %s\n", layer_it - ann->first_layer,
neuron_it - ann->first_layer->first_neuron, neurons);
@@ -758,7 +740,7 @@ FANN_EXTERNAL void FANN_API fann_print_connections(struct fann *ann)
FANN_EXTERNAL void FANN_API fann_init_weights(struct fann *ann, struct fann_train_data *train_data)
{
fann_type smallest_inp, largest_inp;
- unsigned int dat = 0, elem, num_neurons_in, num_neurons_out, num_connect, num_hidden_neurons;
+ unsigned int dat = 0, elem, num_connect, num_hidden_neurons;
struct fann_layer *layer_it;
struct fann_neuron *neuron_it, *last_neuron, *bias_neuron;
#ifdef FIXEDFANN
@@ -782,35 +764,27 @@ FANN_EXTERNAL void FANN_API fann_init_weights(struct fann *ann, struct fann_trai
#ifdef DEBUG
printf("Initializing weights with scale factor %f\n", scale_factor);
#endif
+ bias_neuron = ann->first_layer->last_neuron-1;
for ( layer_it = ann->first_layer+1; layer_it != ann->last_layer ; layer_it++) {
-#ifdef DEBUG
- printf(" Layer: %x/%x (%d neurons)\n", layer_it-ann->first_layer, ann->last_layer-ann->first_layer, layer_it->last_neuron - layer_it->first_neuron);
-#endif
- num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
- num_neurons_in = (layer_it-1)->last_neuron - (layer_it-1)->first_neuron - 1;
+ last_neuron = layer_it->last_neuron;
- last_neuron = layer_it->last_neuron-1;
- bias_neuron = (layer_it-1)->last_neuron-1;
+ if(!ann->shortcut_connections){
+ bias_neuron = (layer_it-1)->last_neuron-1;
+ }
for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++) {
-#ifdef DEBUG
- printf(" Neuron %x/%x (%d connections)\n", neuron_it-layer_it->first_neuron, last_neuron-layer_it->first_neuron, neuron_it->num_connections);
-#endif
- for ( num_connect = 0 ; num_connect < neuron_it->num_connections ; num_connect++ ) {
-#ifdef DEBUG
- printf(" Connection %d/%d (%x)\n", num_connect, neuron_it->num_connections, neuron_it->connected_neurons[num_connect] - ann->first_layer->first_neuron);
-#endif
- if ( bias_neuron == neuron_it->connected_neurons[num_connect] ) {
+ for ( num_connect = neuron_it->first_con; num_connect < neuron_it->last_con ; num_connect++ ) {
+ if ( bias_neuron == ann->connections[num_connect] ) {
#ifdef FIXEDFANN
- neuron_it->weights[num_connect] = (fann_type)fann_rand(-scale_factor, scale_factor * multiplier);
+ ann->weights[num_connect] = (fann_type)fann_rand(-scale_factor, scale_factor * multiplier);
#else
- neuron_it->weights[num_connect] = (fann_type)fann_rand(-scale_factor, scale_factor);
+ ann->weights[num_connect] = (fann_type)fann_rand(-scale_factor, scale_factor);
#endif
} else {
#ifdef FIXEDFANN
- neuron_it->weights[num_connect] = (fann_type)fann_rand(0, scale_factor * multiplier);
+ ann->weights[num_connect] = (fann_type)fann_rand(0, scale_factor * multiplier);
#else
- neuron_it->weights[num_connect] = (fann_type)fann_rand(0, scale_factor);
+ ann->weights[num_connect] = (fann_type)fann_rand(0, scale_factor);
#endif
}
}
@@ -858,8 +832,10 @@ struct fann * fann_allocate_structure(float learning_rate, unsigned int num_laye
ann->train_error_function = FANN_ERRORFUNC_TANH;
/* variables used for cascade correlation (reasonable defaults) */
- /*ann->change_fraction = 0.01;
- ann->stagnation_epochs = 12;*/
+ ann->cascade_change_fraction = 0.001;
+ ann->cascade_stagnation_epochs = 32;
+ ann->cascade_num_candidates = 8;
+ ann->cascade_candidate_scores = NULL;
/* Variables for use with with Quickprop training (reasonable defaults) */
ann->quickprop_decay = (float)-0.0001;
@@ -913,16 +889,15 @@ void fann_allocate_neurons(struct fann *ann)
unsigned int num_neurons_so_far = 0;
unsigned int num_neurons = 0;
- /* all the neurons is allocated in one long array */
+ /* all the neurons are allocated in one long array (calloc clears mem) */
neurons = (struct fann_neuron *)calloc(ann->total_neurons, sizeof(struct fann_neuron));
+ ann->total_neurons_allocated = ann->total_neurons;
+
if(neurons == NULL){
fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
-
- /* clear data, primarily to make the input neurons cleared */
- memset(neurons, 0, ann->total_neurons * sizeof(struct fann_neuron));
-
+
for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
num_neurons = layer_it->last_neuron - layer_it->first_neuron;
layer_it->first_neuron = neurons+num_neurons_so_far;
@@ -942,42 +917,21 @@ void fann_allocate_neurons(struct fann *ann)
*/
void fann_allocate_connections(struct fann *ann)
{
- struct fann_layer *layer_it, *last_layer;
- struct fann_neuron *neuron_it, *last_neuron;
- fann_type *weights;
- struct fann_neuron **connected_neurons = NULL;
- unsigned int connections_so_far = 0;
-
- weights = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
- if(weights == NULL){
+ ann->weights = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+ if(ann->weights == NULL){
fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
+ ann->total_connections_allocated = ann->total_connections;
/* TODO make special cases for all places where the connections
is used, so that it is not needed for fully connected networks.
*/
- connected_neurons = (struct fann_neuron **) calloc(ann->total_connections, sizeof(struct fann_neuron*));
- if(connected_neurons == NULL){
+ ann->connections = (struct fann_neuron **) calloc(ann->total_connections_allocated, sizeof(struct fann_neuron*));
+ if(ann->connections == NULL){
fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
-
-
- last_layer = ann->last_layer;
- for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
- last_neuron = layer_it->last_neuron-1;
- for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++){
- neuron_it->weights = weights+connections_so_far;
- neuron_it->connected_neurons = connected_neurons+connections_so_far;
- connections_so_far += neuron_it->num_connections;
- }
- }
-
- if(connections_so_far != ann->total_connections){
- fann_error((struct fann_error *)ann, FANN_E_WRONG_NUM_CONNECTIONS, connections_so_far, ann->total_connections);
- return;
- }
}
/* INTERNAL FUNCTION
diff --git a/src/fann_cascade.c b/src/fann_cascade.c
index c53beae..53f4566 100644
--- a/src/fann_cascade.c
+++ b/src/fann_cascade.c
@@ -18,32 +18,48 @@
*/
#include "fann.h"
+#include "fann_errno.h"
-void fann_cascadetrain_on_data_callback(struct fann *ann, struct fann_train_data *data, float desired_error, int (*callback)(unsigned int epochs, float error), unsigned int max_out_epochs, unsigned int max_neurons, unsigned int neurons_between_reports);
+#ifndef FIXEDFANN
+
+/* #define CASCADE_DEBUG */
+
+void fann_cascadetrain_on_data_callback(struct fann *ann, struct fann_train_data *data, float desired_error, int (*callback)(unsigned int epochs, float error), unsigned int max_out_epochs, unsigned int max_cand_epochs, unsigned int max_neurons, unsigned int neurons_between_reports);
int fann_train_outputs(struct fann *ann, struct fann_train_data *data, float desired_error, unsigned int max_epochs);
float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data);
-/* Train directly on the training data.
+int fann_train_candidates(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs);
+
+float fann_train_candidates_epoch(struct fann *ann, struct fann_train_data *data);
+
+void fann_install_candidate(struct fann *ann);
+
+int fann_initialize_candidates(struct fann *ann);
+
+void fann_set_shortcut_connections(struct fann *ann);
+
+/* Cascade training directly on the training data.
+ The connected_neurons pointers are not valid during training,
+ but they will be again after training.
*/
-void fann_cascadetrain_on_data_callback(struct fann *ann, struct fann_train_data *data, float desired_error, int (*callback)(unsigned int epochs, float error), unsigned int max_out_epochs, unsigned int max_neurons, unsigned int neurons_between_reports)
+void fann_cascadetrain_on_data_callback(struct fann *ann, struct fann_train_data *data, float desired_error, int (*callback)(unsigned int epochs, float error), unsigned int max_out_epochs, unsigned int max_cand_epochs, unsigned int max_neurons, unsigned int neurons_between_reports)
{
float error;
unsigned int i;
unsigned int total_epochs = 0;
-
+
if(neurons_between_reports && callback == NULL){
printf("Max neurons %6d. Desired error: %.6f\n", max_neurons, desired_error);
}
for(i = 1; i <= max_neurons; i++){
- /* train */
-
+ /* train output neurons */
total_epochs += fann_train_outputs(ann, data, desired_error, max_out_epochs);
error = fann_get_MSE(ann);
-
+
/* print current error */
if(neurons_between_reports &&
(i % neurons_between_reports == 0
@@ -61,10 +77,27 @@ void fann_cascadetrain_on_data_callback(struct fann *ann, struct fann_train_data
if(error < desired_error){
break;
}
+
+ if(fann_initialize_candidates(ann) == -1){
+ /* Unable to initialize room for candidates */
+ break;
+ }
+
+ /* train new candidates */
+ total_epochs += fann_train_candidates(ann, data, max_cand_epochs);
- /* fann_train_candidate */
- /* fann_install_candidate */
+ /* this installs the best candidate */
+ fann_install_candidate(ann);
}
+
+ /* Train outputs one last time */
+ total_epochs += fann_train_outputs(ann, data, desired_error, max_out_epochs);
+
+ /* Set pointers in connected_neurons
+ This is ONLY done in the end of cascade training,
+ since there is no need for them during training.
+ */
+ fann_set_shortcut_connections(ann);
}
int fann_train_outputs(struct fann *ann, struct fann_train_data *data, float desired_error, unsigned int max_epochs)
@@ -88,6 +121,7 @@ int fann_train_outputs(struct fann *ann, struct fann_train_data *data, float des
error = fann_train_outputs_epoch(ann, data);
if(error < desired_error){
+ printf("Error %f < %f (%f)\n", error, desired_error, fann_get_MSE(ann));
return i+1;
}
@@ -101,9 +135,9 @@ int fann_train_outputs(struct fann *ann, struct fann_train_data *data, float des
{
/*printf("error_improvement=%f, target_improvement=%f, backslide_improvement=%f, stagnation=%d\n", error_improvement, target_improvement, backslide_improvement, stagnation);*/
- target_improvement = error_improvement * (ann->change_fraction + 1);
- backslide_improvement = error_improvement * (ann->change_fraction - 1);
- stagnation = i + ann->stagnation_epochs;
+ target_improvement = error_improvement * (ann->cascade_change_fraction + 1);
+ backslide_improvement = error_improvement * (ann->cascade_change_fraction - 1);
+ stagnation = i + ann->cascade_stagnation_epochs;
}
/* No improvement in allotted period, so quit */
@@ -117,29 +151,467 @@ int fann_train_outputs(struct fann *ann, struct fann_train_data *data, float des
}
float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data)
-{
+{
+ return fann_train_epoch_quickprop(ann, data); /* TODO remove this line */
unsigned int i;
fann_reset_MSE(ann);
for(i = 0; i < data->num_data; i++){
- /* TODO this should be real quickprop training and only on the output layer */
- /*fann_train(ann, data->input[i], data->output[i]);*/
+ fann_run(ann, data->input[i]);
+ fann_compute_MSE(ann, data->output[i]);
+ fann_update_slopes_batch(ann, ann->last_layer-1, ann->last_layer-1);
+ }
+ /* TODO this should actually use the algorithm selected by
+ ann->training_algorithm
+ */
+ fann_update_weights_quickprop(ann, data->num_data, ann->last_layer-1, ann->last_layer-1);
+
+ return fann_get_MSE(ann);
+}
+
+int fann_reallocate_connections(struct fann *ann, unsigned int total_connections)
+{
+ /* The connections are allocated, but the pointers inside are
+ first moved in the end of the cascade training session.
+ */
+ ann->connections = (struct fann_neuron **)realloc(ann->connections, total_connections * sizeof(struct fann_neuron *));
+ if(ann->connections == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return -1;
+ }
+
+ ann->weights = (fann_type *)realloc(ann->weights, total_connections * sizeof(fann_type));
+ if(ann->weights == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return -1;
+ }
+
+ ann->train_slopes = (fann_type *)realloc(ann->train_slopes, total_connections * sizeof(fann_type));
+ if(ann->train_slopes == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return -1;
+ }
+ ann->prev_steps = (fann_type *)realloc(ann->prev_steps, total_connections * sizeof(fann_type));
+ if(ann->prev_steps == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return -1;
+ }
+
+ ann->prev_train_slopes = (fann_type *)realloc(ann->prev_train_slopes, total_connections * sizeof(fann_type));
+ if(ann->prev_train_slopes == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return -1;
+ }
+
+ ann->total_connections_allocated = total_connections;
+
+ return 0;
+}
+
+int fann_reallocate_neurons(struct fann *ann, unsigned int total_neurons)
+{
+ struct fann_layer *layer_it;
+ struct fann_neuron *neurons;
+ unsigned int num_neurons = 0;
+ unsigned int num_neurons_so_far = 0;
+
+ neurons = (struct fann_neuron *)realloc(ann->first_layer->first_neuron, total_neurons * sizeof(struct fann_neuron));
+ ann->total_neurons_allocated = total_neurons;
+
+ if(neurons == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return -1;
+ }
+
+ /* Also allocate room for more train_errors */
+ ann->train_errors = realloc(ann->train_errors, total_neurons * sizeof(fann_type));
+ if(ann->train_errors == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return -1;
+ }
+
+ if(neurons != ann->first_layer->first_neuron){
+ /* Then the memory has moved, also move the pointers */
+
+#ifdef CASCADE_DEBUG
+ printf("Moving neuron pointers\n");
+#endif
+
+ /* Move pointers from layers to neurons */
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+ num_neurons = layer_it->last_neuron - layer_it->first_neuron;
+ layer_it->first_neuron = neurons+num_neurons_so_far;
+ layer_it->last_neuron = layer_it->first_neuron+num_neurons;
+ num_neurons_so_far += num_neurons;
+ }
+ }
+
+ return 0;
+}
+
+int fann_initialize_candidates(struct fann *ann)
+{
+ /* The candidates are allocated after the normal neurons and connections,
+ but there is an empty place between the real neurons and the candidate neurons,
+ so that it will be possible to make room when the chosen candidate is copied in
+ at the desired place.
+ */
+ unsigned int neurons_to_allocate, connections_to_allocate;
+ unsigned int num_neurons = ann->total_neurons + ann->cascade_num_candidates + 1;
+ /* the number of connections going into and out of a candidate is at maximum
+ ann->total_neurons */
+ unsigned int candidate_connections = ann->total_neurons * (ann->cascade_num_candidates + 1);
+ unsigned int num_connections = ann->total_connections + candidate_connections;
+ unsigned int first_candidate_connection = ann->total_connections + ann->total_neurons;
+ unsigned int first_candidate_neuron = ann->total_neurons + 1;
+ unsigned int connection_it, i;
+ struct fann_neuron *neurons;
+
+ /* First make sure that there is enough room, and if not then allocate a
+ bit more so that we do not need to allocate more room each time.
+ */
+ if(num_neurons > ann->total_neurons_allocated){
+ /* Then we need to allocate more neurons
+ Allocate half as many neurons as already exist (at least ten)
+ */
+ neurons_to_allocate = num_neurons + num_neurons/2;
+ if(neurons_to_allocate < num_neurons + 10){
+ neurons_to_allocate = num_neurons + 10;
+ }
+
+ if(fann_reallocate_neurons(ann, neurons_to_allocate) == -1){
+ return -1;
+ }
+ }
+
+ if(num_connections > ann->total_connections_allocated){
+ /* Then we need to allocate more connections
+ Allocate half as many connections as already exist
+ (at least enough for ten neurons)
+ */
+ connections_to_allocate = num_connections + num_connections/2;
+ if(connections_to_allocate < num_connections + ann->total_neurons * 10){
+ connections_to_allocate = num_connections + ann->total_neurons * 10;
+ }
+
+ if(fann_reallocate_connections(ann, connections_to_allocate) == -1){
+ return -1;
+ }
+ }
+
+ /* Set the neurons.
+ */
+ connection_it = first_candidate_connection;
+ neurons = ann->first_layer->first_neuron;
+ for(i = first_candidate_neuron; i < num_neurons; i++){
+ /* TODO candidates should actually be created both in
+ the last layer before the output layer, and in a new layer.
+ */
+ neurons[i].value = 0;
+ neurons[i].first_con = connection_it;
+ connection_it += candidate_connections;
+ neurons[i].last_con = connection_it;
+ ann->train_errors[i] = 0;
+ }
+
+ /* Now randomize the weights and zero out the arrays that needs zeroing out.
+ */
+ printf("random cand weight [%d ... %d]\n", first_candidate_connection, num_connections-1);
+ for(i = first_candidate_connection; i < num_connections; i++){
+ ann->weights[i] = fann_random_weight();
+ ann->train_slopes[i] = 0;
+ ann->prev_steps[i] = 0;
+ ann->prev_train_slopes[i] = 0;
+ }
+
+ return 0;
+}
+
+int fann_train_candidates(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs)
+{
+ float best_cand_score;
+ float target_cand_score = 0.0;
+ float backslide_cand_score = 0.0;
+ unsigned int i;
+ unsigned int stagnation = max_epochs;
+
+ if(ann->cascade_candidate_scores == NULL){
+ ann->cascade_candidate_scores = (fann_type *)malloc(ann->cascade_num_candidates * sizeof(fann_type));
+ if(ann->cascade_candidate_scores == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return 0;
+ }
+ }
+
+ /* TODO remove, just sets to first candidate neuron and returns.
+ */
+ ann->cascade_best_candidate = ann->total_neurons+1;
+ return 0;
+
+ for(i = 0; i < max_epochs; i++){
+ best_cand_score = fann_train_candidates_epoch(ann, data);
+
+ if ((best_cand_score > target_cand_score) ||
+ (best_cand_score < backslide_cand_score))
+ {
+ /*printf("best_cand_score=%f, target_cand_score=%f, backslide_cand_score=%f, stagnation=%d\n", best_cand_score, target_cand_score, backslide_cand_score, stagnation);*/
+
+ target_cand_score = best_cand_score * (ann->cascade_change_fraction + 1);
+ backslide_cand_score = best_cand_score * (ann->cascade_change_fraction - 1);
+ stagnation = i + ann->cascade_stagnation_epochs;
+ }
+
+ /* No improvement in allotted period, so quit */
+ if (i >= stagnation)
+ {
+ return i+1;
+ }
+ }
+
+ return max_epochs;
+}
+
+void fann_update_candidate_slopes(struct fann *ann)
+{
+ struct fann_neuron * neurons = ann->first_layer->first_neuron;
+ struct fann_neuron * first_cand = neurons + ann->total_neurons + 1;
+ struct fann_neuron * last_cand = first_cand + ann->cascade_num_candidates;
+ struct fann_neuron * neuron_it;
+ unsigned int i, num_connections;
+ fann_type neuron_value, activation, derived;
+ fann_type *weights;
+
+ for(neuron_it = first_cand; neuron_it < last_cand; neuron_it++){
+
+ /* code more or less stolen from fann_run to fast forward pass
+ */
+
+ neuron_value = 0.0;
+ num_connections = neuron_it->last_con - neuron_it->first_con;
+ weights = ann->weights + neuron_it->first_con;
+
+ i = num_connections & 3; /* same as modulo 4 */
+ switch(i) {
+ case 3:
+ neuron_value += weights[2] * neurons[2].value;
+ case 2:
+ neuron_value += weights[1] * neurons[1].value;
+ case 1:
+ neuron_value += weights[0] * neurons[0].value;
+ case 0:
+ break;
+ }
+
+ for(;i != num_connections; i += 4){
+ neuron_value +=
+ weights[i] * neurons[i].value +
+ weights[i+1] * neurons[i+1].value +
+ weights[i+2] * neurons[i+2].value +
+ weights[i+3] * neurons[i+3].value;
+ }
+ }
+
+ activation = fann_activation(ann, 0, neuron_value);
+ derived = fann_activation_derived(ann->activation_function_hidden,
+ ann->activation_steepness_hidden, activation);
+
+ /* BIG TODO add more here do stuff for the output */
+
+}
+
+float fann_train_candidates_epoch(struct fann *ann, struct fann_train_data *data)
+{
+ /* TODO this should actually train the candidates, but first I will need to decide how the candidates should be allocated */
+
+ unsigned int i;
+ float MSE = fann_get_MSE(ann);
+
+ unsigned int num_cand = ann->cascade_num_candidates;
+ for(i = 0; i < num_cand; i++){
+ ann->cascade_candidate_scores[i] = (fann_type)MSE;
+ }
+
+ fann_reset_MSE(ann);
+
+ for(i = 0; i < data->num_data; i++){
fann_run(ann, data->input[i]);
fann_compute_MSE(ann, data->output[i]);
- fann_backpropagate_MSE(ann);
- /*fann_update_weights(ann);*/
- fann_update_slopes_batch(ann);
+ fann_update_candidate_slopes(ann);
}
- fann_update_weights_quickprop(ann, data->num_data);
- /*fann_update_weights_batch(ann, data->num_data);*/
- /*fann_update_output_weights(ann);*/
+ /* fann_update_candidate_weights */
+
+ return fann_get_MSE(ann); /* TODO return the score of the best candidate */
+}
+
+/* add a layer at the position pointed to by *layer */
+struct fann_layer *fann_add_layer(struct fann *ann, struct fann_layer *layer)
+{
+ int layer_pos = layer - ann->first_layer;
+ int num_layers = ann->last_layer - ann->first_layer + 1;
+ int i;
- return fann_get_MSE(ann);
+ /* allocate the layer */
+ struct fann_layer *layers = realloc(ann->first_layer, num_layers * sizeof(struct fann_layer));
+ if(layers == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return NULL;
+ }
+
+ /* copy layers so that the free space is at the right location */
+ for(i = num_layers-1; i >= layer_pos; i--){
+ layers[i] = layers[i-1];
+ }
+
+ /* the newly allocated layer is empty */
+ layers[layer_pos].first_neuron = layers[layer_pos+1].first_neuron;
+ layers[layer_pos].last_neuron = layers[layer_pos+1].first_neuron;
+
+ /* Set the ann pointers correctly */
+ ann->first_layer = layers;
+ ann->last_layer = layers + num_layers;
+
+#ifdef CASCADE_DEBUG
+ printf("add layer at pos %d\n", layer_pos);
+#endif
+
+ return layers + layer_pos;
}
-void fann_update_output_weights(struct fann *ann)
+void fann_set_shortcut_connections(struct fann *ann)
{
- printf("fann_update_output_weights not implemented\n");
+ struct fann_layer *layer_it;
+ struct fann_neuron *neuron_it, **neuron_pointers, *neurons;
+ unsigned int num_connections = 0, i;
+ neuron_pointers = ann->connections;
+ neurons = ann->first_layer->first_neuron;
+
+ for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
+ for(neuron_it = layer_it->first_neuron;
+ neuron_it != layer_it->last_neuron; neuron_it++){
+
+ neuron_pointers += num_connections;
+ num_connections = neuron_it->last_con - neuron_it->first_con;
+
+ for(i = 0; i != num_connections; i++){
+ neuron_pointers[i] = neurons + i;
+ }
+ }
+ }
}
+
+void fann_add_shortcut_neuron(struct fann *ann, struct fann_layer *layer)
+{
+ unsigned int num_connections_in = layer->first_neuron - ann->first_layer->first_neuron;
+ unsigned int num_connections_out = (ann->last_layer-1)->last_neuron - (layer+1)->first_neuron;
+ unsigned int num_connections_move = num_connections_out + num_connections_in;
+ unsigned int neurons_to_allocate = 0;
+ unsigned int connections_to_allocate = 0;
+ int i, candidate_con;
+
+ struct fann_layer *layer_it;
+ struct fann_neuron *neuron_it, *neuron_place;
+
+ /* We know that there is enough room for the new neuron
+ (the candidates are in the same arrays), so move
+ the last neurons to make room for this neuron.
+ */
+
+ /* first move the pointers to neurons in the layer structs */
+ for(layer_it = ann->last_layer-1; layer_it != layer; layer_it--){
+#ifdef CASCADE_DEBUG
+ printf("move neuron pointers in layer %d, first(%d -> %d), last(%d -> %d)\n",
+ layer_it - ann->first_layer,
+ layer_it->first_neuron - ann->first_layer->first_neuron,
+ layer_it->first_neuron - ann->first_layer->first_neuron + 1,
+ layer_it->last_neuron - ann->first_layer->first_neuron,
+ layer_it->last_neuron - ann->first_layer->first_neuron + 1);
+#endif
+ layer_it->first_neuron++;
+ layer_it->last_neuron++;
+ }
+
+ /* also move the last neuron in the layer that needs the neuron added */
+ layer->last_neuron++;
+
+ /* this is the place that should hold the new neuron */
+ neuron_place = layer->last_neuron-1;
+
+ printf("num_connections_in=%d, num_connections_out=%d, neurons_to_allocate=%d, connections_to_allocate=%d\n", num_connections_in, num_connections_out, neurons_to_allocate, connections_to_allocate);
+
+ /* move the actual neurons and the indexes to the connection arrays */
+ for(neuron_it = (ann->last_layer-1)->last_neuron-1;
+ neuron_it != neuron_place; neuron_it--){
+#ifdef CASCADE_DEBUG
+ printf("move neuron %d -> %d\n", neuron_it - ann->first_layer->first_neuron -1,
+ neuron_it - ann->first_layer->first_neuron);
+#endif
+ *neuron_it = *(neuron_it-1);
+
+#ifdef CASCADE_DEBUG
+ printf("move connection first(%d -> %d), last(%d -> %d)\n", neuron_it->first_con, neuron_it->first_con + num_connections_move-1, neuron_it->last_con, neuron_it->last_con + num_connections_move);
+#endif
+
+ /* move the weights */
+ printf("move weight[%d ... %d] -> weight[%d ... %d]\n", neuron_it->first_con, neuron_it->last_con-1, neuron_it->first_con + num_connections_move - 1, neuron_it->last_con + num_connections_move - 2);
+ for(i = neuron_it->last_con - 1; i >= (int)neuron_it->first_con; i--){
+#ifdef CASCADE_DEBUG
+ printf("move weight[%d] = weight[%d]\n", i + num_connections_move - 1, i);
+#endif
+ ann->weights[i + num_connections_move - 1] = ann->weights[i];
+ }
+
+ /* move the indexes to weights */
+ neuron_it->last_con += num_connections_move;
+ num_connections_move--;
+ neuron_it->first_con += num_connections_move;
+
+ /* set the new weight to the newly allocated neuron */
+ printf("random weight[%d]\n", neuron_it->last_con-1);
+#ifdef CASCADE_DEBUG
+ printf("random weight[%d]\n", neuron_it->last_con-1);
+#endif
+ /* TODO this should be the weights into the candidate
+ neuron, don't really know how to get this.
+ */
+ ann->weights[neuron_it->last_con-1] = (fann_type)fann_random_weight();
+ }
+
+ /* Now initialize the actual neuron */
+ neuron_place->value = 0;
+ neuron_place->last_con = (neuron_place+1)->first_con;
+ neuron_place->first_con = neuron_place->last_con - num_connections_in;
+#ifdef CASCADE_DEBUG
+ printf("neuron[%d] = (%d - %d)\n", neuron_place - ann->first_layer->first_neuron, neuron_place->first_con, neuron_place->last_con);
+#endif
+
+ candidate_con = ann->first_layer->first_neuron[ann->cascade_best_candidate].first_con;
+ /* initialize the input weights at random */
+ printf("move cand weights[%d ... %d] -> [%d ... %d]\n", candidate_con, candidate_con + num_connections_in-1, neuron_place->first_con, neuron_place->last_con-1);
+
+ for(i = 0; i < num_connections_in; i++){
+ ann->weights[i + neuron_place->first_con] = ann->weights[i + candidate_con];
+#ifdef CASCADE_DEBUG
+ printf("move weights[%d] -> weights[%d] (%f)\n", i + candidate_con, i + neuron_place->first_con, ann->weights[i + neuron_place->first_con]);
+#endif
+ }
+
+ /* Change some of main variables */
+ ann->total_neurons++;
+ ann->total_connections += num_connections_in + num_connections_out;
+
+ return;
+}
+
+void fann_install_candidate(struct fann *ann)
+{
+ struct fann_layer *layer;
+ layer = fann_add_layer(ann, ann->last_layer-1);
+ fann_add_shortcut_neuron(ann, layer);
+ return;
+}
+
+
+
+#endif /* FIXEDFANN */
diff --git a/src/fann_io.c b/src/fann_io.c
index 00abc52..062a862 100644
--- a/src/fann_io.c
+++ b/src/fann_io.c
@@ -112,8 +112,8 @@ int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuratio
for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
/* look at all connections to each neurons, and see how high a value we can get */
current_max_value = 0;
- for(i = 0; i != neuron_it->num_connections; i++){
- current_max_value += fann_abs(neuron_it->weights[i]);
+ for(i = neuron_it->first_con; i != neuron_it->last_con; i++){
+ current_max_value += fann_abs(ann->weights[i]);
}
if(current_max_value > max_possible_value){
@@ -173,13 +173,13 @@ int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuratio
for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
/* the number of connections to each neuron */
for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
- fprintf(conf, "%u ", neuron_it->num_connections);
+ fprintf(conf, "%u ", neuron_it->last_con - neuron_it->first_con);
}
fprintf(conf, "\n");
}
- connected_neurons = (ann->first_layer+1)->first_neuron->connected_neurons;
- weights = (ann->first_layer+1)->first_neuron->weights;
+ connected_neurons = ann->connections;
+ weights = ann->weights;
first_neuron = ann->first_layer->first_neuron;
/* Now save all the connections.
@@ -288,7 +288,7 @@ void fann_save_train_internal_fd(struct fann_train_data* data, FILE *file, char
*/
struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
{
- unsigned int num_layers, layer_size, activation_function_hidden, activation_function_output, input_neuron, i, shortcut_connections;
+ unsigned int num_layers, layer_size, activation_function_hidden, activation_function_output, input_neuron, i, shortcut_connections, num_connections;
#ifdef FIXEDFANN
unsigned int decimal_point, multiplier;
#endif
@@ -368,12 +368,20 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
layer_it->last_neuron = layer_it->first_neuron + layer_size;
ann->total_neurons += layer_size;
#ifdef DEBUG
- printf(" layer : %d neurons, 1 bias\n", layer_size);
+ if(ann->shortcut_connections && layer_it != ann->first_layer){
+ printf(" layer : %d neurons, 0 bias\n", layer_size);
+ } else {
+ printf(" layer : %d neurons, 1 bias\n", layer_size-1);
+ }
#endif
}
ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
- ann->num_output = ((ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron) - 1;
+ ann->num_output = ((ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron);
+ if(!ann->shortcut_connections){
+ /* one too many (bias) in the output layer */
+ ann->num_output--;
+ }
/* allocate room for the actual neurons */
fann_allocate_neurons(ann);
@@ -385,12 +393,14 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
last_neuron = (ann->last_layer-1)->last_neuron;
for(neuron_it = ann->first_layer->first_neuron;
neuron_it != last_neuron; neuron_it++){
- if(fscanf(conf, "%u ", &neuron_it->num_connections) != 1){
+ if(fscanf(conf, "%u ", &num_connections) != 1){
fann_error((struct fann_error *)ann, FANN_E_CANT_READ_NEURON, configuration_file);
fann_destroy(ann);
return NULL;
}
- ann->total_connections += neuron_it->num_connections;
+ neuron_it->first_con = ann->total_connections;
+ ann->total_connections += num_connections;
+ neuron_it->last_con = ann->total_connections;
}
fann_allocate_connections(ann);
@@ -399,8 +409,8 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
return NULL;
}
- connected_neurons = (ann->first_layer+1)->first_neuron->connected_neurons;
- weights = (ann->first_layer+1)->first_neuron->weights;
+ connected_neurons = ann->connections;
+ weights = ann->weights;
first_neuron = ann->first_layer->first_neuron;
for(i = 0; i < ann->total_connections; i++){
diff --git a/src/fann_options.c b/src/fann_options.c
index 341a7e4..25df6a2 100644
--- a/src/fann_options.c
+++ b/src/fann_options.c
@@ -33,14 +33,19 @@ FANN_EXTERNAL void FANN_API fann_print_parameters(struct fann *ann)
printf("Input layer : %2d neurons, 1 bias\n", ann->num_input);
for(layer_it = ann->first_layer+1; layer_it != ann->last_layer-1; layer_it++){
- printf(" Hidden layer : %2d neurons, 1 bias\n",
- layer_it->last_neuron - layer_it->first_neuron - 1);
+ if(ann->shortcut_connections){
+ printf(" Hidden layer : %2d neurons, 0 bias\n",
+ layer_it->last_neuron - layer_it->first_neuron);
+ } else {
+ printf(" Hidden layer : %2d neurons, 1 bias\n",
+ layer_it->last_neuron - layer_it->first_neuron - 1);
+ }
}
printf("Output layer : %2d neurons\n", ann->num_output);
printf("Total neurons and biases : %2d\n", fann_get_total_neurons(ann));
printf("Total connections : %2d\n", ann->total_connections);
printf("Connection rate : %5.2f\n", ann->connection_rate);
- printf("Shortcut connections : %2d\n", ann->shortcut_connections);
+ printf("Shortcut connections : %2d\n", ann->shortcut_connections);
printf("Training algorithm : %s\n", FANN_TRAIN_NAMES[ann->training_algorithm]);
printf("Learning rate : %5.2f\n", ann->learning_rate);
printf("Activation function hidden : %s\n", FANN_ACTIVATION_NAMES[ann->activation_function_hidden]);
@@ -159,8 +164,12 @@ FANN_EXTERNAL fann_type FANN_API fann_get_activation_steepness_output(struct fan
FANN_EXTERNAL unsigned int FANN_API fann_get_total_neurons(struct fann *ann)
{
- /* -1, because there is always an unused bias neuron in the last layer */
- return ann->total_neurons - 1;
+ if(ann->shortcut_connections){
+ return ann->total_neurons;
+ } else {
+ /* -1, because there is always an unused bias neuron in the last layer */
+ return ann->total_neurons - 1;
+ }
}
FANN_EXTERNAL unsigned int FANN_API fann_get_total_connections(struct fann *ann)
@@ -168,17 +177,6 @@ FANN_EXTERNAL unsigned int FANN_API fann_get_total_connections(struct fann *ann)
return ann->total_connections;
}
-fann_type * fann_get_weights(struct fann *ann)
-{
- return (ann->first_layer+1)->first_neuron->weights;
-}
-
-struct fann_neuron** fann_get_connections(struct fann *ann)
-{
- return (ann->first_layer+1)->first_neuron->connected_neurons;
-}
-
-
/* When using this, training is usually faster. (default ).
Makes the error used for calculating the slopes
higher when the difference is higher.
diff --git a/src/fann_train.c b/src/fann_train.c
index 8fd6100..d17c6a0 100644
--- a/src/fann_train.c
+++ b/src/fann_train.c
@@ -33,7 +33,7 @@
Calculates the derived of a value, given an activation function
and a steepness
*/
-static fann_type fann_activation_derived(unsigned int activation_function,
+fann_type fann_activation_derived(unsigned int activation_function,
fann_type steepness, fann_type value)
{
switch(activation_function){
@@ -52,6 +52,80 @@ static fann_type fann_activation_derived(unsigned int activation_function,
}
}
+/* INTERNAL FUNCTION
+ Calculates the activation of a value, given an activation function
+ and a steepness
+*/
+fann_type fann_activation(struct fann *ann, unsigned int is_output_layer,
+ fann_type value)
+{
+ /* values used for the stepwise linear sigmoid function */
+ fann_type rh1 = 0, rh2 = 0, rh3 = 0, rh4 = 0, rh5 = 0, rh6 = 0;
+ fann_type h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;
+ fann_type low = 0;
+
+ fann_type steepness = (is_output_layer) ?
+ ann->activation_steepness_output : ann->activation_steepness_hidden;
+
+ unsigned int activation_function = (is_output_layer) ?
+ ann->activation_function_output : ann->activation_function_hidden;
+
+ switch(activation_function){
+ case FANN_LINEAR:
+ return (fann_type)fann_linear(steepness, value);
+ case FANN_SIGMOID:
+ return (fann_type)fann_sigmoid(steepness, value);
+ case FANN_SIGMOID_SYMMETRIC:
+ return (fann_type)fann_sigmoid_symmetric(steepness, value);
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ low = -1;
+ /* fallthrough */
+ case FANN_SIGMOID_STEPWISE:
+ if(is_output_layer){
+ /* the output results */
+ rh1 = ann->activation_results_output[0];
+ rh2 = ann->activation_results_output[1];
+ rh3 = ann->activation_results_output[2];
+ rh4 = ann->activation_results_output[3];
+ rh5 = ann->activation_results_output[4];
+ rh6 = ann->activation_results_output[5];
+
+ /* the output parameters */
+ h1 = ann->activation_values_output[0];
+ h2 = ann->activation_values_output[1];
+ h3 = ann->activation_values_output[2];
+ h4 = ann->activation_values_output[3];
+ h5 = ann->activation_values_output[4];
+ h6 = ann->activation_values_output[5];
+ }else{
+ /* the hidden results */
+ rh1 = ann->activation_results_hidden[0];
+ rh2 = ann->activation_results_hidden[1];
+ rh3 = ann->activation_results_hidden[2];
+ rh4 = ann->activation_results_hidden[3];
+ rh5 = ann->activation_results_hidden[4];
+ rh6 = ann->activation_results_hidden[5];
+
+ /* the hidden parameters */
+ h1 = ann->activation_values_hidden[0];
+ h2 = ann->activation_values_hidden[1];
+ h3 = ann->activation_values_hidden[2];
+ h4 = ann->activation_values_hidden[3];
+ h5 = ann->activation_values_hidden[4];
+ h6 = ann->activation_values_hidden[5];
+ }
+
+ return (fann_type)fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, low, 1, value);
+ case FANN_THRESHOLD:
+ return (fann_type)((value < 0) ? 0 : 1);
+ case FANN_THRESHOLD_SYMMETRIC:
+ return (fann_type)((value < 0) ? -1 : 1);
+ default:
+ fann_error((struct fann_error *)ann, FANN_E_CANT_USE_ACTIVATION);
+ return 0;
+ }
+}
+
/* Trains the network with the backpropagation algorithm.
*/
FANN_EXTERNAL void FANN_API fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
@@ -160,10 +234,10 @@ void fann_compute_MSE(struct fann *ann, fann_type *desired_output)
fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
+ } else {
+ /* clear the error variabels */
+ memset(ann->train_errors, 0, (ann->total_neurons) * sizeof(fann_type));
}
-
- /* clear the error variabels */
- memset(ann->train_errors, 0, (ann->total_neurons) * sizeof(fann_type));
error_begin = ann->train_errors;
#ifdef DEBUGTRAIN
@@ -214,9 +288,11 @@ void fann_backpropagate_MSE(struct fann *ann)
unsigned int i;
struct fann_layer *layer_it;
struct fann_neuron *neuron_it, *last_neuron;
+ struct fann_neuron **connections;
fann_type *error_begin = ann->train_errors;
fann_type *error_prev_layer;
+ fann_type *weights;
const fann_type activation_steepness_hidden = ann->activation_steepness_hidden;
const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
const struct fann_layer *second_layer = ann->first_layer + 1;
@@ -228,16 +304,23 @@ void fann_backpropagate_MSE(struct fann *ann)
last_neuron = layer_it->last_neuron;
/* for each connection in this layer, propagate the error backwards*/
- if(ann->connection_rate >= 1 && !ann->shortcut_connections){
- /* optimization for fully connected networks */
- /* but not shortcut connected networks */
- error_prev_layer = error_begin + ((layer_it-1)->first_neuron - first_neuron);
+ if(ann->connection_rate >= 1){
+ if(!ann->shortcut_connections){
+ error_prev_layer = error_begin + ((layer_it-1)->first_neuron - first_neuron);
+ }else{
+ error_prev_layer = error_begin;
+ }
+
for(neuron_it = layer_it->first_neuron;
neuron_it != last_neuron; neuron_it++){
tmp_error = error_begin[neuron_it - first_neuron];
- for(i = neuron_it->num_connections ; i-- ; ){
- error_prev_layer[i] += tmp_error * neuron_it->weights[i];
+ weights = ann->weights + neuron_it->first_con;
+ for(i = neuron_it->last_con - neuron_it->first_con; i-- ; ){
+ /*printf("i = %d\n", i);
+ printf("error_prev_layer[%d] = %f\n", i, error_prev_layer[i]);
+ printf("weights[%d] = %f\n", i, weights[i]);*/
+ error_prev_layer[i] += tmp_error * weights[i];
}
}
}else{
@@ -245,8 +328,10 @@ void fann_backpropagate_MSE(struct fann *ann)
neuron_it != last_neuron; neuron_it++){
tmp_error = error_begin[neuron_it - first_neuron];
- for(i = neuron_it->num_connections ; i-- ; ){
- error_begin[neuron_it->connected_neurons[i] - first_neuron] += tmp_error * neuron_it->weights[i];
+ weights = ann->weights + neuron_it->first_con;
+ connections = ann->connections + neuron_it->first_con;
+ for(i = neuron_it->last_con - neuron_it->first_con; i-- ; ){
+ error_begin[connections[i] - first_neuron] += tmp_error * weights[i];
}
}
}
@@ -296,44 +381,51 @@ void fann_backpropagate_MSE(struct fann *ann)
*/
void fann_update_weights(struct fann *ann)
{
- struct fann_neuron *neuron_it, *last_neuron, *prev_neurons;
- fann_type tmp_error;
+ struct fann_neuron *neuron_it, *last_neuron, *prev_neurons, **connections;
+ fann_type tmp_error, *weights;
struct fann_layer *layer_it;
unsigned int i;
+ unsigned int num_connections;
/* store some variabels local for fast access */
const float learning_rate = ann->learning_rate;
- const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
+ struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
struct fann_layer *first_layer = ann->first_layer;
const struct fann_layer *last_layer = ann->last_layer;
- fann_type *error_begin = ann->train_errors;
+ fann_type *error_begin = ann->train_errors;
#ifdef DEBUGTRAIN
printf("\nupdate weights\n");
#endif
-
+
+ prev_neurons = first_neuron;
for(layer_it = (first_layer+1); layer_it != last_layer; layer_it++){
#ifdef DEBUGTRAIN
printf("layer[%d]\n", layer_it - first_layer);
#endif
last_neuron = layer_it->last_neuron;
- if(ann->connection_rate >= 1 && !ann->shortcut_connections){
- /* optimization for fully connected networks */
- /* but not shortcut connected networks */
- prev_neurons = (layer_it-1)->first_neuron;
+ if(ann->connection_rate >= 1){
+ if(!ann->shortcut_connections){
+ prev_neurons = (layer_it-1)->first_neuron;
+ }
for(neuron_it = layer_it->first_neuron;
neuron_it != last_neuron; neuron_it++){
tmp_error = error_begin[neuron_it - first_neuron] * learning_rate;
- for(i = neuron_it->num_connections ; i-- ; ){
- neuron_it->weights[i] += tmp_error * prev_neurons[i].value;
+ num_connections = neuron_it->last_con - neuron_it->first_con;
+ weights = ann->weights + neuron_it->first_con;
+ for(i = 0; i != num_connections; i++){
+ weights[i] += tmp_error * prev_neurons[i].value;
}
}
}else{
for(neuron_it = layer_it->first_neuron;
neuron_it != last_neuron; neuron_it++){
tmp_error = error_begin[neuron_it - first_neuron] * learning_rate;
- for(i = neuron_it->num_connections ; i-- ; ){
- neuron_it->weights[i] += tmp_error * neuron_it->connected_neurons[i]->value;
+ num_connections = neuron_it->last_con - neuron_it->first_con;
+ weights = ann->weights + neuron_it->first_con;
+ connections = ann->connections + neuron_it->first_con;
+ for(i = 0; i != num_connections; i++){
+ weights[i] += tmp_error * connections[i]->value;
}
}
}
@@ -342,62 +434,75 @@ void fann_update_weights(struct fann *ann)
/* INTERNAL FUNCTION
Update slopes for batch training
+ layer_begin = ann->first_layer+1 and layer_end = ann->last_layer-1
+ will update all slopes.
+
*/
-void fann_update_slopes_batch(struct fann *ann)
+void fann_update_slopes_batch(struct fann *ann, struct fann_layer *layer_begin, struct fann_layer *layer_end)
{
- struct fann_neuron *neuron_it, *last_neuron, *prev_neurons;
+ struct fann_neuron *neuron_it, *last_neuron, *prev_neurons, **connections;
fann_type tmp_error, *weights_begin;
- struct fann_layer *layer_it;
- unsigned int i;
+ unsigned int i, num_connections;
/* store some variabels local for fast access */
- const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
- struct fann_layer *first_layer = ann->first_layer;
- const struct fann_layer *last_layer = ann->last_layer;
+ struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
fann_type *error_begin = ann->train_errors;
fann_type *slope_begin, *neuron_slope;
/* if no room allocated for the slope variabels, allocate it now */
if(ann->train_slopes == NULL){
- ann->train_slopes = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+ ann->train_slopes = (fann_type *)calloc(ann->total_connections_allocated, sizeof(fann_type));
if(ann->train_slopes == NULL){
fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
- memset(ann->train_slopes, 0, (ann->total_connections) * sizeof(fann_type));
}
+ if(layer_begin == NULL){
+ layer_begin = ann->first_layer+1;
+ }
+
+ if(layer_end == NULL){
+ layer_end = ann->last_layer-1;
+ }
+
slope_begin = ann->train_slopes;
- weights_begin = fann_get_weights(ann);
+ weights_begin = ann->weights;
#ifdef DEBUGTRAIN
printf("\nupdate slopes\n");
#endif
+
+ prev_neurons = first_neuron;
- for(layer_it = (first_layer+1); layer_it != last_layer; layer_it++){
+ for(; layer_begin <= layer_end; layer_begin++){
#ifdef DEBUGTRAIN
- printf("layer[%d]\n", layer_it - first_layer);
+ printf("layer[%d]\n", layer_begin - ann->first_layer);
#endif
- last_neuron = layer_it->last_neuron;
- if(ann->connection_rate >= 1 && !ann->shortcut_connections){
- /* optimization for fully connected networks */
- /* but not shortcut connected networks */
- prev_neurons = (layer_it-1)->first_neuron;
- for(neuron_it = layer_it->first_neuron;
+ last_neuron = layer_begin->last_neuron;
+ if(ann->connection_rate >= 1){
+ if(!ann->shortcut_connections){
+ prev_neurons = (layer_begin-1)->first_neuron;
+ }
+
+ for(neuron_it = layer_begin->first_neuron;
neuron_it != last_neuron; neuron_it++){
tmp_error = error_begin[neuron_it - first_neuron];
- neuron_slope = slope_begin + (neuron_it->weights - weights_begin);
- for(i = neuron_it->num_connections ; i-- ; ){
+ neuron_slope = slope_begin + neuron_it->first_con;
+ num_connections = neuron_it->last_con - neuron_it->first_con;
+ for(i = 0; i != num_connections; i++){
neuron_slope[i] += tmp_error * prev_neurons[i].value;
}
}
}else{
- for(neuron_it = layer_it->first_neuron;
+ for(neuron_it = layer_begin->first_neuron;
neuron_it != last_neuron; neuron_it++){
tmp_error = error_begin[neuron_it - first_neuron];
- neuron_slope = slope_begin + (neuron_it->weights - weights_begin);
- for(i = neuron_it->num_connections ; i-- ; ){
- neuron_slope[i] += tmp_error * neuron_it->connected_neurons[i]->value;
+ neuron_slope = slope_begin + neuron_it->first_con;
+ num_connections = neuron_it->last_con - neuron_it->first_con;
+ connections = ann->connections + neuron_it->first_con;
+ for(i = 0; i != num_connections; i++){
+ neuron_slope[i] += tmp_error * connections[i]->value;
}
}
}
@@ -412,34 +517,38 @@ void fann_clear_train_arrays(struct fann *ann)
{
unsigned int i;
- /* if no room allocated for the slope variabels, allocate it now */
+ /* if no room allocated for the slope variabels, allocate it now
+ (calloc clears mem) */
if(ann->train_slopes == NULL){
- ann->train_slopes = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+ ann->train_slopes = (fann_type *)calloc(ann->total_connections_allocated, sizeof(fann_type));
if(ann->train_slopes == NULL){
fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
+ } else {
+ memset(ann->train_slopes, 0, (ann->total_connections) * sizeof(fann_type));
}
- memset(ann->train_slopes, 0, (ann->total_connections) * sizeof(fann_type));
+
/* if no room allocated for the variabels, allocate it now */
if(ann->prev_steps == NULL){
- ann->prev_steps = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+ ann->prev_steps = (fann_type *)calloc(ann->total_connections_allocated, sizeof(fann_type));
if(ann->prev_steps == NULL){
fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
+ } else {
+ memset(ann->prev_steps, 0, (ann->total_connections) * sizeof(fann_type));
}
- memset(ann->prev_steps, 0, (ann->total_connections) * sizeof(fann_type));
/* if no room allocated for the variabels, allocate it now */
if(ann->prev_train_slopes == NULL){
- ann->prev_train_slopes = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+ ann->prev_train_slopes = (fann_type *)malloc(ann->total_connections_allocated * sizeof(fann_type));
if(ann->prev_train_slopes == NULL){
fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
return;
}
- }
-
+ }
+
if(ann->training_algorithm == FANN_TRAIN_RPROP){
for(i = 0; i < ann->total_connections; i++){
ann->prev_train_slopes[i] = (fann_type)0.0125;
@@ -452,13 +561,25 @@ void fann_clear_train_arrays(struct fann *ann)
/* INTERNAL FUNCTION
Update weights for batch training
*/
-void fann_update_weights_batch(struct fann *ann, unsigned int num_data)
+void fann_update_weights_batch(struct fann *ann, unsigned int num_data, struct fann_layer *layer_begin, struct fann_layer *layer_end)
{
fann_type *train_slopes = ann->train_slopes;
- fann_type *weights = fann_get_weights(ann);
+ fann_type *weights = ann->weights;
const float epsilon = ann->learning_rate/num_data;
- unsigned int i = ann->total_connections;
- while(i--){
+ unsigned int i, past_end;
+
+ if(layer_begin == NULL){
+ layer_begin = ann->first_layer+1;
+ }
+
+ if(layer_end == NULL){
+ layer_end = ann->last_layer-1;
+ }
+
+ i = layer_begin->first_neuron->first_con;
+ past_end = (layer_end->last_neuron - 1)->last_con;
+
+ for(;i != past_end; i++){
weights[i] += train_slopes[i] * epsilon;
train_slopes[i] = 0.0;
}
@@ -467,10 +588,10 @@ void fann_update_weights_batch(struct fann *ann, unsigned int num_data)
/* INTERNAL FUNCTION
The quickprop training algorithm
*/
-void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data)
+void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data, struct fann_layer *layer_begin, struct fann_layer *layer_end)
{
fann_type *train_slopes = ann->train_slopes;
- fann_type *weights = fann_get_weights(ann);
+ fann_type *weights = ann->weights;
fann_type *prev_steps = ann->prev_steps;
fann_type *prev_train_slopes = ann->prev_train_slopes;
@@ -481,8 +602,20 @@ void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data)
float mu = ann->quickprop_mu; /*1.75;*/
float shrink_factor = (float)(mu / (1.0 + mu));
- unsigned int i = ann->total_connections;
- while(i--){
+ unsigned int i, past_end;
+
+ if(layer_begin == NULL){
+ layer_begin = ann->first_layer+1;
+ }
+
+ if(layer_end == NULL){
+ layer_end = ann->last_layer-1;
+ }
+
+ i = layer_begin->first_neuron->first_con;
+ past_end = (layer_end->last_neuron - 1)->last_con;
+
+ for(;i != past_end; i++){
w = weights[i];
prev_step = prev_steps[i];
slope = train_slopes[i] + decay * w;
@@ -490,7 +623,6 @@ void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data)
next_step = 0.0;
/* The step must always be in direction opposite to the slope. */
-
if(prev_step > 0.001) {
/* If last step was positive... */
if(slope > 0.0) {
@@ -534,10 +666,10 @@ void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data)
/* INTERNAL FUNCTION
The iRprop- algorithm
*/
-void fann_update_weights_irpropm(struct fann *ann, unsigned int num_data)
+void fann_update_weights_irpropm(struct fann *ann, unsigned int num_data, struct fann_layer *layer_begin, struct fann_layer *layer_end)
{
fann_type *train_slopes = ann->train_slopes;
- fann_type *weights = fann_get_weights(ann);
+ fann_type *weights = ann->weights;
fann_type *prev_steps = ann->prev_steps;
fann_type *prev_train_slopes = ann->prev_train_slopes;
@@ -549,8 +681,20 @@ void fann_update_weights_irpropm(struct fann *ann, unsigned int num_data)
float delta_min = ann->rprop_delta_min;/*0.0;*/
float delta_max = ann->rprop_delta_max;/*50.0;*/
- unsigned int i = ann->total_connections;
- while(i--){
+ unsigned int i, past_end;
+
+ if(layer_begin == NULL){
+ layer_begin = ann->first_layer+1;
+ }
+
+ if(layer_end == NULL){
+ layer_end = ann->last_layer-1;
+ }
+
+ i = layer_begin->first_neuron->first_con;
+ past_end = (layer_end->last_neuron - 1)->last_con;
+
+ for(;i != past_end; i++){
prev_step = fann_max(prev_steps[i], (fann_type)0.001); /* prev_step may not be zero because then the training will stop */
slope = train_slopes[i];
prev_slope = prev_train_slopes[i];
diff --git a/src/fann_train_data.c b/src/fann_train_data.c
index 9ade7c1..068d102 100644
--- a/src/fann_train_data.c
+++ b/src/fann_train_data.c
@@ -87,9 +87,9 @@ float fann_train_epoch_quickprop(struct fann *ann, struct fann_train_data *data)
fann_run(ann, data->input[i]);
fann_compute_MSE(ann, data->output[i]);
fann_backpropagate_MSE(ann);
- fann_update_slopes_batch(ann);
+ fann_update_slopes_batch(ann, ann->first_layer+1, ann->last_layer-1);
}
- fann_update_weights_quickprop(ann, data->num_data);
+ fann_update_weights_quickprop(ann, data->num_data, ann->first_layer+1, ann->last_layer-1);
return fann_get_MSE(ann);
}
@@ -109,9 +109,9 @@ float fann_train_epoch_irpropm(struct fann *ann, struct fann_train_data *data)
fann_run(ann, data->input[i]);
fann_compute_MSE(ann, data->output[i]);
fann_backpropagate_MSE(ann);
- fann_update_slopes_batch(ann);
+ fann_update_slopes_batch(ann, ann->first_layer+1, ann->last_layer-1);
}
- fann_update_weights_irpropm(ann, data->num_data);
+ fann_update_weights_irpropm(ann, data->num_data, ann->first_layer+1, ann->last_layer-1);
return fann_get_MSE(ann);
}
@@ -126,9 +126,9 @@ float fann_train_epoch_batch(struct fann *ann, struct fann_train_data *data)
fann_run(ann, data->input[i]);
fann_compute_MSE(ann, data->output[i]);
fann_backpropagate_MSE(ann);
- fann_update_slopes_batch(ann);
+ fann_update_slopes_batch(ann, ann->first_layer+1, ann->last_layer-1);
}
- fann_update_weights_batch(ann, data->num_data);
+ fann_update_weights_batch(ann, data->num_data, ann->first_layer+1, ann->last_layer-1);
return fann_get_MSE(ann);
}
diff --git a/src/fixedfann.c b/src/fixedfann.c
index b4fecfd..9878803 100644
--- a/src/fixedfann.c
+++ b/src/fixedfann.c
@@ -27,3 +27,4 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include "fann_train_data.c"
#include "fann_options.c"
#include "fann_error.c"
+#include "fann_cascade.c"
diff --git a/src/floatfann.c b/src/floatfann.c
index 4f411ce..baa4cc5 100644
--- a/src/floatfann.c
+++ b/src/floatfann.c
@@ -27,3 +27,4 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include "fann_train_data.c"
#include "fann_options.c"
#include "fann_error.c"
+#include "fann_cascade.c"
diff --git a/src/include/fann.h b/src/include/fann.h
index 5c7a271..bed141e 100644
--- a/src/include/fann.h
+++ b/src/include/fann.h
@@ -314,6 +314,10 @@ FANN_EXTERNAL void FANN_API fann_save_train(struct fann_train_data* data, char *
*/
FANN_EXTERNAL void FANN_API fann_save_train_to_fixed(struct fann_train_data* data, char *filename, unsigned int decimal_point);
+/* ----- Implemented in fann_cascade.c Used to train the ANN with cascade correlation ----- */
+void fann_cascadetrain_on_data_callback(struct fann *ann, struct fann_train_data *data, float desired_error, int (*callback)(unsigned int epochs, float error), unsigned int max_out_epochs, unsigned int max_cand_epochs, unsigned int max_neurons, unsigned int neurons_between_reports);
+
+
/* ----- Implemented in fann_options.c Get and set options for the ANNs ----- */
/* Prints all of the parameters and options of the ANN */
diff --git a/src/include/fann_data.h b/src/include/fann_data.h
index 23cff67..7249c25 100644
--- a/src/include/fann_data.h
+++ b/src/include/fann_data.h
@@ -28,9 +28,11 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
struct fann_neuron
{
- fann_type *weights;
- struct fann_neuron **connected_neurons;
- unsigned int num_connections;
+ /* Index to the first and last connection
+ (actually the last is a past end index)
+ */
+ unsigned int first_con;
+ unsigned int last_con;
fann_type value;
#ifdef __GNUC__
}__attribute__((packed));
@@ -103,6 +105,12 @@ struct fann
/* Number of output neurons (not calculating bias) */
unsigned int num_output;
+ /* The weight array */
+ fann_type *weights;
+
+ /* The connection array */
+ struct fann_neuron **connections;
+
/* Used to contain the errors used during training
* Is allocated during first training session,
* which means that if we do not train, it is never allocated.
@@ -175,14 +183,40 @@ struct fann
/* The error must change by at least this
fraction of its old value to count as a
- significant change. NOT IMPLEMENTED YET
+ significant change.
*/
- /* float change_fraction; */
+ float cascade_change_fraction;
/* No change in this number of epochs will cause
- stagnation. NOT IMPLEMENTED YET
+ stagnation.
+ */
+ unsigned int cascade_stagnation_epochs;
+
+ /* The number of candidate neurons used during cascade correlation
+ training.
*/
- /* unsigned int stagnation_epochs; */
+ unsigned int cascade_num_candidates;
+
+ /* The current best candidate, which will be installed.
+ */
+ unsigned int cascade_best_candidate;
+
+ /* An array consisting of the score of the individual candidates,
+ which is used to decide which candidate is the best
+ */
+ fann_type *cascade_candidate_scores;
+
+ /* The number of allocated neurons during cascade correlation algorithms.
+ This number might be higher than the actual number of neurons to avoid
+ allocating new space too often.
+ */
+ unsigned int total_neurons_allocated;
+
+ /* The number of allocated connections during cascade correlation algorithms.
+ This number might be higher than the actual number of neurons to avoid
+ allocating new space too often.
+ */
+ unsigned int total_connections_allocated;
/* Variables for use with Quickprop training */
diff --git a/src/include/fann_internal.h b/src/include/fann_internal.h
index c9e7d42..ffece5c 100644
--- a/src/include/fann_internal.h
+++ b/src/include/fann_internal.h
@@ -60,23 +60,23 @@ void fann_compute_MSE(struct fann *ann, fann_type *desired_output);
void fann_update_output_weights(struct fann *ann);
void fann_backpropagate_MSE(struct fann *ann);
void fann_update_weights(struct fann *ann);
-void fann_update_slopes_batch(struct fann *ann);
-void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data);
-void fann_update_weights_irpropm(struct fann *ann, unsigned int num_data);
-void fann_update_weights_batch(struct fann *ann, unsigned int num_data);
+void fann_update_slopes_batch(struct fann *ann, struct fann_layer *layer_begin, struct fann_layer *layer_end);
+void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data, struct fann_layer *layer_begin, struct fann_layer *layer_end);
+void fann_update_weights_batch(struct fann *ann, unsigned int num_data, struct fann_layer *layer_begin, struct fann_layer *layer_end);
+void fann_update_weights_irpropm(struct fann *ann, unsigned int num_data, struct fann_layer *layer_begin, struct fann_layer *layer_end);
+void fann_clear_train_arrays(struct fann *ann);
-/* get a pointer to the weights */
-fann_type* fann_get_weights(struct fann *ann);
-/* get a pointer to the connections */
-struct fann_neuron** fann_get_connections(struct fann *ann);
+fann_type fann_activation(struct fann *ann, unsigned int is_output_layer,
+ fann_type value);
-void fann_clear_train_arrays(struct fann *ann);
+fann_type fann_activation_derived(unsigned int activation_function,
+ fann_type steepness, fann_type value);
/* called fann_max, in order to not interferre with predefined versions of max */
#define fann_max(x, y) (((x) > (y)) ? (x) : (y))
#define fann_min(x, y) (((x) < (y)) ? (x) : (y))
-#define fann_safe_free(x) if(x) free(x)
+#define fann_safe_free(x) {if(x) { free(x); x = NULL; }}
#define fann_clip(x, lo, hi) (((x) < (lo)) ? (lo) : (((x) > (hi)) ? (hi) : (x)))
#define fann_rand(min_value, max_value) (((double)(min_value))+(((double)(max_value)-((double)(min_value)))*rand()/(RAND_MAX+1.0)))
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/libfann.git
More information about the debian-science-commits
mailing list