[libfann] 129/242: Merge from lukesky

Christian Kastner chrisk-guest at moszumanska.debian.org
Sat Oct 4 21:10:28 UTC 2014


This is an automated email from the git hooks/post-receive script.

chrisk-guest pushed a commit to tag Version2_0_0
in repository libfann.

commit 09dbe6060486bc754608a82b6e2024bdf7456ec9
Author: Steffen Nissen <lukesky at diku.dk>
Date:   Thu May 6 22:35:10 2004 +0000

    Merge from lukesky
---
 ChangeLog                     |   4 +
 Makefile.in                   |   3 +-
 examples/Makefile             |  23 ++--
 examples/mushroom.c           |   2 -
 examples/robot.c              |   2 -
 examples/simple_train.c       |   4 +-
 examples/xor.data             |  10 +-
 examples/xor_train.c          |   2 -
 src/Makefile.in               |   1 -
 src/fann.c                    | 248 ++++++++++++++++++++++++++++++++++++++----
 src/fann_error.c              |  34 +++---
 src/fann_io.c                 |  17 +--
 src/fann_train.c              |  12 +-
 src/fann_train_data.c         |   2 +-
 src/include/fann.h            |  16 ++-
 src/include/fann_activation.h |   2 -
 src/include/fann_data.h       |   7 ++
 src/include/fann_internal.h   |   4 +-
 18 files changed, 312 insertions(+), 81 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index 50cd98b..84833b4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,7 @@
+libfann (1.2.0) stable; urgency=low
+	* Fixes for better compatibility with different compilers
+	* Connections that skip layers
+	
 libfann (1.1.0) stable; urgency=low
 	* Error checking when allocating memory
 	* Debian package
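
The "Connections that skip layers" entry refers to the new
fann_create_forward()/fann_create_forward_array() calls added to src/fann.c
below. A minimal sketch of how the new API is meant to be used, assuming the
float library and the examples' usual build line (gcc -O3 -lm -lfann):

	#include <stdio.h>
	#include "fann.h"

	int main()
	{
		fann_type input[2] = {0, 1};
		fann_type *output;

		/* 2 inputs, 3 hidden neurons, 1 output; with forward
		   connections every neuron is also connected to all
		   neurons in all later layers */
		struct fann *ann = fann_create_forward(0.7f, 3, 2, 3, 1);

		fann_print_connections(ann); /* also new in this commit */

		output = fann_run(ann, input);
		printf("out: %f\n", output[0]);

		fann_destroy(ann);
		return 0;
	}
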
diff --git a/Makefile.in b/Makefile.in
index 2258129..1a3b77f 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -139,8 +139,7 @@ EXTRA_DIST = benchmarks doc examples python MSVC++ src/include/config.h \
 	debian/docs debian/libfann1-dev.dirs debian/libfann1-dev.examples \
 	debian/libfann1-dev.files debian/libfann1-dev.install \
 	debian/libfann1.dirs debian/libfann1.files debian/libfann1.install \
-	debian/rules debian/files debian/libfann1.postinst.debhelper \
-	debian/libfann1.postrm.debhelper debian/libfann1.substvars
+	debian/rules 
 
 subdir = .
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
diff --git a/examples/Makefile b/examples/Makefile
index 5ffb141..7f70a81 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -1,16 +1,18 @@
 # This makefile is on purpose not made with configure, to show how to use the library
 # The make file requires that the fann library is installed (see ../README)
 
+GCC=gcc
+
 TARGETS = xor_train xor_test xor_test_fixed simple_train steepness_train simple_test
 DEBUG_TARGETS = xor_train_debug xor_test_debug xor_test_fixed_debug
 
 all: $(TARGETS)
 
 %: %.c Makefile
-	gcc -O3 -lm -lfann $< -o $@
+	$(GCC) -O3 -lm -lfann $< -o $@
 
 %_fixed: %.c Makefile
-	gcc -O3 -lm -lfixedfann -DFIXEDFANN $< -o $@
+	$(GCC) -O3 -lm -lfixedfann -DFIXEDFANN $< -o $@
 
 clean:
 	rm -f $(TARGETS) $(DEBUG_TARGETS) xor_fixed.data xor_float.net xor_fixed.net
@@ -43,25 +45,30 @@ rundebugtest: $(DEBUG_TARGETS)
 	@echo Testing network with fixed points
 	./xor_test_fixed_debug
 
+#compiletest is used to test whether the library compiles cleanly with other compilers
+compiletest:
+	gcc -O3 -ggdb -lm -DDEBUG -Wall -Wformat-security -Wfloat-equal -Wshadow -Wpointer-arith -Wcast-qual -Wsign-compare -pedantic -ansi -I../src/ -I../src/include/ ../src/floatfann.c xor_train.c -o xor_train
+	g++ -O3 -ggdb -lm -DDEBUG -Wall -Wformat-security -Wfloat-equal -Wpointer-arith -Wcast-qual -Wsign-compare -pedantic -ansi -I../src/ -I../src/include/ ../src/floatfann.c xor_train.c -o xor_train
+
 debug: $(DEBUG_TARGETS)
 
 %_debug: %.c Makefile ../src/*c ../src/include/*h
-	gcc -O -ggdb -lm -DDEBUG -I../src/ -I../src/include/ ../src/floatfann.c $< -o $@
+	$(GCC) -O3 -ggdb -lm -DDEBUG -Wall -ansi -I../src/ -I../src/include/ ../src/floatfann.c $< -o $@
 
 %_fixed_debug: %.c Makefile
-	gcc -O -ggdb -lm -DDEBUG -DFIXEDFANN -I../src/ -I../src/include/ ../src/fixedfann.c $< -o $@
+	$(GCC) -O3 -ggdb -lm -DDEBUG -Wall -ansi -DFIXEDFANN -I../src/ -I../src/include/ ../src/fixedfann.c $< -o $@
 
-rundebug: $(TARGETS)
+rundebug: $(DEBUG_TARGETS)
 	@echo
 	@echo Training network
-	valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_train
+	valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_train_debug
 
 	@echo
 	@echo Testing network with floats
-	valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_test
+	valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_test_debug
 
 	@echo
 	@echo Testing network with fixed points
-	valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_test_fixed
+	valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_test_fixed_debug
 
 
diff --git a/examples/mushroom.c b/examples/mushroom.c
index 769f8d2..546c33f 100644
--- a/examples/mushroom.c
+++ b/examples/mushroom.c
@@ -29,7 +29,6 @@ int print_callback(unsigned int epochs, float error)
 
 int main()
 {
-	fann_type *calc_out;
 	const float connection_rate = 1;
 	const float learning_rate = (const float)0.4;
 	const unsigned int num_layers = 3;
@@ -41,7 +40,6 @@ int main()
 	struct fann_train_data *train_data, *test_data;
 	
 	unsigned int i = 0;
-	unsigned int decimal_point;
 
 	printf("Creating network.\n");
 
diff --git a/examples/robot.c b/examples/robot.c
index ad73af8..44fc080 100644
--- a/examples/robot.c
+++ b/examples/robot.c
@@ -29,7 +29,6 @@ int print_callback(unsigned int epochs, float error)
 
 int main()
 {
-	fann_type *calc_out;
 	const float connection_rate = 1;
 	const float learning_rate = (const float)0.7;
 	const unsigned int num_layers = 3;
@@ -41,7 +40,6 @@ int main()
 	struct fann_train_data *train_data, *test_data;
 	
 	unsigned int i = 0;
-	unsigned int decimal_point;
 
 	printf("Creating network.\n");
 
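
The calc_out and decimal_point variables removed from mushroom.c and robot.c
above were declared but never used; presumably this is warning cleanup for
the -Wall builds exercised by the new compiletest and debug targets in
examples/Makefile.
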
diff --git a/examples/simple_train.c b/examples/simple_train.c
index e423db4..1a6e44a 100644
--- a/examples/simple_train.c
+++ b/examples/simple_train.c
@@ -26,14 +26,14 @@ int main()
 	const unsigned int num_input = 2;
 	const unsigned int num_output = 1;
 	const unsigned int num_layers = 3;
-	const unsigned int num_neurons_hidden = 4;
+	const unsigned int num_neurons_hidden = 5;
 	const float desired_error = (const float)0.0001;
 	const unsigned int max_iterations = 500000;
 	const unsigned int iterations_between_reports = 1000;
 
 	struct fann *ann = fann_create(connection_rate, learning_rate, num_layers,
 		num_input, num_neurons_hidden, num_output);
-	
+
 	fann_train_on_file(ann, "xor.data", max_iterations,
 		iterations_between_reports, desired_error);
 	
diff --git a/examples/xor.data b/examples/xor.data
index e831fc6..1d8fe3e 100644
--- a/examples/xor.data
+++ b/examples/xor.data
@@ -1,9 +1,9 @@
 4 2 1
--1 -1
--1
--1 1
+0 0
+0
+0 1
 1
-1 -1
+1 0
 1
 1 1
--1
+0
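
For reference, the first line of a FANN training file is "<number of pairs>
<inputs per pair> <outputs per pair>", followed by alternating input and
output lines, so the whole file after this change reads:

	4 2 1
	0 0
	0
	0 1
	1
	1 0
	1
	1 1
	0

The move from the symmetric -1/1 encoding to 0/1 matches the plain sigmoid
activations (span 0 < y < 1) rather than the symmetric ones (span -1 < y < 1)
documented in fann_activation.h below.
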
diff --git a/examples/xor_train.c b/examples/xor_train.c
index 93ec555..f8edd21 100644
--- a/examples/xor_train.c
+++ b/examples/xor_train.c
@@ -56,8 +56,6 @@ int main()
 
 	data = fann_read_train_from_file("xor.data");
 
-	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
-	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
 	fann_init_weights(ann, data);
 	
 	fann_train_on_data(ann, data, max_iterations, iterations_between_reports, desired_error);
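
With the data now in the 0/1 range the default activation function is
sufficient, which is presumably why the two symmetric-stepwise calls are
dropped. Anyone keeping the old -1/1 xor.data can simply restore the removed
lines:

	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
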
diff --git a/src/Makefile.in b/src/Makefile.in
index a510eb1..62cc8c5 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -134,7 +134,6 @@ SUBDIRS = include
 lib_LTLIBRARIES = libfloatfann.la libdoublefann.la libfixedfann.la libfann.la
 
 AM_LDFLAGS = -version-info 2:0:1
-AM_CFLAGS = -O9 -Wall
 
 libfloatfann_la_SOURCES = floatfann.c
 libdoublefann_la_SOURCES = doublefann.c
diff --git a/src/fann.c b/src/fann.c
index 3c30b97..d829d33 100644
--- a/src/fann.c
+++ b/src/fann.c
@@ -16,7 +16,7 @@
   License along with this library; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
-
+ 
 #include <stdio.h>
 #include <stdlib.h>
 #include <stdarg.h>
@@ -28,14 +28,6 @@
 #include "fann.h"
 #include "fann_errno.h"
 
-#ifdef _MSC_VER
-/* MSVC++6 does not have powf in math.h
- */
-float powf(float _X, float _Y){
-	return ((float)pow((double)_X, (double)_Y));
-}
-#endif
-
 /* create a neural network.
  */
 struct fann * fann_create(float connection_rate, float learning_rate,
@@ -46,7 +38,7 @@ struct fann * fann_create(float connection_rate, float learning_rate,
 {
 	struct fann *ann;
 	va_list layer_sizes;
-	unsigned int *layers = calloc(num_layers, sizeof(unsigned int));
+	unsigned int *layers = (unsigned int *)calloc(num_layers, sizeof(unsigned int));
 	int i = 0;
 
 	va_start(layer_sizes, num_layers);
@@ -163,7 +155,7 @@ struct fann * fann_create_array(float connection_rate, float learning_rate, unsi
 		return NULL;
 	}
 	
-	if(connection_rate == 1){
+	if(connection_rate >= 1){
 		prev_layer_size = ann->num_input+1;
 		prev_layer = ann->first_layer;
 		last_layer = ann->last_layer;
@@ -287,15 +279,159 @@ struct fann * fann_create_array(float connection_rate, float learning_rate, unsi
 	return ann;
 }
 
+ 
+/* create a neural network with forward connections.
+ */
+struct fann * fann_create_forward(float learning_rate,
+	unsigned int num_layers, /* the number of layers, including the input and output layer */
+	...) /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
+{
+	struct fann *ann;
+	va_list layer_sizes;
+	unsigned int *layers = (unsigned int *)calloc(num_layers, sizeof(unsigned int));
+	int i = 0;
+
+	va_start(layer_sizes, num_layers);
+	for ( i=0 ; i<(int)num_layers ; i++ ) {
+		layers[i] = va_arg(layer_sizes, unsigned int);
+	}
+	va_end(layer_sizes);
+
+	ann = fann_create_forward_array(learning_rate, num_layers, layers);
+
+	free(layers);
+
+	return ann;
+}
+
+/* create a neural network with forward connections.
+ */
+struct fann * fann_create_forward_array(float learning_rate, unsigned int num_layers, unsigned int * layers)
+{
+	struct fann_layer *layer_it, *layer_it2, *last_layer;
+	struct fann *ann;
+	struct fann_neuron *neuron_it, *neuron_it2 = 0;
+	unsigned int i;
+	unsigned int num_neurons_in, num_neurons_out;
+	unsigned int num_connections;
+	
+#ifdef FIXEDFANN
+	unsigned int decimal_point;
+	unsigned int multiplier;
+#endif
+	/* seed random */
+	fann_seed_rand();
+	
+	/* allocate the general structure */
+	ann = fann_allocate_structure(learning_rate, num_layers);
+	if(ann == NULL){
+		return NULL;
+	}
+
+	ann->connection_rate = 1;
+	ann->forward_connections = 1;
+#ifdef FIXEDFANN
+	decimal_point = ann->decimal_point;
+	multiplier = ann->multiplier;
+#endif
+	fann_update_stepwise_hidden(ann);
+	fann_update_stepwise_output(ann);
+
+	/* determine how many neurons there should be in each layer */
+	i = 0;
+	for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+		/* we do not allocate room here, but we make sure that
+		   last_neuron - first_neuron is the number of neurons */
+		layer_it->first_neuron = NULL;
+		layer_it->last_neuron = layer_it->first_neuron + layers[i++] +1; /* +1 for bias */
+		ann->total_neurons += layer_it->last_neuron - layer_it->first_neuron;
+	}
+	
+	ann->num_output = (ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron -1;
+	ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron -1;
+	
+	/* allocate room for the actual neurons */
+	fann_allocate_neurons(ann);
+	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
+		fann_destroy(ann);
+		return NULL;
+	}
+	
+#ifdef DEBUG
+	printf("creating fully forward connected network with learning rate %f.\n", learning_rate);
+	printf("input\n");
+	printf("  layer       : %d neurons, 1 bias\n", ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
+#endif
+	
+	num_neurons_in = ann->num_input;
+	last_layer = ann->last_layer;
+	for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
+		num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
+		num_connections = num_neurons_in * num_neurons_out + num_neurons_out;
+		ann->total_connections += num_connections;
+		
+		/* Now split out the connections on the different neurons */
+		for(i = 0; i != num_neurons_out; i++){
+			layer_it->first_neuron[i].num_connections = num_neurons_in+1;
+		}
+		
+#ifdef DEBUG
+		printf("  layer       : %d neurons, 1 bias\n", num_neurons_out);
+#endif
+		/* used in the next run of the loop */
+		num_neurons_in += num_neurons_out;
+	}
+	
+	fann_allocate_connections(ann);
+	if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
+		fann_destroy(ann);
+		return NULL;
+	}
+
+	/* Connections are created from all neurons to all neurons in later layers
+	 */
+	num_neurons_in = ann->num_input+1;
+	for(layer_it = ann->first_layer+1; layer_it != last_layer; layer_it++){
+		for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron-1; neuron_it++){
+
+			i = 0;
+			for(layer_it2 = ann->first_layer; layer_it2 != layer_it; layer_it2++){
+				for(neuron_it2 = layer_it2->first_neuron; neuron_it2 != layer_it2->last_neuron-1; neuron_it2++){
+					
+					neuron_it->weights[i] = (fann_type)fann_random_weight();
+					/* these connections are still initialized as for fully connected networks,
+					   to allow operations that are not optimized for fully connected networks to work.
+					*/
+					neuron_it->connected_neurons[i] = neuron_it2;
+					i++;
+				}
+			}
+
+			/* The connection to the bias neuron */
+			neuron_it->weights[i] = (fann_type)fann_random_weight();
+			neuron_it->connected_neurons[i] = neuron_it2;
+		}
+		num_neurons_in += layer_it->last_neuron - layer_it->first_neuron;
+	}
+
+#ifdef DEBUG
+	printf("output\n");
+#endif
+	
+	return ann;
+}
+
 /* runs the network.
  */
 fann_type* fann_run(struct fann *ann, fann_type *input)
 {
 	struct fann_neuron *neuron_it, *last_neuron, *neurons, **neuron_pointers;
-	unsigned int activation_function, i, num_connections, num_input, num_output;
+	unsigned int activation_function, i, num_connections, num_neurons, num_input, num_output;
 	fann_type neuron_value, *output;
 	fann_type *weights;
-	struct fann_layer *layer_it, *last_layer;
+	struct fann_layer *layer_it, *layer_it2, *last_layer;
 	
 	
 	/* store some variables locally for fast access */
@@ -405,7 +541,43 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 			neuron_value = 0;
 			num_connections = neuron_it->num_connections;
 			weights = neuron_it->weights;
-			if(ann->connection_rate == 1){
+			
+			if(ann->connection_rate >= 1){
+				if(ann->forward_connections){
+					/* first go through the connections to the previous layers,
+					   then let the normal operation go through the rest.
+					*/
+
+					for(layer_it2 = ann->first_layer;
+						layer_it2 != layer_it-1; layer_it2++){
+
+						neurons = layer_it2->first_neuron;
+						num_neurons = layer_it2->last_neuron - neurons - 1; /* don't use bias from previous layers */
+						i = num_neurons & 3; /* same as modulo 4 */
+						switch(i) {
+							case 3:
+								neuron_value += fann_mult(weights[2], neurons[2].value);
+							case 2:
+								neuron_value += fann_mult(weights[1], neurons[1].value);
+							case 1:
+								neuron_value += fann_mult(weights[0], neurons[0].value);
+							case 0:
+								break;
+						}
+							
+						for(;i != num_neurons; i += 4){
+							neuron_value +=
+								fann_mult(weights[i], neurons[i].value) +
+								fann_mult(weights[i+1], neurons[i+1].value) +
+								fann_mult(weights[i+2], neurons[i+2].value) +
+								fann_mult(weights[i+3], neurons[i+3].value);
+						}
+							
+						num_connections -= num_neurons;
+						weights += num_neurons;
+					}
+				}
+				
 				neurons = (layer_it-1)->first_neuron;
 				
 				i = num_connections & 3; /* same as modulo 4 */
@@ -504,7 +676,7 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 					fann_error((struct fann_error *)ann, FANN_E_CANT_USE_ACTIVATION);
 			}
 		}
-	}	
+	}
 	
 	/* set the output */
 	output = ann->output;
@@ -540,6 +712,39 @@ void fann_randomize_weights(struct fann *ann, fann_type min_weight, fann_type ma
 	}
 }
 
+void fann_print_connections(struct fann *ann)
+{
+	struct fann_layer *layer_it;
+	struct fann_neuron *neuron_it;
+	unsigned int i, value;
+	char *neurons;
+	unsigned int num_neurons = fann_get_total_neurons(ann)+1;
+	neurons = (char *)malloc(num_neurons+1);
+	neurons[num_neurons] = 0;
+
+	printf("Layer / Neuron ");
+	for(i = 0; i < num_neurons; i++){
+		printf("%d", i%10);
+	}
+	printf("\n");
+	
+	for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
+		for(neuron_it = layer_it->first_neuron;
+			neuron_it != layer_it->last_neuron; neuron_it++){
+			memset(neurons, (int)'.', num_neurons);
+			for(i = 0; i < neuron_it->num_connections; i++){
+				value = (unsigned int)(fann_abs(neuron_it->weights[i])+0.5);
+				if(value > 25) value = 25;
+				neurons[neuron_it->connected_neurons[i] - ann->first_layer->first_neuron] = 'a' + value;
+			}
+			printf("L %03d / N %04d %s\n", layer_it - ann->first_layer,
+				neuron_it - ann->first_layer->first_neuron, neurons);
+		}
+	}
+
+	free(neurons);
+}
+
 /* Initialize the weights using Widrow + Nguyen's algorithm.
 */
 void fann_init_weights(struct fann *ann, struct fann_train_data *train_data)
@@ -563,15 +768,15 @@ void fann_init_weights(struct fann *ann, struct fann_train_data *train_data)
 	}
 
 	num_hidden_neurons = ann->total_neurons - (ann->num_input + ann->num_output + (ann->last_layer - ann->first_layer));
-	scale_factor = powf((float)(0.7f * (float)num_hidden_neurons),
-				  (float)(1.0f / (float)ann->num_input)) / (float)(largest_inp - smallest_inp);
+	scale_factor = pow((double)(0.7f * (double)num_hidden_neurons),
+				  (double)(1.0f / (double)ann->num_input)) / (double)(largest_inp - smallest_inp);
 
 #ifdef DEBUG
 	printf("Initializing weights with scale factor %f\n", scale_factor);
 #endif
 	for ( layer_it = ann->first_layer+1; layer_it != ann->last_layer ; layer_it++) {
 #ifdef DEBUG
-		printf(" Layer: %x/%x (%d neurons)\n", layer_it, ann->last_layer, layer_it->last_neuron - layer_it->first_neuron);
+		printf(" Layer: %x/%x (%d neurons)\n", layer_it-ann->first_layer, ann->last_layer-ann->first_layer, layer_it->last_neuron - layer_it->first_neuron);
 #endif
 		num_neurons_out = layer_it->last_neuron - layer_it->first_neuron - 1;
 		num_neurons_in = (layer_it-1)->last_neuron - (layer_it-1)->first_neuron - 1;
@@ -579,13 +784,13 @@ void fann_init_weights(struct fann *ann, struct fann_train_data *train_data)
 		last_neuron = layer_it->last_neuron-1;
 		bias_neuron = (layer_it-1)->last_neuron-1;
 
-		for(neuron_it = layer_it->first_neuron ; neuron_it != last_neuron; neuron_it++) {
+		for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++) {
 #ifdef DEBUG
-			printf("  Neuron %x/%x (%d connections)\n", neuron_it, last_neuron, neuron_it->num_connections);
+			printf("  Neuron %x/%x (%d connections)\n", neuron_it-layer_it->first_neuron, last_neuron-layer_it->first_neuron, neuron_it->num_connections);
 #endif
 			for ( num_connect = 0 ; num_connect < neuron_it->num_connections ; num_connect++ ) {
 #ifdef DEBUG
-				printf("   Connection %d/%d (%x)\n", num_connect, neuron_it->num_connections, neuron_it->connected_neurons[num_connect]);
+				printf("   Connection %d/%d (%x)\n", num_connect, neuron_it->num_connections, neuron_it->connected_neurons[num_connect] - ann->first_layer->first_neuron);
 #endif
 				if ( bias_neuron == neuron_it->connected_neurons[num_connect] ) {
 #ifdef FIXEDFANN
@@ -633,6 +838,7 @@ struct fann * fann_allocate_structure(float learning_rate, unsigned int num_laye
 	ann->num_output = 0;
 	ann->train_deltas = NULL;
 	ann->num_errors = 0;
+	ann->forward_connections = 0;
 
 	fann_init_error_data((struct fann_error *)ann);
 
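
The summation loop added to fann_run() for forward connections reuses the
library's four-way unrolled dot product: the switch consumes num_neurons % 4
leading elements, then the main loop walks four elements at a time. The same
technique as a standalone sketch, with hypothetical names:

	/* dot product of n pairs, unrolled by four as in fann_run() */
	float dot4(const float *w, const float *v, unsigned int n)
	{
		float sum = 0;
		unsigned int i = n & 3; /* same as n % 4 */
		switch(i) { /* deliberate fall-through handles the remainder */
			case 3: sum += w[2] * v[2];
			case 2: sum += w[1] * v[1];
			case 1: sum += w[0] * v[0];
			case 0: break;
		}
		for(; i != n; i += 4){
			sum += w[i] * v[i] + w[i+1] * v[i+1]
				+ w[i+2] * v[i+2] + w[i+3] * v[i+3];
		}
		return sum;
	}
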
diff --git a/src/fann_error.c b/src/fann_error.c
index b1b4c04..f20a910 100644
--- a/src/fann_error.c
+++ b/src/fann_error.c
@@ -68,9 +68,9 @@ char * fann_get_errstr(struct fann_error *errdat)
 
 /* change where errors are logged to
  */
-void fann_set_error_log(struct fann_error *errdat, FILE *log)
+void fann_set_error_log(struct fann_error *errdat, FILE *log_file)
 {
-  errdat->error_log = log;
+  errdat->error_log = log_file;
 }
 
 /* prints the last error to the error log (default stderr)
@@ -106,49 +106,49 @@ void fann_error(struct fann_error *errdat, const unsigned int errno, ...)
 	case FANN_E_NO_ERROR:
 		break;
 	case FANN_E_CANT_OPEN_CONFIG_R:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open configuration file \"%s\" for reading.\n", ap);
+		vsprintf(errstr, "Unable to open configuration file \"%s\" for reading.\n", ap);
 		break;
 	case FANN_E_CANT_OPEN_CONFIG_W:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open configuration file \"%s\" for writing.\n", ap);
+		vsprintf(errstr, "Unable to open configuration file \"%s\" for writing.\n", ap);
 		break;
 	case FANN_E_WRONG_CONFIG_VERSION:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Wrong version of configuration file, aborting read of configuration file \"%s\".\n", ap);
+		vsprintf(errstr, "Wrong version of configuration file, aborting read of configuration file \"%s\".\n", ap);
 		break;
 	case FANN_E_CANT_READ_CONFIG:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading info from configuration file \"%s\".\n", ap);
+		vsprintf(errstr, "Error reading info from configuration file \"%s\".\n", ap);
 		break;
 	case FANN_E_CANT_READ_NEURON:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading neuron info from configuration file \"%s\".\n", ap);
+		vsprintf(errstr, "Error reading neuron info from configuration file \"%s\".\n", ap);
 		break;
 	case FANN_E_CANT_READ_CONNECTIONS:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading connections from configuration file \"%s\".\n", ap);
+		vsprintf(errstr, "Error reading connections from configuration file \"%s\".\n", ap);
 		break;
 	case FANN_E_WRONG_NUM_CONNECTIONS:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "ERROR connections_so_far=%d, total_connections=%d\n", ap);
+		vsprintf(errstr, "ERROR connections_so_far=%d, total_connections=%d\n", ap);
 		break;
 	case FANN_E_CANT_OPEN_TD_W:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open train data file \"%s\" for writing.\n", ap);
+		vsprintf(errstr, "Unable to open train data file \"%s\" for writing.\n", ap);
 		break;
 	case FANN_E_CANT_OPEN_TD_R:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open train data file \"%s\" for writing.\n", ap);
+		vsprintf(errstr, "Unable to open train data file \"%s\" for writing.\n", ap);
 		break;
 	case FANN_E_CANT_READ_TD:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading info from train data file \"%s\", line: %d.\n", ap);
+		vsprintf(errstr, "Error reading info from train data file \"%s\", line: %d.\n", ap);
 		break;
 	case FANN_E_CANT_ALLOCATE_MEM:
-		snprintf(errstr, FANN_ERRSTR_MAX, "Unable to allocate memory.\n");
+		sprintf(errstr, "Unable to allocate memory.\n");
 		break;
 	case FANN_E_CANT_TRAIN_ACTIVATION:
-		snprintf(errstr, FANN_ERRSTR_MAX, "Unable to train with the selected activation function.\n");
+		sprintf(errstr, "Unable to train with the selected activation function.\n");
 		break;
 	case FANN_E_CANT_USE_ACTIVATION:
-		snprintf(errstr, FANN_ERRSTR_MAX, "Unable to use the selected activation function.\n");
+		sprintf(errstr, "Unable to use the selected activation function.\n");
 		break;
 	case FANN_E_TRAIN_DATA_MISMATCH:
-		snprintf(errstr, FANN_ERRSTR_MAX, "Training data must be of equivalent structure.");
+		sprintf(errstr, "Training data must be of equivalent structure.");
 		break;
 	default:
-		vsnprintf(errstr, FANN_ERRSTR_MAX, "Unknown error.\n", ap);
+		vsprintf(errstr, "Unknown error.\n", ap);
 		break;
 	}
 	va_end(ap);
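
The vsnprintf()/snprintf() calls become vsprintf()/sprintf(), presumably as
part of the "compatibility with different compilers" ChangeLog entry: the
n-variants are C99 and missing from older compilers such as MSVC++6 (the same
reason the powf() wrapper removed from fann.c above existed). The cost is
losing the length bound, so errstr must stay large enough for the longest
message plus its arguments. The calling pattern, sketched with hypothetical
names:

	#include <stdarg.h>
	#include <stdio.h>

	#define ERRSTR_MAX 128

	/* format a message into a fixed buffer, pre-C99 style */
	void report(const char *fmt, ...)
	{
		char errstr[ERRSTR_MAX];
		va_list ap;
		va_start(ap, fmt);
		vsprintf(errstr, fmt, ap); /* unbounded: fmt + args must fit */
		va_end(ap);
		fputs(errstr, stderr);
	}
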
diff --git a/src/fann_io.c b/src/fann_io.c
index c9e7611..eec5053 100644
--- a/src/fann_io.c
+++ b/src/fann_io.c
@@ -149,18 +149,18 @@ int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuratio
 		/* save the decimal_point on a separate line */
 		fprintf(conf, "%u\n", decimal_point);
 		
-		/* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
-		fprintf(conf, "%u %f %f %u %u %d %d\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, (int)(ann->activation_hidden_steepness * fixed_multiplier), (int)(ann->activation_output_steepness * fixed_multiplier));
+		/* save the number of layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
+		fprintf(conf, "%u %f %f %u %u %u %d %d\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, (int)(ann->activation_hidden_steepness * fixed_multiplier), (int)(ann->activation_output_steepness * fixed_multiplier));
 	}else{
-		/* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
-		fprintf(conf, "%u %f %f %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);
+		/* save the number of layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
+		fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);
 	}
 #else
 	/* save the decimal_point on a separate line */
 	fprintf(conf, "%u\n", ann->decimal_point);
 	
-	/* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
-	fprintf(conf, "%u %f %f %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);	
+	/* save the number of layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
+	fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);	
 #endif
 
 	for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
@@ -278,7 +278,7 @@ void fann_save_train_internal_fd(struct fann_train_data* data, FILE *file, char
  */
 struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 {
-	unsigned int num_layers, layer_size, activation_function_hidden, activation_function_output, input_neuron, i;
+	unsigned int num_layers, layer_size, activation_function_hidden, activation_function_output, input_neuron, i, forward_connections;
 #ifdef FIXEDFANN
 	unsigned int decimal_point, multiplier;
 #endif
@@ -316,7 +316,7 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 	multiplier = 1 << decimal_point;
 #endif
 	
-	if(fscanf(conf, "%u %f %f %u %u "FANNSCANF" "FANNSCANF"\n", &num_layers, &learning_rate, &connection_rate, &activation_function_hidden, &activation_function_output, &activation_hidden_steepness, &activation_output_steepness) != 7){
+	if(fscanf(conf, "%u %f %f %u %u %u "FANNSCANF" "FANNSCANF"\n", &num_layers, &learning_rate, &connection_rate, &forward_connections, &activation_function_hidden, &activation_function_output, &activation_hidden_steepness, &activation_output_steepness) != 8){
 		fann_error(NULL, FANN_E_CANT_READ_CONFIG, configuration_file);
 		return NULL;
 	}
@@ -326,6 +326,7 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 		return NULL;
 	}
 	ann->connection_rate = connection_rate;
+	ann->forward_connections = forward_connections;
 
 #ifdef FIXEDFANN
 	ann->decimal_point = decimal_point;
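
With the new field, the second header line of a saved network carries eight
values, in the order num_layers learning_rate connection_rate
forward_connections activation_function_hidden activation_function_output
activation_hidden_steepness activation_output_steepness. For a float network
that line might look like this (values illustrative only):

	3 0.700000 1.000000 0 6 6 0.500000 0.500000
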
diff --git a/src/fann_train.c b/src/fann_train.c
index 6081a27..32569c7 100644
--- a/src/fann_train.c
+++ b/src/fann_train.c
@@ -102,7 +102,9 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 		last_neuron = layer_it->last_neuron;
 		
 		/* for each connection in this layer, propagate the error backwards*/
-		if(ann->connection_rate == 1){ /* optimization for fully connected networks */
+		if(ann->connection_rate >= 1 && !ann->forward_connections){
+			/* optimization for fully connected networks */
+			/* but not forward connected networks */
 			shift_prev_layer = (layer_it-1)->first_neuron - first_neuron;
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
@@ -179,7 +181,9 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 		printf("layer[%d]\n", layer_it - first_layer);
 #endif
 		last_neuron = layer_it->last_neuron;
-		if(ann->connection_rate == 1){ /* optimization for fully connected networks */
+		if(ann->connection_rate >= 1 && !ann->forward_connections){
+			/* optimization for fully connected networks */
+			/* but not forward connected networks */			
 			neurons = (layer_it-1)->first_neuron;
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
@@ -194,9 +198,9 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 		}else{
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
-				tmp_delta = *(delta_begin + (neuron_it - first_neuron));
+				tmp_delta = *(delta_begin + (neuron_it - first_neuron)) * learning_rate;
 				for(i = neuron_it->num_connections ; i-- ; ){
-					neuron_it->weights[i] += learning_rate * tmp_delta * neuron_it->connected_neurons[i]->value;
+					neuron_it->weights[i] += tmp_delta * neuron_it->connected_neurons[i]->value;
 				}
 			}
 		}
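
The last hunk is a small strength reduction: learning_rate is constant across
one neuron's connections, so folding it into tmp_delta once per neuron saves
a multiplication per weight. In sketch form:

	tmp_delta = delta * learning_rate;   /* once per neuron     */
	weights[i] += tmp_delta * value;     /* once per connection */
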
diff --git a/src/fann_train_data.c b/src/fann_train_data.c
index 875de8b..5123d13 100644
--- a/src/fann_train_data.c
+++ b/src/fann_train_data.c
@@ -221,7 +221,7 @@ struct fann_train_data * fann_duplicate_train_data(struct fann_train_data *data)
 	struct fann_train_data * dest;
 	unsigned int x;
 
-	if ( (dest = malloc(sizeof(struct fann_train_data))) == NULL ) {
+	if ( (dest = (struct fann_train_data *)malloc(sizeof(struct fann_train_data))) == NULL ) {
 		fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
 		return NULL;
 	}
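
The explicit cast on malloc()'s return value here (and on calloc() in fann.c
above) is what lets the new g++ pass of the compiletest target build these
sources: C++, unlike C, does not implicitly convert void * to other pointer
types.
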
diff --git a/src/include/fann.h b/src/include/fann.h
index c413db5..d340750 100644
--- a/src/include/fann.h
+++ b/src/include/fann.h
@@ -77,7 +77,18 @@ struct fann * fann_create(float connection_rate, float learning_rate,
 */
 struct fann * fann_create_array(float connection_rate, float learning_rate,
 	unsigned int num_layers, unsigned int * layers);
+	
+
+/* create a neural network with forward connections.
+ */
+struct fann * fann_create_forward(float learning_rate,
+	unsigned int num_layers, /* the number of layers, including the input and output layer */
+	...); /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
 
+/* create a neural network with forward connections.
+ */
+struct fann * fann_create_forward_array(float learning_rate, unsigned int num_layers, unsigned int * layers);	
+	
 /* Runs an input through the network and returns the output.
  */
 fann_type* fann_run(struct fann *ann, fann_type *input);
@@ -95,7 +106,8 @@ void fann_randomize_weights(struct fann *ann, fann_type min_weight, fann_type ma
 */
 void fann_init_weights(struct fann *ann, struct fann_train_data * train_data);
 
-
+/* print out which connections there are in the ann */
+void fann_print_connections(struct fann *ann);	
 	
 /* ----- Implemented in fann_io.c Saving and loading of ANNs ----- */
 
@@ -316,7 +328,7 @@ unsigned int fann_get_multiplier(struct fann *ann);
 	
 /* change where errors are logged to
  */
-void fann_set_error_log(struct fann_error *errdat, FILE *log);
+void fann_set_error_log(struct fann_error *errdat, FILE *log_file);
 
 /* returns the last error number
  */
diff --git a/src/include/fann_activation.h b/src/include/fann_activation.h
index 1183afc..b25656f 100644
--- a/src/include/fann_activation.h
+++ b/src/include/fann_activation.h
@@ -61,13 +61,11 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    One of the most used activation functions.
    span: -1 < y < 1
    y = tanh(s*x) = 2/(1 + exp(-2*s*x)) - 1, d = s*(1-(y*y))
-   NOT implemented yet.
 */
 #define FANN_SIGMOID_SYMMETRIC 5
 	
 /* Stepwise linear approximation to symmetric sigmoid.
    Faster than symmetric sigmoid but a bit less precise.
-   NOT implemented yet.
 */
 #define FANN_SIGMOID_SYMMETRIC_STEPWISE 6
 
diff --git a/src/include/fann_data.h b/src/include/fann_data.h
index 5544b79..80bf85f 100644
--- a/src/include/fann_data.h
+++ b/src/include/fann_data.h
@@ -75,6 +75,13 @@ struct fann
 	 */
 	float connection_rate;
 
+	/* is 1 if forward connections are used in the ann, otherwise 0.
+	 * Forward connections are connections that skip layers.
+	 * A fully connected ann with forward connections is an ann where
+	 * neurons have connections to all neurons in all later layers.
+	 */
+	unsigned int forward_connections;
+
 	/* pointer to the first layer (input layer) in an array of all the layers,
 	 * including the input and output layers 
 	 */
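
A worked example of the connection counts implied by this flag: a 2-3-1
network without forward connections has (2+1)*3 + (3+1)*1 = 13 weights (the
+1 terms are the bias neurons); with forward connections the output neuron
additionally sees both input neurons, giving 2*3+3 + (2+3)*1+1 = 15, matching
the num_connections arithmetic in fann_create_forward_array() above.
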
diff --git a/src/include/fann_internal.h b/src/include/fann_internal.h
index 53691a0..2342510 100644
--- a/src/include/fann_internal.h
+++ b/src/include/fann_internal.h
@@ -26,8 +26,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 #include <stdio.h>
 #include "fann_data.h"
 
-#define FANN_FIX_VERSION "FANN_FIX_1.0"
-#define FANN_FLO_VERSION "FANN_FLO_1.0"
+#define FANN_FIX_VERSION "FANN_FIX_1.1"
+#define FANN_FLO_VERSION "FANN_FLO_1.1"
 
 #ifdef FIXEDFANN
 #define FANN_CONF_VERSION FANN_FIX_VERSION
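
The version strings are bumped because the saved-file layout changed: the
header line gains the forward_connections field and fann_create_from_fd() now
expects eight values, so files written by a 1.0 library are rejected with a
wrong-version error instead of being misparsed.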
