[libfann] 69/242: done some stuff on more activation functions, does not work yet but in cvs anyway, just use the regular activation functions and there will be no problems

Christian Kastner chrisk-guest at moszumanska.debian.org
Sat Oct 4 21:10:21 UTC 2014


This is an automated email from the git hooks/post-receive script.

chrisk-guest pushed a commit to tag Version2_0_0
in repository libfann.

commit 811276338a8ff6561e31c7bb57b383aa671da7e8
Author: Steffen Nissen <lukesky at diku.dk>
Date:   Mon Feb 9 10:28:14 2004 +0000

    done some stuff on more activation functions, does not work yet but in cvs anyway, just use the regular activation functions and there will be no problems
---
 src/fann.c                    | 162 +++++++++++++++++++++++++--------
 src/fann_internal.c           | 202 ++++++++++++++++++++++++++++++++----------
 src/include/fann_activation.h |  22 +++--
 src/include/fann_data.h       |   3 +-
 src/include/fann_errno.h      |   8 +-
 src/include/fann_internal.h   |   2 +-
 6 files changed, 303 insertions(+), 96 deletions(-)

diff --git a/src/fann.c b/src/fann.c
index d158ac1..271ba2e 100644
--- a/src/fann.c
+++ b/src/fann.c
@@ -28,6 +28,8 @@
 
 #include "fann_errno.h"
 
+//#define DEBUGTRAIN
+
 /* create a neural network.
  */
 struct fann * fann_create_array(float connection_rate, float learning_rate, unsigned int num_layers, unsigned int * layers)
@@ -63,7 +65,8 @@ struct fann * fann_create_array(float connection_rate, float learning_rate, unsi
 	decimal_point = ann->decimal_point;
 	multiplier = ann->multiplier;
 #endif
-	fann_initialise_result_array(ann);
+	fann_update_stepwise_hidden(ann);
+	fann_update_stepwise_output(ann);
 
 	/* determine how many neurons there should be in each layer */
 	i = 0;
@@ -439,15 +442,30 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 #endif
 	/* calculate the error and place it in the output layer */
 	delta_it = delta_begin + (last_layer_begin - first_neuron);
+
 	for(; last_layer_begin != last_layer_end; last_layer_begin++){
 		neuron_value = last_layer_begin->value;
-		/* TODO add switch the minute there are other activation functions */
-		*delta_it = fann_sigmoid_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
+		switch(ann->activation_function_output){
+			case FANN_LINEAR:
+				*delta_it = fann_linear_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
+				break;
+			case FANN_SIGMOID:
+			case FANN_SIGMOID_STEPWISE:
+				*delta_it = fann_sigmoid_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
+				break;
+			case FANN_SIGMOID_SYMMETRIC:
+			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+				*delta_it = fann_sigmoid_symmetric_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
+				break;
+			default:
+				fann_error(ann, FANN_E_CANT_TRAIN_ACTIVATION);
+				return;
+		}
 		
 		ann->error_value += (*desired_output - neuron_value) * (*desired_output - neuron_value);
 		
 #ifdef DEBUGTRAIN
-		printf("delta[%d] = "FANNPRINTF"\n", (delta_it - delta_begin), *delta_it);
+		printf("delta1[%d] = "FANNPRINTF"\n", (delta_it - delta_begin), *delta_it);
 #endif
 		desired_output++;
 		delta_it++;
@@ -467,6 +485,9 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 				tmp_delta = *(delta_begin + (neuron_it - first_neuron));
 				for(i = 0; i < neuron_it->num_connections; i++){
 					*(delta_begin + i + shift_prev_layer) += tmp_delta * neuron_it->weights[i];
+#ifdef DEBUGTRAIN
+					printf("delta2[%d] = "FANNPRINTF" += ("FANNPRINTF" * "FANNPRINTF")\n", (i + shift_prev_layer), *(delta_begin + i + shift_prev_layer), tmp_delta, neuron_it->weights[i]);
+#endif
 				}
 			}
 		}else{
@@ -483,16 +504,45 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 		/* then calculate the actual errors in the previous layer */
 		delta_it = delta_begin + ((layer_it-1)->first_neuron - first_neuron);
 		last_neuron = (layer_it-1)->last_neuron;
-		for(neuron_it = (layer_it-1)->first_neuron;
-			neuron_it != last_neuron; neuron_it++){
-			neuron_value = neuron_it->value;
-			/* TODO add switch the minute there are other activation functions */
-			*delta_it *= fann_sigmoid_derive(activation_hidden_steepness, neuron_value) * learning_rate;
-			
+		
+		switch(ann->activation_function_hidden){
+			case FANN_LINEAR:
+				for(neuron_it = (layer_it-1)->first_neuron;
+					neuron_it != last_neuron; neuron_it++){
+					neuron_value = neuron_it->value;
+					*delta_it *= fann_linear_derive(activation_hidden_steepness, neuron_value) * learning_rate;
+					delta_it++;
+				}
+				break;
+			case FANN_SIGMOID:
+			case FANN_SIGMOID_STEPWISE:
+				for(neuron_it = (layer_it-1)->first_neuron;
+					neuron_it != last_neuron; neuron_it++){
+					neuron_value = neuron_it->value;
+					neuron_value = fann_clip(neuron_value, 0.01, 0.99);
+					*delta_it *= fann_sigmoid_derive(activation_hidden_steepness, neuron_value);
+#ifdef DEBUGTRAIN
+					printf("delta3[%d] = "FANNPRINTF" *= fann_sigmoid_derive(%f, %f) * %f\n", (delta_it - delta_begin), *delta_it, activation_hidden_steepness, neuron_value, learning_rate);
+#endif
+					delta_it++;
+				}
+				break;
+			case FANN_SIGMOID_SYMMETRIC:
+			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+				for(neuron_it = (layer_it-1)->first_neuron;
+					neuron_it != last_neuron; neuron_it++){
+					neuron_value = neuron_it->value;
+					neuron_value = fann_clip(neuron_value, -0.98, 0.98);
+					*delta_it *= fann_sigmoid_symmetric_derive(activation_hidden_steepness, neuron_value);
 #ifdef DEBUGTRAIN
-			printf("delta[%d] = "FANNPRINTF"\n", delta_it - delta_begin, *delta_it);
+					printf("delta3[%d] = "FANNPRINTF" *= fann_sigmoid_symmetric_derive(%f, %f) * %f\n", (delta_it - delta_begin), *delta_it, activation_hidden_steepness, neuron_value, learning_rate);
 #endif
-			delta_it++;
+					delta_it++;
+				}
+				break;
+			default:
+				fann_error(ann, FANN_E_CANT_TRAIN_ACTIVATION);
+				return;
 		}
 	}
 	
@@ -511,7 +561,10 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 				neuron_it != last_neuron; neuron_it++){
 				tmp_delta = *(delta_begin + (neuron_it - first_neuron));
 				for(i = 0; i < neuron_it->num_connections; i++){
-					neuron_it->weights[i] += tmp_delta * neurons[i].value;
+#ifdef DEBUGTRAIN
+					printf("weights[%d] += "FANNPRINTF" = %f * %f\n", i, tmp_delta * neurons[i].value, tmp_delta, neurons[i].value);
+#endif
+					neuron_it->weights[i] += learning_rate * tmp_delta * neurons[i].value;
 				}
 			}
 		}else{
@@ -519,7 +572,7 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 				neuron_it != last_neuron; neuron_it++){
 				tmp_delta = *(delta_begin + (neuron_it - first_neuron));
 				for(i = 0; i < neuron_it->num_connections; i++){
-					neuron_it->weights[i] += tmp_delta * neuron_it->connected_neurons[i]->value;
+					neuron_it->weights[i] += learning_rate * tmp_delta * neuron_it->connected_neurons[i]->value;
 				}
 			}
 		}
@@ -735,7 +788,8 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 {
 	struct fann_neuron *neuron_it, *last_neuron, *neurons, **neuron_pointers;
 	unsigned int activation_function, i, num_connections, num_input, num_output;
-	fann_type neuron_value, *weights, *output;
+	fann_type neuron_value, *output;
+	fann_type *weights;
 	struct fann_layer *layer_it, *last_layer;
 	
 	
@@ -755,26 +809,25 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 #endif
 	
 	/* values used for the stepwise linear sigmoid function */
-	fann_type r1 = 0, r2 = 0, r3 = 0, r4 = 0, r5 = 0, r6 = 0;
+	fann_type rh1 = 0, rh2 = 0, rh3 = 0, rh4 = 0, rh5 = 0, rh6 = 0;
+	fann_type ro1 = 0, ro2 = 0, ro3 = 0, ro4 = 0, ro5 = 0, ro6 = 0;
 	fann_type h1 = 0, h2 = 0, h3 = 0, h4 = 0, h5 = 0, h6 = 0;
 	fann_type o1 = 0, o2 = 0, o3 = 0, o4 = 0, o5 = 0, o6 = 0;
-	
+
+	switch(ann->activation_function_hidden){
 #ifdef FIXEDFANN
-	if(activation_function_output == FANN_SIGMOID_STEPWISE ||
-		activation_function_hidden == FANN_SIGMOID_STEPWISE ||
-		activation_function_output == FANN_SIGMOID ||
-		activation_function_hidden == FANN_SIGMOID){
-#else
-		if(activation_function_output == FANN_SIGMOID_STEPWISE ||
-			activation_function_hidden == FANN_SIGMOID_STEPWISE){
+		case FANN_SIGMOID:
+		case FANN_SIGMOID_SYMMETRIC:
 #endif
-			/* the results */
-			r1 = ann->activation_results[0];
-			r2 = ann->activation_results[1];
-			r3 = ann->activation_results[2];
-			r4 = ann->activation_results[3];
-			r5 = ann->activation_results[4];
-			r6 = ann->activation_results[5];
+		case FANN_SIGMOID_STEPWISE:
+		case FANN_SIGMOID_SYMMETRIC_STEPWISE:			
+			/* the hidden results */
+			rh1 = ann->activation_hidden_results[0];
+			rh2 = ann->activation_hidden_results[1];
+			rh3 = ann->activation_hidden_results[2];
+			rh4 = ann->activation_hidden_results[3];
+			rh5 = ann->activation_hidden_results[4];
+			rh6 = ann->activation_hidden_results[5];
 			
 			/* the hidden parameters */
 			h1 = ann->activation_hidden_values[0];
@@ -783,6 +836,25 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 			h4 = ann->activation_hidden_values[3];
 			h5 = ann->activation_hidden_values[4];
 			h6 = ann->activation_hidden_values[5];
+			break;
+		default:
+			break;
+	}
+			
+	switch(ann->activation_function_output){
+#ifdef FIXEDFANN
+		case FANN_SIGMOID:
+		case FANN_SIGMOID_SYMMETRIC:
+#endif
+		case FANN_SIGMOID_STEPWISE:
+		case FANN_SIGMOID_SYMMETRIC_STEPWISE:			
+			/* the output results */
+			ro1 = ann->activation_output_results[0];
+			ro2 = ann->activation_output_results[1];
+			ro3 = ann->activation_output_results[2];
+			ro4 = ann->activation_output_results[3];
+			ro5 = ann->activation_output_results[4];
+			ro6 = ann->activation_output_results[5];
 			
 			/* the output parameters */
 			o1 = ann->activation_output_values[0];
@@ -791,11 +863,10 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 			o4 = ann->activation_output_values[3];
 			o5 = ann->activation_output_values[4];
 			o6 = ann->activation_output_values[5];
-#ifdef FIXEDFANN /* just to make autoindent happy */
-		}
-#else
+			break;
+		default:
+			break;
 	}
-#endif
 	
 	/* first set the input */
 	num_input = ann->num_input;
@@ -879,28 +950,41 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 #ifdef FIXEDFANN
 				case FANN_SIGMOID:
 				case FANN_SIGMOID_STEPWISE:
+				case FANN_SIGMOID_SYMMETRIC:
+				case FANN_SIGMOID_SYMMETRIC_STEPWISE:
 					if(layer_it == last_layer-1){
-						neuron_it->value = fann_stepwise(o1, o2, o3, o4, o5, o6, r1, r2, r3, r4, r5, r6, neuron_value, multiplier);
+						neuron_it->value = fann_stepwise(o1, o2, o3, o4, o5, o6, ro1, ro2, ro3, ro4, ro5, ro6, neuron_value, multiplier);
 					}else{
-						neuron_it->value = fann_stepwise(h1, h2, h3, h4, h5, h6, r1, r2, r3, r4, r5, r6, neuron_value, multiplier);
+						neuron_it->value = fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, neuron_value, multiplier);
 					}
 					break;
 #else
+				case FANN_LINEAR:
+					neuron_it->value = fann_linear(steepness, neuron_value);
+					break;
+					
 				case FANN_SIGMOID:
 					neuron_it->value = fann_sigmoid(steepness, neuron_value);
 					break;
 					
+				case FANN_SIGMOID_SYMMETRIC:
+					neuron_it->value = fann_sigmoid_symmetric(steepness, neuron_value);
+					break;
+					
 				case FANN_SIGMOID_STEPWISE:
+				case FANN_SIGMOID_SYMMETRIC_STEPWISE:
 					if(layer_it == last_layer-1){
-						neuron_it->value = fann_stepwise(o1, o2, o3, o4, o5, o6, r1, r2, r3, r4, r5, r6, neuron_value, 1);
+						neuron_it->value = fann_stepwise(o1, o2, o3, o4, o5, o6, ro1, ro2, ro3, ro4, ro5, ro6, neuron_value, 1);
 					}else{
-						neuron_it->value = fann_stepwise(h1, h2, h3, h4, h5, h6, r1, r2, r3, r4, r5, r6, neuron_value, 1);
+						neuron_it->value = fann_stepwise(h1, h2, h3, h4, h5, h6, rh1, rh2, rh3, rh4, rh5, rh6, neuron_value, 1);
 					}
 					break;
 #endif
 				case FANN_THRESHOLD:
 					neuron_it->value = (neuron_value < 0) ? 0 : 1;
 					break;
+				default:
+					fann_error(ann, FANN_E_CANT_USE_ACTIVATION);
 			}
 		}
 	}	
diff --git a/src/fann_internal.c b/src/fann_internal.c
index cb45b7d..c466bec 100644
--- a/src/fann_internal.c
+++ b/src/fann_internal.c
@@ -236,7 +236,7 @@ int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuratio
 
 		/* The maximum number of bits we shift the fix point, is the number
 		   of bits in a integer, minus one for the sign, one for the minus
-		   in stepwise sigmoid, and minus the bits used for the maximum.
+		   in stepwise, and minus the bits used for the maximum.
 		   This is devided by two, to allow multiplication of two fixed
 		   point numbers.
 		*/
@@ -379,58 +379,92 @@ void fann_save_train_internal_fd(struct fann_train_data* data, FILE *file, char
 	}
 }
 
-void fann_initialise_result_array(struct fann *ann)
+/* Adjust the stepwise functions (if used) */
+void fann_update_stepwise_hidden(struct fann *ann)
 {
-#ifdef FIXEDFANN
-	/* Calculate the parameters for the stepwise linear
-	   sigmoid function fixed point.
-	   Using a rewritten sigmoid function.
+	unsigned int i = 0;
+#ifndef FIXEDFANN
+	/* For use in stepwise linear activation function.
 	   results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
 	*/
-	ann->activation_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
-	ann->activation_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
-	ann->activation_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
-	ann->activation_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
-	ann->activation_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
-	ann->activation_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
+	switch(ann->activation_function_hidden){
+		case FANN_SIGMOID:
+		case FANN_SIGMOID_STEPWISE:
+			ann->activation_hidden_results[0] = 0.005;
+			ann->activation_hidden_results[1] = 0.05;
+			ann->activation_hidden_results[2] = 0.25;
+			ann->activation_hidden_results[3] = 0.75;
+			ann->activation_hidden_results[4] = 0.95;
+			ann->activation_hidden_results[5] = 0.995;	
+			break;
+		case FANN_SIGMOID_SYMMETRIC:
+		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+			ann->activation_hidden_results[0] = -0.99;
+			ann->activation_hidden_results[1] = -0.9;
+			ann->activation_hidden_results[2] = -0.5;
+			ann->activation_hidden_results[3] = 0.5;
+			ann->activation_hidden_results[4] = 0.9;
+			ann->activation_hidden_results[5] = 0.99;
+			break;
+		default:
+			/* the activation functions which do not have a stepwise function
+			   should not have it calculated */
+			return;
+	}
 #else
-	/* For use in stepwise linear activation function.
+	/* Calculate the parameters for the stepwise linear
+	   sigmoid function fixed point.
+	   Using a rewritten sigmoid function.
 	   results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
 	*/
-	ann->activation_results[0] = 0.005;
-	ann->activation_results[1] = 0.05;
-	ann->activation_results[2] = 0.25;
-	ann->activation_results[3] = 0.75;
-	ann->activation_results[4] = 0.95;
-	ann->activation_results[5] = 0.995;	
+	switch(ann->activation_function_hidden){
+		case FANN_SIGMOID:
+		case FANN_SIGMOID_STEPWISE:
+			ann->activation_hidden_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
+			ann->activation_hidden_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
+			ann->activation_hidden_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
+			ann->activation_hidden_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
+			ann->activation_hidden_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
+			ann->activation_hidden_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
+			break;
+		case FANN_SIGMOID_SYMMETRIC:
+		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+			ann->activation_hidden_results[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
+			ann->activation_hidden_results[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
+			ann->activation_hidden_results[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
+			ann->activation_hidden_results[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
+			ann->activation_hidden_results[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
+			ann->activation_hidden_results[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
+			break;
+		default:
+			/* the activation functions which do not have a stepwise function
+			   should not have it calculated */
+			return;
+	}			
 #endif
 
-	fann_update_stepwise_hidden(ann);
-	fann_update_stepwise_output(ann);
-}
-
-/* Adjust the steepwise functions (if used) */
-void fann_update_stepwise_hidden(struct fann *ann)
-{
-	unsigned int i = 0;
 	for(i = 0; i < 6; i++){
-#ifdef FIXEDFANN
+#ifndef FIXEDFANN
 		switch(ann->activation_function_hidden){
 			case FANN_SIGMOID:
+				break;
 			case FANN_SIGMOID_STEPWISE:
-				ann->activation_hidden_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
+				ann->activation_hidden_values[i] = ((log(1.0/ann->activation_hidden_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
 				break;
-			case FANN_THRESHOLD:
+			case FANN_SIGMOID_SYMMETRIC:
+			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+				ann->activation_hidden_values[i] = ((log((1.0-ann->activation_hidden_results[i]) / (ann->activation_hidden_results[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
 				break;
 		}
 #else
 		switch(ann->activation_function_hidden){
 			case FANN_SIGMOID:
-				break;
 			case FANN_SIGMOID_STEPWISE:
-				ann->activation_hidden_values[i] = ((log(1.0/ann->activation_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
+				ann->activation_hidden_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_hidden_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
 				break;
-			case FANN_THRESHOLD:
+			case FANN_SIGMOID_SYMMETRIC:
+			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+				ann->activation_hidden_values[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_hidden_results[i])/((float)ann->activation_hidden_results[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
 				break;
 		}
 #endif
@@ -441,25 +475,88 @@ void fann_update_stepwise_hidden(struct fann *ann)
 void fann_update_stepwise_output(struct fann *ann)
 {
 	unsigned int i = 0;
+#ifndef FIXEDFANN
+	/* For use in stepwise linear activation function.
+	   results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
+	*/
+	switch(ann->activation_function_output){
+		case FANN_SIGMOID:
+		case FANN_SIGMOID_STEPWISE:
+			ann->activation_output_results[0] = 0.005;
+			ann->activation_output_results[1] = 0.05;
+			ann->activation_output_results[2] = 0.25;
+			ann->activation_output_results[3] = 0.75;
+			ann->activation_output_results[4] = 0.95;
+			ann->activation_output_results[5] = 0.995;	
+			break;
+		case FANN_SIGMOID_SYMMETRIC:
+		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+			ann->activation_output_results[0] = -0.99;
+			ann->activation_output_results[1] = -0.9;
+			ann->activation_output_results[2] = -0.5;
+			ann->activation_output_results[3] = 0.5;
+			ann->activation_output_results[4] = 0.9;
+			ann->activation_output_results[5] = 0.99;
+			break;
+		default:
+			/* the activation functions which do not have a stepwise function
+			   should not have it calculated */
+			return;
+	}
+#else
+	/* Calculate the parameters for the stepwise linear
+	   sigmoid function fixed point.
+	   Using a rewritten sigmoid function.
+	   results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
+	*/
+	switch(ann->activation_function_output){
+		case FANN_SIGMOID:
+		case FANN_SIGMOID_STEPWISE:
+			ann->activation_output_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
+			ann->activation_output_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
+			ann->activation_output_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
+			ann->activation_output_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
+			ann->activation_output_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
+			ann->activation_output_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
+			break;
+		case FANN_SIGMOID_SYMMETRIC:
+		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+			ann->activation_output_results[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
+			ann->activation_output_results[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
+			ann->activation_output_results[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
+			ann->activation_output_results[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
+			ann->activation_output_results[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
+			ann->activation_output_results[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
+			break;
+		default:
+			/* the activation functions which do not have a stepwise function
+			   should not have it calculated */
+			return;
+	}			
+#endif
+
 	for(i = 0; i < 6; i++){
-#ifdef FIXEDFANN
+#ifndef FIXEDFANN
 		switch(ann->activation_function_output){
 			case FANN_SIGMOID:
+				break;
 			case FANN_SIGMOID_STEPWISE:
-				ann->activation_output_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
+				ann->activation_output_values[i] = ((log(1.0/ann->activation_output_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
 				break;
-			case FANN_THRESHOLD:
+			case FANN_SIGMOID_SYMMETRIC:
+			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+				ann->activation_output_values[i] = ((log((1.0-ann->activation_output_results[i]) / (ann->activation_output_results[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
 				break;
 		}
 #else
 		switch(ann->activation_function_output){
 			case FANN_SIGMOID:
-				break;
 			case FANN_SIGMOID_STEPWISE:
-				ann->activation_output_values[i] = ((log(1.0/ann->activation_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
-				/* printf("%f -> %f\n", ann->activation_results[i], ann->activation_output_values[i]); */
+				ann->activation_output_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_output_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
 				break;
-			case FANN_THRESHOLD:
+			case FANN_SIGMOID_SYMMETRIC:
+			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+				ann->activation_output_values[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_output_results[i])/((float)ann->activation_output_results[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
 				break;
 		}
 #endif
@@ -543,6 +640,12 @@ void fann_error(struct fann *ann, const unsigned int errno, ...)
 	case FANN_E_CANT_ALLOCATE_MEM:
 		snprintf(errstr, FANN_ERRSTR_MAX, "Unable to allocate memory.\n");
 		break;
+	case FANN_E_CANT_TRAIN_ACTIVATION:
+		snprintf(errstr, FANN_ERRSTR_MAX, "Unable to train with the selected activation function.\n");
+		break;
+	case FANN_E_CANT_USE_ACTIVATION:
+		snprintf(errstr, FANN_ERRSTR_MAX, "Unable to use the selected activation function.\n");
+	break;
 	default:
 		vsnprintf(errstr, FANN_ERRSTR_MAX, "Unknown error.\n", ap);
 		break;
@@ -553,6 +656,10 @@ void fann_error(struct fann *ann, const unsigned int errno, ...)
 		fprintf(stderr, "FANN Error %d: %s", errno, errstr);
 	} else {
 		ann->errstr = errstr;
+		/* TODO automatically print to stderr if nothing else selected
+		   (other choices are file or just to keep them in the struct)
+		*/
+		fprintf(stderr, "FANN Error %d: %s", errno, errstr);
 	}
 }
 
@@ -613,12 +720,13 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 	ann->decimal_point = decimal_point;
 	ann->multiplier = multiplier;
 #endif
-	fann_initialise_result_array(ann);
-	
-	fann_set_activation_hidden_steepness(ann, activation_hidden_steepness);
-	fann_set_activation_output_steepness(ann, activation_output_steepness);
-	fann_set_activation_function_hidden(ann, activation_function_hidden);
-	fann_set_activation_function_output(ann, activation_function_output);
+
+	ann->activation_hidden_steepness = activation_hidden_steepness;
+	ann->activation_output_steepness = activation_output_steepness;
+	ann->activation_function_hidden = activation_function_hidden;
+	ann->activation_function_output = activation_function_output;
+	fann_update_stepwise_hidden(ann);
+	fann_update_stepwise_output(ann);
 	
 #ifdef DEBUG
 	printf("creating network with learning rate %f\n", learning_rate);
@@ -642,7 +750,7 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 #endif
 	}
 	
-	ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron -1;
+	ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
 	ann->num_output = ((ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron) - 1;
 	
 	/* allocate room for the actual neurons */
diff --git a/src/include/fann_activation.h b/src/include/fann_activation.h
index 607a584..42c677a 100644
--- a/src/include/fann_activation.h
+++ b/src/include/fann_activation.h
@@ -34,7 +34,7 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
    span: -inf < y < inf
    y = x*s, d = 1*s
    Can NOT be used in fixed point.
-   NOT implemented yet.
+   (NOT) implemented yet.
 */
 #define FANN_LINEAR 4
 
@@ -103,17 +103,25 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
 /* stepwise linear functions used for some of the activation functions */
+
+/* defines used for the stepwise linear functions */
+
 #define fann_linear_func(v1, r1, v2, r2, value) ((((r2-r1) * (value-v1))/(v2-v1)) + r1)
 #define fann_stepwise(v1, v2, v3, v4, v5, v6, r1, r2, r3, r4, r5, r6, value, multiplier) (value < v5 ? (value < v3 ? (value < v2 ? (value < v1 ? 0 : fann_linear_func(v1, r1, v2, r2, value)) : fann_linear_func(v2, r2, v3, r3, value)) : (value < v4 ? fann_linear_func(v3, r3, v4, r4, value) : fann_linear_func(v4, r4, v5, r5, value))) : (value < v6 ? fann_linear_func(v5, r5, v6, r6, value) : multiplier))
 
-#ifdef FIXEDFANN
-#define fann_sigmoid(steepness, value) ((fann_type)(0.5+((1.0/(1.0 + exp(-2.0 * ((float)steepness/multiplier) * ((float)value/multiplier))))*multiplier)))
-
-#else
+/* FANN_LINEAR */
+#define fann_linear(steepness, value) fann_mult(steepness, value)
+#define fann_linear_derive(steepness, value) (steepness)
 
+/* FANN_SIGMOID */
 #define fann_sigmoid(steepness, value) (1.0/(1.0 + exp(-2.0 * steepness * value)))
-#define fann_sigmoid_derive(steepness, value) ((2.0 * steepness * value * (1.0 - value)) + 0.01) /* the plus is a trick to the derived function, to avoid getting stuck on flat spots */
-#endif
+#define fann_sigmoid_derive(steepness, value) (2.0 * steepness * value * (1.0 - value))
+
+/* FANN_SIGMOID_SYMMETRIC */
+#define fann_sigmoid_symmetric(steepness, value) (2.0/(1.0 + exp(-2.0 * steepness * value)) - 1.0)
+#define fann_sigmoid_symmetric_derive(steepness, value) steepness * (1.0 - (value*value))
 
+/* FANN_GAUSSIAN */
+#define fann_gaussian(steepness, value) (exp(-value * steepness * value * steepness))
 
 #endif
diff --git a/src/include/fann_data.h b/src/include/fann_data.h
index d861a4f..4397d1d 100644
--- a/src/include/fann_data.h
+++ b/src/include/fann_data.h
@@ -122,8 +122,9 @@ struct fann
 	   activation_results array, the result is saved, and in the
 	   two values arrays, the values that gives the results are saved.
 	 */
-	fann_type activation_results[6];
+	fann_type activation_hidden_results[6];
 	fann_type activation_hidden_values[6];
+	fann_type activation_output_results[6];
 	fann_type activation_output_values[6];
 
 	/* Total number of connections.
diff --git a/src/include/fann_errno.h b/src/include/fann_errno.h
index 2ae9f94..c598029 100644
--- a/src/include/fann_errno.h
+++ b/src/include/fann_errno.h
@@ -62,7 +62,13 @@ enum {
 	FANN_E_CANT_READ_TD,
 
 	/* Unable to allocate memory. */
-	FANN_E_CANT_ALLOCATE_MEM
+	FANN_E_CANT_ALLOCATE_MEM,
+
+	/* Unable to train with the selected activation function */
+	FANN_E_CANT_TRAIN_ACTIVATION,
+
+	/* Unable to use the selected activation function */
+	FANN_E_CANT_USE_ACTIVATION
 };
 
 #ifdef __cplusplus
diff --git a/src/include/fann_internal.h b/src/include/fann_internal.h
index 91f8ed2..b4d07b6 100644
--- a/src/include/fann_internal.h
+++ b/src/include/fann_internal.h
@@ -48,7 +48,6 @@ void fann_save_train_internal_fd(struct fann_train_data* data, FILE *file, char
 int fann_compare_connections(const void* c1, const void* c2);
 void fann_seed_rand();
 
-void fann_initialise_result_array(struct fann *ann);
 void fann_update_stepwise_hidden(struct fann *ann);
 void fann_update_stepwise_output(struct fann *ann);
 
@@ -61,6 +60,7 @@ struct fann_train_data* fann_read_train_from_fd(FILE *file, char *filename);
 #define fann_max(x, y) (((x) > (y)) ? (x) : (y))
 #define fann_min(x, y) (((x) < (y)) ? (x) : (y))
 #define fann_safe_free(x) if(x) free(x)
+#define fann_clip(x, lo, hi) (((x) < (lo)) ? (lo) : (((x) > (hi)) ? (hi) : (x)))
 
 #define fann_rand(min_value, max_value) (((double)(min_value))+(((double)(max_value)-((double)(min_value)))*rand()/(RAND_MAX+1.0)))
 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/libfann.git



More information about the debian-science-commits mailing list