[libfann] 150/242: Added fann_set_train_error_function instead of fann_use_tanh_error_function, for easy addition of more error functions in the future. Also renamed "forward" to "shortcut" for shortcut connections

Christian Kastner chrisk-guest at moszumanska.debian.org
Sat Oct 4 21:10:36 UTC 2014


This is an automated email from the git hooks/post-receive script.

chrisk-guest pushed a commit to tag Version2_0_0
in repository libfann.

commit 9d2c5108647834e99d650fdacfb6915d0dfbd84b
Author: Steffen Nissen <lukesky at diku.dk>
Date:   Mon Jul 5 23:05:50 2004 +0000

    Added fann_set_train_error_function instead of fann_use_tanh_error_function, for easy addition of more error functions in the future. Also renamed "forward" to "shortcut" for shortcut connections
---
 ChangeLog               |   4 +-
 TODO                    |  36 +++++++++++-
 doc/fann.xml            | 147 +++++++++++++++++++++++++-----------------------
 examples/Makefile       |   2 -
 src/Makefile.am         |   2 +-
 src/Makefile.in         |   1 +
 src/fann.c              |  20 +++----
 src/fann_io.c           |  18 +++---
 src/fann_options.c      |  14 ++---
 src/fann_train.c        |  14 ++---
 src/include/fann.h      |  20 +++----
 src/include/fann_data.h |  29 +++++++---
 12 files changed, 176 insertions(+), 131 deletions(-)
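In API terms, the commit replaces a boolean toggle with an error-function constant. A minimal before/after sketch, assuming an already created struct fann *ann (the names are taken from the declarations further down in this commit):

	/* before: enable or disable the tanh error function via a flag */
	fann_set_use_tanh_error_function(ann, 1);

	/* after: select an error function by constant, so further
	   functions can be added without one flag per function */
	fann_set_train_error_function(ann, FANN_ERRORFUNC_TANH);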

diff --git a/ChangeLog b/ChangeLog
index fa9747c..9ea79f4 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,10 +1,10 @@
 libfann (1.2.0) stable; urgency=low
 	* Fixes for better compatibility with different compilers
-	* Connections that skip layers
+	* Shortcut connections that skip layers
 	* fann_print_connections function
 	* More activation functions
 	* Batch training
-	* RPROP training
+	* RPROP training (default)
 	* Quickprop training
 	* fann_get_training_algorithm and fann_set_training_algorithm functions
 	* fann_test_data function
diff --git a/TODO b/TODO
index 87f622a..3fa3abe 100644
--- a/TODO
+++ b/TODO
@@ -3,7 +3,6 @@
 * Implement the activation functions defined in fann_activation.h
 * More checks to see if train data is properly formatted in the file (some have experienced nasty problems because a number was missing in one line).
 * C++ wrapper.
-* DLL.
 * COM Component.
 * Wrappers for other languages.
 * Implement optimal brain damage.
@@ -11,3 +10,38 @@
 * Allow recurrent networks.
 
 If you want to contribute to the library, please contact me at lukesky at diku.dk
+
+Things TODO when releasing a new version (mostly for fann developers)
+
+* No compile warnings
+	cd examples
+	make compiletest
+
+* Everything should compile and install ok
+	./configure && make
+	su
+	make install
+	exit
+	cd examples
+	make runtest
+
+* There must be no memory leaks
+	cd examples
+	make rundebug
+
+* Documentation should be updated
+
+* Python bindings should compile and run
+	cd python
+	make
+	python simple_train.py
+
+* Changelog should be updated and timestamp should be set
+
+* PHP Extension should be up-to-date (including documentation)
+
+* Debian packages should be created
+
+* RPM packages should be created
+
+* Windows DLLs should be created
diff --git a/doc/fann.xml b/doc/fann.xml
index 3da4e77..392ae6b 100644
--- a/doc/fann.xml
+++ b/doc/fann.xml
@@ -785,16 +785,16 @@ fann_destroy(ann2);
           <para>This function appears in FANN >= 1.0.5.</para>
         </refsect1>
       </refentry>
-      <refentry id="api.fann_create_forward">
+      <refentry id="api.fann_create_shortcut">
         <refnamediv>
-          <refname>fann_create_forward</refname>
-          <refpurpose>Create a new artificial neural network with forward connections, and return a pointer to it.</refpurpose>
+          <refname>fann_create_shortcut</refname>
+          <refpurpose>Create a new artificial neural network with shortcut connections, and return a pointer to it.</refpurpose>
         </refnamediv>
         <refsect1>
           <title>Description</title>
           <methodsynopsis>
             <type>struct fann *</type>
-            <methodname>fann_create_forward</methodname>
+            <methodname>fann_create_shortcut</methodname>
             <methodparam>
               <type>float</type>
               <parameter>learning_rate</parameter>
@@ -809,12 +809,12 @@ fann_destroy(ann2);
             </methodparam>
           </methodsynopsis>
           <para>
-            <function>fann_create_forward</function> will create a new artificial neural network, and return
-	    a pointer to it. The network will be fully connected, and will furthermore have all forward 
+            <function>fann_create_shortcut</function> will create a new artificial neural network, and return
+	    a pointer to it. The network will be fully connected, and will furthermore have all shortcut 
 	    connections connected.
 	  </para>
 	  <para>
-            Forward connections are connections that skip layers. A fully connected network with forward
+            Shortcut connections are connections that skip layers. A fully connected network with shortcut
 	    connections is a network where all neurons are connected to all neurons in later layers, 
 	    including direct connections from the input layer to the output layer.
 	  </para>
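
A usage sketch under assumed values (the varargs signature matches the declaration in src/include/fann.h below; the layer sizes are invented for illustration):

	/* learning rate 0.7; three layers: 2 inputs, 4 hidden, 1 output.
	   Every neuron also connects to all neurons in later layers,
	   including direct input-to-output connections. */
	struct fann *ann = fann_create_shortcut(0.7f, 3, 2, 4, 1);
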
@@ -826,16 +826,16 @@ fann_destroy(ann2);
           <para>This function appears in FANN >= 1.2.0.</para>
         </refsect1>
       </refentry>
-      <refentry id="api.fann_create_forward_array">
+      <refentry id="api.fann_create_shortcut_array">
         <refnamediv>
-          <refname>fann_create_forward_array</refname>
-          <refpurpose>Create a new artificial neural network with forward connections, and return a pointer to it.</refpurpose>
+          <refname>fann_create_shortcut_array</refname>
+          <refpurpose>Create a new artificial neural network with shortcut connections, and return a pointer to it.</refpurpose>
         </refnamediv>
         <refsect1>
           <title>Description</title>
           <methodsynopsis>
             <type>struct fann *</type>
-            <methodname>fann_create_forward_array</methodname>
+            <methodname>fann_create_shortcut_array</methodname>
             <methodparam>
               <type>float</type>
               <parameter>learning_rate</parameter>
@@ -850,8 +850,8 @@ fann_destroy(ann2);
             </methodparam>
           </methodsynopsis>
           <para>
-            <function>fann_create_forward_array</function> will create a new artificial neural network, and return a pointer to
-	    it. It is the same as <function>fann_create_forward</function>, only it accepts an array as its final parameter
+            <function>fann_create_shortcut_array</function> will create a new artificial neural network, and return a pointer to
+	    it. It is the same as <function>fann_create_shortcut</function>, except that it accepts an array as its final parameter
 	    instead of variable arguments.
 	  </para>
           <para>This function appears in FANN >= 1.2.0.</para>
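
The array form helps when the layer count is only known at runtime; a minimal sketch with the same assumed sizes as above:

	unsigned int layers[3] = {2, 4, 1};
	struct fann *ann = fann_create_shortcut_array(0.7f, 3, layers);
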
@@ -1691,31 +1691,6 @@ L   2 / N    6 ...cda
 	  <para>
 	    Prints all the parameters of the network, for easy viewing of all the values.
 	  </para>
-          <para>
-	    An example print of a freshly created (2 3 1) ANN is displayd here:
-	    <literallayout class="monospaced" id="api.fann_print_parameters.output">
-Input layer                :  2 neurons, 1 bias
-  Hidden layer             :  3 neurons, 1 bias
-Output layer               :  1 neurons
-Total neurons and biases   :  8
-Total connections          : 13
-Connection rate            :  1.00
-Forward connections        :  0
-Training algorithm         :  FANN_TRAIN_RPROP
-Learning rate              :  0.70
-Activation function hidden :  FANN_SIGMOID_STEPWISE
-Activation function output :  FANN_SIGMOID_STEPWISE
-Activation steepness hidden:  0.50
-Activation steepness output:  0.50
-Use tanh error function    :  1
-Quickprop decay            : -0.000100
-Quickprop mu               :  1.75
-RPROP increase factor      :  1.20
-RPROP decrease factor      :  0.50
-RPROP delta min            :  0.00
-RPROP delta max            : 50.00
-	    </literallayout>
-  	  </para>
           <para>This function appears in FANN >= 1.2.0.</para>
         </refsect1>
       </refentry>
@@ -2030,64 +2005,50 @@ RPROP delta max            : 50.00
           <para>This function appears in FANN >= 1.2.0. and replaces the <methodname>fann_set_activation_output_steepness</methodname> function from FANN >= 1.0.0.</para>
         </refsect1>
       </refentry>
-      <refentry id="api.fann_set_use_tanh_error_function">
+      <refentry id="api.fann_set_train_error_function">
         <refnamediv>
-          <refname>fann_set_use_tanh_error_function</refname>
-          <refpurpose>Sets whether the tanh error function is used.</refpurpose>
+          <refname>fann_set_train_error_function</refname>
+          <refpurpose>Sets the training error function to be used.</refpurpose>
         </refnamediv>
         <refsect1>
           <title>Description</title>
           <methodsynopsis>
             <type>void</type>
-            <methodname>fann_get_use_tanh_error_function</methodname>
+            <methodname>fann_set_train_error_function</methodname>
             <methodparam>
               <type>struct fann *</type>
               <parameter>ann</parameter>
             </methodparam>
             <methodparam>
               <type>unsigned int</type>
-              <parameter>use_tanh_error_function</parameter>
+              <parameter>train_error_function</parameter>
             </methodparam>
           </methodsynopsis>
+          <para>Set the training error function (as described in <link linkend="api.sec.constants.errorfunc">Training Error Functions</link>) of a network.</para>
 	  <para>
-	    If <parameter>use_tanh_error_function</parameter> is zero, the tanh error 
-	    function is not used and if it is one, the tanh error function is used.
-	  </para>
-          <para>
-	    The tanh error function is an error function that makes large deviations 
-	    stand out, by altering the error value used when training the network.
-	    The idea behind this is that it is worse to have 1 output that misses the target
-	    by 100%, than having 10 outputs that misses the target by 10%.
-	  </para>
-	  <para>
-	    The default behavior is to use the tanh error function.
+	    The default training error function is <link linkend="api.sec.constants.errorfunc"><constant>FANN_ERRORFUNC_TANH</constant></link>.
 	  </para>
           <para>This function appears in FANN >= 1.2.0.</para>
         </refsect1>
       </refentry>
-      <refentry id="api.fann_get_use_tanh_error_function">
+      <refentry id="api.fann_get_train_error_function">
         <refnamediv>
-          <refname>fann_get_use_tanh_error_function</refname>
-          <refpurpose>Sees if the tanh error function is used.</refpurpose>
+          <refname>fann_get_train_error_function</refname>
+          <refpurpose>Gets the training error function currently in use.</refpurpose>
         </refnamediv>
         <refsect1>
           <title>Description</title>
           <methodsynopsis>
             <type>unsigned int</type>
-            <methodname>fann_get_use_tanh_error_function</methodname>
+            <methodname>fann_get_train_error_function</methodname>
             <methodparam>
               <type>struct fann *</type>
               <parameter>ann</parameter>
             </methodparam>
           </methodsynopsis>
-          <para>
-	    The tanh error function is an error function that makes large deviations 
-	    stand out, by altering the error value used when training the network.
-	    The idea behind this is that it is worse to have 1 output that misses the target
-	    by 100%, than having 10 outputs that misses the target by 10%.
-	  </para>
+          <para>Get the training error function (as described in <link linkend="api.sec.constants.errorfunc">Training Error Functions</link>) of a network.</para>
 	  <para>
-	    The default behavior is to use this tanh error function.
+	    The default training error function is <link linkend="api.sec.constants.errorfunc"><constant>FANN_ERRORFUNC_TANH</constant></link>.
 	  </para>
           <para>This function appears in FANN >= 1.2.0.</para>
         </refsect1>
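
A short sketch of the new getter/setter pair in use (assuming an existing struct fann *ann):

	fann_set_train_error_function(ann, FANN_ERRORFUNC_LINEAR);
	if(fann_get_train_error_function(ann) == FANN_ERRORFUNC_LINEAR){
		/* the plain difference between desired and actual
		   output is now used as the training error */
	}
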
@@ -2747,17 +2708,17 @@ RPROP delta max            : 50.00
             <varlistentry>
               <term>
                 <type>unsigned int</type>
-                <varname>forward_connections</varname>
+                <varname>shortcut_connections</varname>
               </term>
               <listitem>
                 <para>
-		  Is 1 if forward connections are used in the ann otherwise 0
-		  Forward connections are connections that skip layers.
-		  A fully connected ann with forward connections is an ann where
+		  Is 1 if shortcut connections are used in the ann, otherwise 0.
+		  Shortcut connections are connections that skip layers.
+		  A fully connected ann with shortcut connections is an ann where
 		  neurons have connections to all neurons in all later layers.
 		</para>
 		<para>
-		  ANNs with forward connections are created by <link linkend="api.fann_create_forward"><function>fann_create_forward</function></link>.
+		  ANNs with shortcut connections are created by <link linkend="api.fann_create_shortcut"><function>fann_create_shortcut</function></link>.
 		</para>
               </listitem>
             </varlistentry>
@@ -2987,7 +2948,7 @@ RPROP delta max            : 50.00
             <varlistentry>
               <term>
                 <type>unsigned int</type>
-                <varname>use_tanh_error_function</varname>
+                <varname>train_error_function</varname>
               </term>
               <listitem>
                 <para>When using this, training is usually faster.
@@ -3495,6 +3456,50 @@ RPROP delta max            : 50.00
           </variablelist>
         </refsect1>
       </refentry>
+      <refentry id="api.sec.constants.errorfunc">
+        <refnamediv>
+          <refname id="api.sec.constants.errorfunc.title">Training Error Functions</refname>
+          <refpurpose>Constants representing error functions.</refpurpose>
+        </refnamediv>
+        <refsect1>
+          <title>Description</title>
+	  <para>
+	    These constants represent the error functions used when calculating the error during training.
+	  </para>
+	  <para>
+	    The training error function used is chosen by the 
+	    <link linkend="api.fann_set_train_error_function"><function>fann_set_train_error_function</function></link> 
+	    function. The default training error function is <constant>FANN_ERRORFUNC_TANH</constant>.
+	  </para>
+          <variablelist>
+            <title>Constants</title>
+            <varlistentry>
+              <term>FANN_ERRORFUNC_LINEAR</term>
+              <listitem>
+                <para>
+		  The basic linear error function which simply calculates the error as the difference
+		  between the real output and the desired output.
+		</para>
+              </listitem>
+            </varlistentry>
+            <varlistentry>
+              <term>FANN_ERRORFUNC_TANH</term>
+              <listitem>
+                <para>
+		  The tanh error function is an error function that makes large deviations 
+		  stand out, by altering the error value used when training the network.
+		  The idea behind this is that it is worse to have 1 output that misses the target
+		  by 100% than to have 10 outputs that miss the target by 10%.
+		</para>
+		<para>
+		  This is the default error function, and it usually gives better results. It can,
+		  however, give poor results with high learning rates.
+		</para>
+              </listitem>
+            </varlistentry>
+          </variablelist>
+        </refsect1>
+      </refentry>
       <refentry id="api.sec.constants.error">
         <refnamediv>
           <refname id="api.sec.constants.error.title">Error Codes</refname>
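
To make the difference concrete, a rough per-output sketch of the two error functions; the tanh expression is an assumption reconstructed from the clamping code in src/fann_train.c below (log((1+x)/(1-x)) equals 2*atanh(x)), not a verbatim quote of this commit:

	/* diff = desired output - actual output, expected in (-1, 1) */
	float linear_error = diff;
	/* grows sharply as |diff| approaches 1, so one large miss
	   outweighs many small ones */
	float tanh_error = logf((1.0f + diff) / (1.0f - diff));
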
diff --git a/examples/Makefile b/examples/Makefile
index 6180d5e..a5e1df6 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -71,5 +71,3 @@ rundebug: $(DEBUG_TARGETS)
 	@echo
 	@echo Testing network with fixed points
 	valgrind --leak-check=yes --show-reachable=yes --leak-resolution=high ./xor_test_fixed_debug
-
-
diff --git a/src/Makefile.am b/src/Makefile.am
index 7ec97c4..86a79aa 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -3,9 +3,9 @@ SUBDIRS = include
 lib_LTLIBRARIES = libfloatfann.la libdoublefann.la libfixedfann.la libfann.la
 
 AM_LDFLAGS = -version-info 3:0:2
+AM_CFLAGS = -D_REENTRANT
 
 libfloatfann_la_SOURCES = floatfann.c
 libdoublefann_la_SOURCES = doublefann.c
 libfixedfann_la_SOURCES = fixedfann.c
 libfann_la_SOURCES = fann.c fann_io.c fann_train.c fann_train_data.c fann_options.c fann_error.c
-
diff --git a/src/Makefile.in b/src/Makefile.in
index 3cadb0b..6be8292 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -134,6 +134,7 @@ SUBDIRS = include
 lib_LTLIBRARIES = libfloatfann.la libdoublefann.la libfixedfann.la libfann.la
 
 AM_LDFLAGS = -version-info 3:0:2
+AM_CFLAGS = -D_REENTRANT
 
 libfloatfann_la_SOURCES = floatfann.c
 libdoublefann_la_SOURCES = doublefann.c
diff --git a/src/fann.c b/src/fann.c
index a45cab5..48ea685 100644
--- a/src/fann.c
+++ b/src/fann.c
@@ -280,9 +280,9 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, fl
 }
 
  
-/* create a neural network with forward connections.
+/* create a neural network with shortcut connections.
  */
-FANN_EXTERNAL struct fann * FANN_API fann_create_forward(float learning_rate,
+FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut(float learning_rate,
 	unsigned int num_layers, /* the number of layers, including the input and output layer */
 
 
@@ -299,16 +299,16 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_forward(float learning_rate,
 	}
 	va_end(layer_sizes);
 
-	ann = fann_create_forward_array(learning_rate, num_layers, layers);
+	ann = fann_create_shortcut_array(learning_rate, num_layers, layers);
 
 	free(layers);
 
 	return ann;
 }
 
-/* create a neural network with forward connections.
+/* create a neural network with shortcut connections.
  */
-FANN_EXTERNAL struct fann * FANN_API fann_create_forward_array(float learning_rate, unsigned int num_layers, unsigned int * layers)
+FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_rate, unsigned int num_layers, unsigned int * layers)
 {
 	struct fann_layer *layer_it, *layer_it2, *last_layer;
 	struct fann *ann;
@@ -331,7 +331,7 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_forward_array(float learning_ra
 	}
 
 	ann->connection_rate = 1;
-	ann->forward_connections = 1;
+	ann->shortcut_connections = 1;
 #ifdef FIXEDFANN
 	decimal_point = ann->decimal_point;
 	multiplier = ann->multiplier;
@@ -360,7 +360,7 @@ FANN_EXTERNAL struct fann * FANN_API fann_create_forward_array(float learning_ra
 	}
 	
 #ifdef DEBUG
-	printf("creating fully forward connected network with learning rate %f.\n", learning_rate);
+	printf("creating fully shortcut connected network with learning rate %f.\n", learning_rate);
 	printf("input\n");
 	printf("  layer       : %d neurons, 1 bias\n", ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1);
 #endif
@@ -543,7 +543,7 @@ FANN_EXTERNAL fann_type * FANN_API fann_run(struct fann *ann, fann_type *input)
 			weights = neuron_it->weights;
 			
 			if(ann->connection_rate >= 1){
-				if(ann->forward_connections){
+				if(ann->shortcut_connections){
 					/* first go through the connections to the previous layers,
 					   then let the normal operation go through the rest.
 					*/
@@ -854,8 +854,8 @@ struct fann * fann_allocate_structure(float learning_rate, unsigned int num_laye
 	ann->training_algorithm = FANN_TRAIN_RPROP;
 	ann->num_MSE = 0;
 	ann->MSE_value = 0;
-	ann->forward_connections = 0;
-	ann->use_tanh_error_function = 1;
+	ann->shortcut_connections = 0;
+	ann->train_error_function = FANN_ERRORFUNC_TANH;
 
 	/* variables used for cascade correlation (reasonable defaults) */
 	/*ann->change_fraction = 0.01;
diff --git a/src/fann_io.c b/src/fann_io.c
index 3e4b092..11b3275 100644
--- a/src/fann_io.c
+++ b/src/fann_io.c
@@ -149,18 +149,18 @@ int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuratio
 		/* save the decimal_point on a separate line */
 		fprintf(conf, "%u\n", decimal_point);
 		
-		/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
-		fprintf(conf, "%u %f %f %u %u %u %d %d\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, (int)(ann->activation_steepness_hidden * fixed_multiplier), (int)(ann->activation_steepness_output * fixed_multiplier));
+		/* save the number layers "num_layers learning_rate connection_rate shortcut_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
+		fprintf(conf, "%u %f %f %u %u %u %d %d\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->shortcut_connections, ann->activation_function_hidden, ann->activation_function_output, (int)(ann->activation_steepness_hidden * fixed_multiplier), (int)(ann->activation_steepness_output * fixed_multiplier));
 	}else{
-		/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
-		fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_steepness_hidden, ann->activation_steepness_output);
+		/* save the number layers "num_layers learning_rate connection_rate shortcut_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
+		fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->shortcut_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_steepness_hidden, ann->activation_steepness_output);
 	}
 #else
 	/* save the decimal_point on a separate line */
 	fprintf(conf, "%u\n", ann->decimal_point);
 	
-	/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
-	fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_steepness_hidden, ann->activation_steepness_output);	
+	/* save the number layers "num_layers learning_rate connection_rate shortcut_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
+	fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->shortcut_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_steepness_hidden, ann->activation_steepness_output);	
 #endif
 
 	for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
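
For orientation, the header line written by the floating-point branch above comes out roughly like this for a three-layer network (all values invented; the two activation-function fields are enum values, shown here only as placeholders):

	3 0.700000 1.000000 1 3 3 0.500000 0.500000
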
@@ -288,7 +288,7 @@ void fann_save_train_internal_fd(struct fann_train_data* data, FILE *file, char
  */
 struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 {
-	unsigned int num_layers, layer_size, activation_function_hidden, activation_function_output, input_neuron, i, forward_connections;
+	unsigned int num_layers, layer_size, activation_function_hidden, activation_function_output, input_neuron, i, shortcut_connections;
 #ifdef FIXEDFANN
 	unsigned int decimal_point, multiplier;
 #endif
@@ -326,7 +326,7 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 	multiplier = 1 << decimal_point;
 #endif
 	
-	if(fscanf(conf, "%u %f %f %u %u %u "FANNSCANF" "FANNSCANF"\n", &num_layers, &learning_rate, &connection_rate, &forward_connections, &activation_function_hidden, &activation_function_output, &activation_steepness_hidden, &activation_steepness_output) != 8){
+	if(fscanf(conf, "%u %f %f %u %u %u "FANNSCANF" "FANNSCANF"\n", &num_layers, &learning_rate, &connection_rate, &shortcut_connections, &activation_function_hidden, &activation_function_output, &activation_steepness_hidden, &activation_steepness_output) != 8){
 		fann_error(NULL, FANN_E_CANT_READ_CONFIG, configuration_file);
 		return NULL;
 	}
@@ -336,7 +336,7 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 		return NULL;
 	}
 	ann->connection_rate = connection_rate;
-	ann->forward_connections = forward_connections;
+	ann->shortcut_connections = shortcut_connections;
 
 #ifdef FIXEDFANN
 	ann->decimal_point = decimal_point;
diff --git a/src/fann_options.c b/src/fann_options.c
index d62bd3b..341a7e4 100644
--- a/src/fann_options.c
+++ b/src/fann_options.c
@@ -40,8 +40,8 @@ FANN_EXTERNAL void FANN_API fann_print_parameters(struct fann *ann)
 	printf("Total neurons and biases   : %2d\n", fann_get_total_neurons(ann));
 	printf("Total connections          : %2d\n", ann->total_connections);
 	printf("Connection rate            : %5.2f\n", ann->connection_rate);
-	printf("Forward connections        : %2d\n", ann->forward_connections);
-	printf("Training algorithm         :  %s\n", FANN_TRAINING_NAMES[ann->training_algorithm]);	
+	printf("Shortcut connections       : %2d\n", ann->shortcut_connections);
+	printf("Training algorithm         :  %s\n", FANN_TRAIN_NAMES[ann->training_algorithm]);	
 	printf("Learning rate              : %5.2f\n", ann->learning_rate);
 	printf("Activation function hidden :  %s\n", FANN_ACTIVATION_NAMES[ann->activation_function_hidden]);
 	printf("Activation function output :  %s\n", FANN_ACTIVATION_NAMES[ann->activation_function_output]);
@@ -54,7 +54,7 @@ FANN_EXTERNAL void FANN_API fann_print_parameters(struct fann *ann)
 	printf("Decimal point              : %2d\n", ann->decimal_point);
 	printf("Multiplier                 : %2d\n", ann->multiplier);
 #endif
-	printf("Use tanh error function    : %2d\n", ann->use_tanh_error_function);
+	printf("Training error function    :  %s\n", FANN_ERRORFUNC_NAMES[ann->train_error_function]);
 	printf("Quickprop decay            : %9.6f\n", ann->quickprop_decay);
 	printf("Quickprop mu               : %5.2f\n", ann->quickprop_mu);
 	printf("RPROP increase factor      : %5.2f\n", ann->rprop_increase_factor);
@@ -183,9 +183,9 @@ struct fann_neuron** fann_get_connections(struct fann *ann)
    Makes the error used for calculating the slopes
    higher when the difference is higher.
  */
-FANN_EXTERNAL void FANN_API fann_set_use_tanh_error_function(struct fann *ann, unsigned int use_tanh_error_function)
+FANN_EXTERNAL void FANN_API fann_set_train_error_function(struct fann *ann, unsigned int train_error_function)
 {
-	ann->use_tanh_error_function = use_tanh_error_function;
+	ann->train_error_function = train_error_function;
 }
 
 /* Decay is used to keep the weights from growing too high (default -0.0001). */
@@ -228,9 +228,9 @@ FANN_EXTERNAL void FANN_API fann_set_rprop_delta_max(struct fann *ann, float rpr
    Makes the error used for calculating the slopes
    higher when the difference is higher.
  */
-FANN_EXTERNAL unsigned int FANN_API fann_get_use_tanh_error_function(struct fann *ann)
+FANN_EXTERNAL unsigned int FANN_API fann_get_train_error_function(struct fann *ann)
 {
-	return ann->use_tanh_error_function;
+	return ann->train_error_function;
 }
 
 /* Decay is used to keep the weights from growing too high (default -0.0001). */
diff --git a/src/fann_train.c b/src/fann_train.c
index 8d575ea..f70afa3 100644
--- a/src/fann_train.c
+++ b/src/fann_train.c
@@ -182,7 +182,7 @@ void fann_compute_MSE(struct fann *ann, fann_type *desired_output)
 		
 		ann->MSE_value += (float)(neuron_diff * neuron_diff);
 
-		if(ann->use_tanh_error_function){
+		if(ann->train_error_function){ /* TODO make switch when more functions */
 			if ( neuron_diff < -.9999999 )
 				neuron_diff = -17.0;
 			else if ( neuron_diff > .9999999 )
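
The hunk above ends inside that clamp; for context, a plausible completion of the tanh branch, using the same 2*atanh form sketched earlier (an assumption, not quoted from this commit):

	else if ( neuron_diff > .9999999 )
		neuron_diff = 17.0;
	else
		/* log((1+x)/(1-x)) == 2*atanh(x); magnifies large deviations */
		neuron_diff = (fann_type)log((1.0 + neuron_diff) / (1.0 - neuron_diff));
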
@@ -227,9 +227,9 @@ void fann_backpropagate_MSE(struct fann *ann)
 		last_neuron = layer_it->last_neuron;
 
 		/* for each connection in this layer, propagate the error backwards*/
-		if(ann->connection_rate >= 1 && !ann->forward_connections){
+		if(ann->connection_rate >= 1 && !ann->shortcut_connections){
 			/* optimization for fully connected networks */
-			/* but not forward connected networks */
+			/* but not shortcut connected networks */
 			error_prev_layer = error_begin + ((layer_it-1)->first_neuron - first_neuron);
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
@@ -316,9 +316,9 @@ void fann_update_weights(struct fann *ann)
 		printf("layer[%d]\n", layer_it - first_layer);
 #endif
 		last_neuron = layer_it->last_neuron;
-		if(ann->connection_rate >= 1 && !ann->forward_connections){
+		if(ann->connection_rate >= 1 && !ann->shortcut_connections){
 			/* optimization for fully connected networks */
-			/* but not forward connected networks */			
+			/* but not shortcut connected networks */			
 			prev_neurons = (layer_it-1)->first_neuron;
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
@@ -378,9 +378,9 @@ void fann_update_slopes_batch(struct fann *ann)
 		printf("layer[%d]\n", layer_it - first_layer);
 #endif
 		last_neuron = layer_it->last_neuron;
-		if(ann->connection_rate >= 1 && !ann->forward_connections){
+		if(ann->connection_rate >= 1 && !ann->shortcut_connections){
 			/* optimization for fully connected networks */
-			/* but not forward connected networks */			
+			/* but not shortcut connected networks */			
 			prev_neurons = (layer_it-1)->first_neuron;
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
diff --git a/src/include/fann.h b/src/include/fann.h
index 737d29a..9fdf739 100644
--- a/src/include/fann.h
+++ b/src/include/fann.h
@@ -121,15 +121,15 @@ FANN_EXTERNAL struct fann * FANN_API fann_create(float connection_rate, float le
 FANN_EXTERNAL struct fann * FANN_API fann_create_array(float connection_rate, float learning_rate,
 	unsigned int num_layers, unsigned int * layers);
 
-/* create a fully connected neural network with forward connections.
+/* create a fully connected neural network with shortcut connections.
  */
-FANN_EXTERNAL struct fann * FANN_API fann_create_forward(float learning_rate,
+FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut(float learning_rate,
 	unsigned int num_layers, /* the number of layers, including the input and output layer */
 	...); /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
 
-/* create a neural network with forward connections.
+/* create a neural network with shortcut connections.
  */
-FANN_EXTERNAL struct fann * FANN_API fann_create_forward_array(float learning_rate, unsigned int num_layers, unsigned int * layers);	
+FANN_EXTERNAL struct fann * FANN_API fann_create_shortcut_array(float learning_rate, unsigned int num_layers, unsigned int * layers);	
 	
 /* Runs a input through the network, and returns the output.
  */
@@ -377,17 +377,13 @@ FANN_EXTERNAL fann_type FANN_API fann_get_activation_output_steepness(struct fan
  */
 FANN_EXTERNAL void FANN_API fann_set_activation_output_steepness(struct fann *ann, fann_type steepness);
 
-/* When using this, training is usually faster. (default).
-   Makes the error used for calculating the slopes
-   higher when the difference is higher.
+/* Set the error function used during training. (default FANN_ERRORFUNC_TANH)
  */
-FANN_EXTERNAL void FANN_API fann_set_use_tanh_error_function(struct fann *ann, unsigned int use_tanh_error_function);
+FANN_EXTERNAL void FANN_API fann_set_train_error_function(struct fann *ann, unsigned int train_error_function);
 
-/* When using this, training is usually faster. (default).
-   Makes the error used for calculating the slopes
-   higher when the difference is higher.
+/* Get the error function used during training.
  */
-FANN_EXTERNAL unsigned int FANN_API fann_get_use_tanh_error_function(struct fann *ann);
+FANN_EXTERNAL unsigned int FANN_API fann_get_train_error_function(struct fann *ann);
 
 /* Decay is used to keep the weights from growing too high (default -0.0001). */
 FANN_EXTERNAL float FANN_API fann_get_quickprop_decay(struct fann *ann);
diff --git a/src/include/fann_data.h b/src/include/fann_data.h
index 8aebe2e..23cff67 100644
--- a/src/include/fann_data.h
+++ b/src/include/fann_data.h
@@ -75,12 +75,12 @@ struct fann
 	 */
 	float connection_rate;
 
-	/* is 1 if forward connections are used in the ann otherwise 0
-	 * Forward connections are connections that skip layers.
-	 * A fully connected ann with forward connections are a ann where
+	/* is 1 if shortcut connections are used in the ann, otherwise 0.
+	 * Shortcut connections are connections that skip layers.
+	 * A fully connected ann with shortcut connections is an ann where
 	 * neurons have connections to all neurons in all later layers.
 	 */
-	unsigned int forward_connections;
+	unsigned int shortcut_connections;
 
 	/* pointer to the first layer (input layer) in an array of all the layers,
 	 * including the input and output layers 
@@ -167,11 +167,9 @@ struct fann
 	 */
 	float MSE_value;
 
-	/* When using this, training is usually faster.
-	   Makes the error used for calculating the slopes
-	   higher when the difference is higher.
+	/* The error function used during training. (default FANN_ERRORFUNC_TANH)
 	 */
-	unsigned int use_tanh_error_function;
+	unsigned int train_error_function;
 	
 	/* Variables for use with Cascade Correlation */
 
@@ -258,11 +256,24 @@ enum {
 	FANN_TRAIN_QUICKPROP
 };
 
-static char const * const FANN_TRAINING_NAMES[] = {
+static char const * const FANN_TRAIN_NAMES[] = {
 	"FANN_TRAIN_INCREMENTAL",
 	"FANN_TRAIN_BATCH",
 	"FANN_TRAIN_RPROP",
 	"FANN_TRAIN_QUICKPROP"
 };
 
+enum {
+	/* Standard linear error function */
+	FANN_ERRORFUNC_LINEAR = 0,
+	/* Tanh error function, usually better but can require
+	   a lower learning rate */
+	FANN_ERRORFUNC_TANH
+};
+
+static char const * const FANN_ERRORFUNC_NAMES[] = {
+	"FANN_ERRORFUNC_LINEAR",
+	"FANN_ERRORFUNC_TANH"
+};
+
 #endif

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/libfann.git


