[libfann] 139/242: Made several minor changes to prepare for release 1.2.0; also added fann_print_parameters and fann_test_data
Christian Kastner
chrisk-guest at moszumanska.debian.org
Sat Oct 4 21:10:31 UTC 2014
This is an automated email from the git hooks/post-receive script.
chrisk-guest pushed a commit to tag Version2_0_0
in repository libfann.
commit 48cc674e40fbfa69d22b0e53345d2ba8be7611e6
Author: Steffen Nissen <lukesky at diku.dk>
Date: Thu Jun 24 21:30:07 2004 +0000
Made several minor changes to prepare for release 1.2.0; also added fann_print_parameters and fann_test_data
---
src/Makefile.am | 2 +-
src/Makefile.in | 2 +-
src/fann.c | 16 +++--
src/fann_options.c | 33 +++++++++
src/fann_train.c | 20 +++---
src/fann_train_data.c | 42 ++++++++----
src/include/fann.h | 58 +++++++++-------
src/include/fann_activation.h | 156 +++++++++++++++++++++++-------------------
src/include/fann_data.h | 29 +++++---
9 files changed, 218 insertions(+), 140 deletions(-)
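The two new public entry points are easiest to see in use. Below is a minimal sketch against the 1.2.0-era API in this diff (fann_create takes the connection rate, the learning rate, then the layer count and sizes); the network shape and the "xor.data" file name are illustrative placeholders:

#include <stdio.h>
#include "fann.h"

int main(void)
{
	/* fully connected net: 2 inputs, 3 hidden, 1 output, learning rate 0.7 */
	struct fann *ann = fann_create(1.0f, 0.7f, 3, 2, 3, 1);
	struct fann_train_data *data = fann_read_train_from_file("xor.data");

	fann_print_parameters(ann); /* new: dump every option to stdout */
	printf("MSE over the data set: %f\n", fann_test_data(ann, data)); /* new */

	fann_destroy_train(data);
	fann_destroy(ann);
	return 0;
}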
diff --git a/src/Makefile.am b/src/Makefile.am
index fd3d8a2..7ec97c4 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -2,7 +2,7 @@ SUBDIRS = include
lib_LTLIBRARIES = libfloatfann.la libdoublefann.la libfixedfann.la libfann.la
-AM_LDFLAGS = -version-info 2:0:1
+AM_LDFLAGS = -version-info 3:0:2
libfloatfann_la_SOURCES = floatfann.c
libdoublefann_la_SOURCES = doublefann.c
diff --git a/src/Makefile.in b/src/Makefile.in
index 62cc8c5..3cadb0b 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -133,7 +133,7 @@ SUBDIRS = include
lib_LTLIBRARIES = libfloatfann.la libdoublefann.la libfixedfann.la libfann.la
-AM_LDFLAGS = -version-info 2:0:1
+AM_LDFLAGS = -version-info 3:0:2
libfloatfann_la_SOURCES = floatfann.c
libdoublefann_la_SOURCES = doublefann.c
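For context, the AM_LDFLAGS change follows libtool's current:revision:age convention; 2:0:1 -> 3:0:2 is the standard step for a release that adds interfaces without removing or changing existing ones:

# libtool -version-info is current:revision:age
# interfaces added, none removed or changed:
#   current++, age++, revision = 0   =>   2:0:1 becomes 3:0:2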
diff --git a/src/fann.c b/src/fann.c
index 7b0744e..02fab8d 100644
--- a/src/fann.c
+++ b/src/fann.c
@@ -701,6 +701,9 @@ void fann_destroy(struct fann *ann)
fann_safe_free(ann->first_layer);
fann_safe_free(ann->output);
fann_safe_free(ann->train_errors);
+ fann_safe_free(ann->train_slopes);
+ fann_safe_free(ann->prev_train_slopes);
+ fann_safe_free(ann->prev_steps);
fann_safe_free(ann->errstr);
fann_safe_free(ann);
}
@@ -721,7 +724,7 @@ void fann_print_connections(struct fann *ann)
struct fann_neuron *neuron_it;
unsigned int i, value;
char *neurons;
- unsigned int num_neurons = fann_get_total_neurons(ann)+1;
+ unsigned int num_neurons = fann_get_total_neurons(ann) - fann_get_num_output(ann);
neurons = (char *)malloc(num_neurons+1);
neurons[num_neurons] = 0;
@@ -733,14 +736,15 @@ void fann_print_connections(struct fann *ann)
for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
for(neuron_it = layer_it->first_neuron;
- neuron_it != layer_it->last_neuron; neuron_it++){
+ neuron_it != layer_it->last_neuron-1; neuron_it++){
+
memset(neurons, (int)'.', num_neurons);
for(i = 0; i < neuron_it->num_connections; i++){
value = (unsigned int)(fann_abs(neuron_it->weights[i])+0.5);
if(value > 25) value = 25;
neurons[neuron_it->connected_neurons[i] - ann->first_layer->first_neuron] = 'a' + value;
}
- printf("L %03d / N %04d %s\n", layer_it - ann->first_layer,
+ printf("L %3d / N %4d %s\n", layer_it - ann->first_layer,
neuron_it - ann->first_layer->first_neuron, neurons);
}
}
@@ -846,9 +850,9 @@ struct fann * fann_allocate_structure(float learning_rate, unsigned int num_laye
ann->train_slopes = NULL;
ann->prev_steps = NULL;
ann->prev_train_slopes = NULL;
- ann->training_algorithm = FANN_RPROP_TRAIN;
- ann->num_errors = 0;
- ann->error_value = 0;
+ ann->training_algorithm = FANN_TRAIN_RPROP;
+ ann->num_MSE = 0;
+ ann->MSE_value = 0;
ann->forward_connections = 0;
ann->use_tanh_error_function = 1;
diff --git a/src/fann_options.c b/src/fann_options.c
index 5334504..edfc172 100644
--- a/src/fann_options.c
+++ b/src/fann_options.c
@@ -26,6 +26,39 @@
#include "fann.h"
#include "fann_errno.h"
+void fann_print_parameters(struct fann *ann)
+{
+ struct fann_layer *layer_it;
+
+ printf("Input layer : %2d neurons, 1 bias\n", ann->num_input);
+ for(layer_it = ann->first_layer+1; layer_it != ann->last_layer-1; layer_it++){
+ printf(" Hidden layer : %2d neurons, 1 bias\n",
+ layer_it->last_neuron - layer_it->first_neuron - 1);
+ }
+ printf("Output layer : %2d neurons\n", ann->num_output);
+ printf("Total neurons and biases : %2d\n", fann_get_total_neurons(ann));
+ printf("Total connections : %2d\n", ann->total_connections);
+ printf("Connection rate : %5.2f\n", ann->connection_rate);
+ printf("Forward connections : %2d\n", ann->forward_connections);
+ printf("Training algorithm : %s\n", FANN_TRAINING_NAMES[ann->training_algorithm]);
+ printf("Learning rate : %5.2f\n", ann->learning_rate);
+ printf("Activation function hidden : %s\n", FANN_ACTIVATION_NAMES[ann->activation_function_hidden]);
+ printf("Activation function output : %s\n", FANN_ACTIVATION_NAMES[ann->activation_function_output]);
+ printf("Activation steepness hidden: %5.2f\n", ann->activation_steepness_hidden);
+ printf("Activation steepness output: %5.2f\n", ann->activation_steepness_output);
+#ifdef FIXEDFANN
+ printf("Decimal point : %2d\n", ann->decimal_point);
+ printf("Multiplier : %2d\n", ann->multiplier);
+#endif
+ printf("Use tanh error function : %2d\n", ann->use_tanh_error_function);
+ printf("Quickprop decay : %9.6f\n", ann->quickprop_decay);
+ printf("Quickprop mu : %5.2f\n", ann->quickprop_mu);
+ printf("RPROP increase factor : %5.2f\n", ann->rprop_increase_factor);
+ printf("RPROP decrease factor : %5.2f\n", ann->rprop_decrease_factor);
+ printf("RPROP delta min : %5.2f\n", ann->rprop_delta_min);
+ printf("RPROP delta max : %5.2f\n", ann->rprop_delta_max);
+}
+
unsigned int fann_get_training_algorithm(struct fann *ann)
{
return ann->training_algorithm;
diff --git a/src/fann_train.c b/src/fann_train.c
index cc42e12..674a5c7 100644
--- a/src/fann_train.c
+++ b/src/fann_train.c
@@ -89,14 +89,14 @@ fann_type *fann_test(struct fann *ann, fann_type *input, fann_type *desired_outp
}
#ifdef FIXEDFANN
- ann->error_value += (neuron_diff/(float)ann->multiplier) * (neuron_diff/(float)ann->multiplier);
+ ann->MSE_value += (neuron_diff/(float)ann->multiplier) * (neuron_diff/(float)ann->multiplier);
#else
- ann->error_value += neuron_diff * neuron_diff;
+ ann->MSE_value += neuron_diff * neuron_diff;
#endif
desired_output++;
}
- ann->num_errors++;
+ ann->num_MSE++;
return output_begin;
}
@@ -113,8 +113,8 @@ float fann_get_error(struct fann *ann)
*/
float fann_get_MSE(struct fann *ann)
{
- if(ann->num_errors){
- return ann->error_value/(float)ann->num_errors;
+ if(ann->num_MSE){
+ return ann->MSE_value/(float)ann->num_MSE;
}else{
return 0;
}
@@ -132,8 +132,8 @@ void fann_reset_error(struct fann *ann)
*/
void fann_reset_MSE(struct fann *ann)
{
- ann->num_errors = 0;
- ann->error_value = 0;
+ ann->num_MSE = 0;
+ ann->MSE_value = 0;
}
/* INTERNAL FUNCTION
@@ -180,7 +180,7 @@ void fann_compute_MSE(struct fann *ann, fann_type *desired_output)
neuron_diff /= 2.0;
}
- ann->error_value += neuron_diff * neuron_diff;
+ ann->MSE_value += neuron_diff * neuron_diff;
if(ann->use_tanh_error_function){
if ( neuron_diff < -.9999999 )
@@ -198,7 +198,7 @@ void fann_compute_MSE(struct fann *ann, fann_type *desired_output)
desired_output++;
error_it++;
}
- ann->num_errors++;
+ ann->num_MSE++;
}
/* INTERNAL FUNCTION
@@ -439,7 +439,7 @@ void fann_clear_train_arrays(struct fann *ann)
}
}
- if(ann->training_algorithm == FANN_RPROP_TRAIN){
+ if(ann->training_algorithm == FANN_TRAIN_RPROP){
for(i = 0; i < ann->total_connections; i++){
ann->prev_train_slopes[i] = 0.0125;
}
diff --git a/src/fann_train_data.c b/src/fann_train_data.c
index 0baefc3..c4d0b1f 100644
--- a/src/fann_train_data.c
+++ b/src/fann_train_data.c
@@ -157,16 +157,16 @@ float fann_train_epoch_incremental(struct fann *ann, struct fann_train_data *dat
float fann_train_epoch(struct fann *ann, struct fann_train_data *data)
{
switch(ann->training_algorithm){
- case FANN_QUICKPROP_TRAIN:
+ case FANN_TRAIN_QUICKPROP:
return fann_train_epoch_quickprop(ann, data);
break;
- case FANN_RPROP_TRAIN:
+ case FANN_TRAIN_RPROP:
return fann_train_epoch_irpropm(ann, data);
break;
- case FANN_BATCH_TRAIN:
+ case FANN_TRAIN_BATCH:
return fann_train_epoch_batch(ann, data);
break;
- case FANN_INCREMENTAL_TRAIN:
+ case FANN_TRAIN_INCREMENTAL:
return fann_train_epoch_incremental(ann, data);
break;
default:
@@ -174,6 +174,20 @@ float fann_train_epoch(struct fann *ann, struct fann_train_data *data)
}
}
+/* Test a set of training data and calculate the MSE
+ */
+float fann_test_data(struct fann *ann, struct fann_train_data *data)
+{
+ unsigned int i;
+ fann_reset_MSE(ann);
+
+ for(i = 0; i != data->num_data; i++){
+ fann_test(ann, data->input[i], data->output[i]);
+ }
+
+ return fann_get_MSE(ann);
+}
+
/* Train directly on the training data.
*/
void fann_train_on_data_callback(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (*callback)(unsigned int epochs, float error))
@@ -184,17 +198,17 @@ void fann_train_on_data_callback(struct fann *ann, struct fann_train_data *data,
#ifdef DEBUG
printf("Training with ");
switch(ann->training_algorithm){
- case FANN_QUICKPROP_TRAIN:
- printf("FANN_QUICKPROP_TRAIN");
+ case FANN_TRAIN_QUICKPROP:
+ printf("FANN_TRAIN_QUICKPROP");
break;
- case FANN_RPROP_TRAIN:
- printf("FANN_RPROP_TRAIN");
+ case FANN_TRAIN_RPROP:
+ printf("FANN_TRAIN_RPROP");
break;
- case FANN_BATCH_TRAIN:
- printf("FANN_BATCH_TRAIN");
+ case FANN_TRAIN_BATCH:
+ printf("FANN_TRAIN_BATCH");
break;
- case FANN_INCREMENTAL_TRAIN:
- printf("FANN_INCREMENTAL_TRAIN");
+ case FANN_TRAIN_INCREMENTAL:
+ printf("FANN_TRAIN_INCREMENTAL");
break;
}
printf("\n");
@@ -206,8 +220,8 @@ void fann_train_on_data_callback(struct fann *ann, struct fann_train_data *data,
/* some training algorithms need internal state to be cleared before training starts.
*/
- if(ann->training_algorithm == FANN_RPROP_TRAIN ||
- ann->training_algorithm == FANN_QUICKPROP_TRAIN){
+ if(ann->training_algorithm == FANN_TRAIN_RPROP ||
+ ann->training_algorithm == FANN_TRAIN_QUICKPROP){
fann_clear_train_arrays(ann);
}
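With fann_train_epoch now public alongside the new fann_test_data, a caller can drive training itself and watch generalisation per epoch. A sketch, assuming ann was created as in the earlier example and that the two file names are placeholders:

struct fann_train_data *train = fann_read_train_from_file("train.data");
struct fann_train_data *test = fann_read_train_from_file("test.data");
unsigned int epoch;

for(epoch = 1; epoch <= 500; epoch++){
	/* fann_train_epoch returns the MSE over the training set */
	float train_mse = fann_train_epoch(ann, train);
	if(epoch % 100 == 0)
		printf("epoch %4u: train MSE %f, test MSE %f\n",
			epoch, train_mse, fann_test_data(ann, test));
}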
diff --git a/src/include/fann.h b/src/include/fann.h
index 1ce1075..aee46fb 100644
--- a/src/include/fann.h
+++ b/src/include/fann.h
@@ -76,7 +76,7 @@ struct fann * fann_create(float connection_rate, float learning_rate,
struct fann * fann_create_array(float connection_rate, float learning_rate,
unsigned int num_layers, unsigned int * layers);
-/* create a neural network with forward connections.
+/* create a fully connected neural network with forward connections.
*/
struct fann * fann_create_forward(float learning_rate,
unsigned int num_layers, /* the number of layers, including the input and output layer */
@@ -151,10 +151,6 @@ int fann_save_to_fixed(struct fann *ann, const char *configuration_file);
*/
void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output);
-/* Train one epoch with a set of training data.
- */
-float fann_train_epoch(struct fann *ann, struct fann_train_data *data);
-
#endif /* NOT FIXEDFANN */
/* Test with a set of inputs, and a set of desired outputs.
@@ -204,6 +200,14 @@ void fann_destroy_train(struct fann_train_data* train_data);
#ifndef FIXEDFANN
+/* Train one epoch with a set of training data.
+ */
+float fann_train_epoch(struct fann *ann, struct fann_train_data *data);
+
+/* Test a set of training data and calculate the MSE
+ */
+float fann_test_data(struct fann *ann, struct fann_train_data *data);
+
/* Trains on an entire dataset, for a maximum of max_epochs
epochs or until mean square error is lower than desired_error.
Reports about the progress are given every
@@ -255,6 +259,9 @@ void fann_save_train_to_fixed(struct fann_train_data* data, char *filename, unsi
/* ----- Implemented in fann_options.c Get and set options for the ANNs ----- */
+/* Prints all of the parameters and options of the ANN */
+void fann_print_parameters(struct fann *ann);
+
/* Get the training algorithm.
*/
unsigned int fann_get_training_algorithm(struct fann *ann);
@@ -327,31 +334,13 @@ fann_type fann_get_activation_output_steepness(struct fann *ann);
*/
void fann_set_activation_output_steepness(struct fann *ann, fann_type steepness);
-/* When using this, training is usually faster. (default ).
+/* When using this, training is usually faster. (default).
Makes the error used for calculating the slopes
higher when the difference is higher.
*/
void fann_set_use_tanh_error_function(struct fann *ann, unsigned int use_tanh_error_function);
-/* Decay is used to make the weights do not go so high (default -0.0001). */
-void fann_set_quickprop_decay(struct fann *ann, float quickprop_decay);
-
-/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
-void fann_set_quickprop_mu(struct fann *ann, float quickprop_mu);
-
-/* Tells how much the stepsize should increase during learning (default 1.2). */
-void fann_set_rprop_increase_factor(struct fann *ann, float rprop_increase_factor);
-
-/* Tells how much the stepsize should decrease during learning (default 0.5). */
-void fann_set_rprop_decrease_factor(struct fann *ann, float rprop_decrease_factor);
-
-/* The minimum stepsize (default 0.0). */
-void fann_set_rprop_delta_min(struct fann *ann, float rprop_delta_min);
-
-/* The maximum stepsize (default 50.0). */
-void fann_set_rprop_delta_max(struct fann *ann, float rprop_delta_max);
-
-/* When using this, training is usually faster. (default ).
+/* When using this, training is usually faster. (default).
Makes the error used for calculating the slopes
higher when the difference is higher.
*/
@@ -360,21 +349,38 @@ unsigned int fann_get_use_tanh_error_function(struct fann *ann);
/* Decay is used to keep the weights from growing too large (default -0.0001). */
float fann_get_quickprop_decay(struct fann *ann);
+/* Decay is used to keep the weights from growing too large (default -0.0001). */
+void fann_set_quickprop_decay(struct fann *ann, float quickprop_decay);
+
/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
float fann_get_quickprop_mu(struct fann *ann);
+/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
+void fann_set_quickprop_mu(struct fann *ann, float quickprop_mu);
+
/* Tells how much the stepsize should increase during learning (default 1.2). */
float fann_get_rprop_increase_factor(struct fann *ann);
+/* Tells how much the stepsize should increase during learning (default 1.2). */
+void fann_set_rprop_increase_factor(struct fann *ann, float rprop_increase_factor);
+
/* Tells how much the stepsize should decrease during learning (default 0.5). */
float fann_get_rprop_decrease_factor(struct fann *ann);
+/* Tells how much the stepsize should decrease during learning (default 0.5). */
+void fann_set_rprop_decrease_factor(struct fann *ann, float rprop_decrease_factor);
+
/* The minimum stepsize (default 0.0). */
float fann_get_rprop_delta_min(struct fann *ann);
+/* The minimum stepsize (default 0.0). */
+void fann_set_rprop_delta_min(struct fann *ann, float rprop_delta_min);
+
/* The maximum stepsize (default 50.0). */
float fann_get_rprop_delta_max(struct fann *ann);
-
+
+/* The maximum stepsize (default 50.0). */
+void fann_set_rprop_delta_max(struct fann *ann, float rprop_delta_max);
/* Get the number of input neurons.
*/
diff --git a/src/include/fann_activation.h b/src/include/fann_activation.h
index b8fed1f..f55b68e 100644
--- a/src/include/fann_activation.h
+++ b/src/include/fann_activation.h
@@ -30,78 +30,92 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
d is the derivative.
*/
-/* Linear activation function.
- span: -inf < y < inf
- y = x*s, d = 1*s
- Can NOT be used in fixed point.
- (NOT) implemented yet.
-*/
-#define FANN_LINEAR 4
-
-/* Threshold activation function.
- x < 0 -> y = 0, x >= 0 -> y = 1
- Can NOT be used during training.
-*/
-#define FANN_THRESHOLD 2
-
-/* Threshold activation function.
- x < 0 -> y = 0, x >= 0 -> y = 1
- Can NOT be used during training.
-*/
-#define FANN_THRESHOLD_SYMMETRIC 11
-
-/* Sigmoid activation function.
- One of the most used activation functions.
- span: 0 < y < 1
- y = 1/(1 + exp(-2*s*x)), d = 2*s*y*(1 - y)
-*/
-#define FANN_SIGMOID 1
-
-/* Stepwise linear approximation to sigmoid.
- Faster than sigmoid but a bit less precise.
-*/
-#define FANN_SIGMOID_STEPWISE 3 /* (default) */
-
-
-/* Symmetric sigmoid activation function, aka. tanh.
- One of the most used activation functions.
- span: -1 < y < 1
- y = tanh(s*x) = 2/(1 + exp(-2*s*x)) - 1, d = s*(1-(y*y))
-*/
-#define FANN_SIGMOID_SYMMETRIC 5
+enum {
+ /* Linear activation function.
+ span: -inf < y < inf
+ y = x*s, d = 1*s
+ Can NOT be used in fixed point.
+ */
+ FANN_LINEAR = 0,
+
+ /* Threshold activation function.
+ x < 0 -> y = 0, x >= 0 -> y = 1
+ Can NOT be used during training.
+ */
+ FANN_THRESHOLD,
+
+ /* Symmetric threshold activation function.
+ x < 0 -> y = -1, x >= 0 -> y = 1
+ Can NOT be used during training.
+ */
+ FANN_THRESHOLD_SYMMETRIC,
+
+ /* Sigmoid activation function.
+ One of the most used activation functions.
+ span: 0 < y < 1
+ y = 1/(1 + exp(-2*s*x)), d = 2*s*y*(1 - y)
+ */
+ FANN_SIGMOID,
+
+ /* Stepwise linear approximation to sigmoid.
+ Faster than sigmoid but a bit less precise.
+ */
+ FANN_SIGMOID_STEPWISE, /* (default) */
+
+
+ /* Symmetric sigmoid activation function, a.k.a. tanh.
+ One of the most used activation functions.
+ span: -1 < y < 1
+ y = tanh(s*x) = 2/(1 + exp(-2*s*x)) - 1, d = s*(1-(y*y))
+ */
+ FANN_SIGMOID_SYMMETRIC,
-/* Stepwise linear approximation to symmetric sigmoid.
- Faster than symmetric sigmoid but a bit less precise.
-*/
-#define FANN_SIGMOID_SYMMETRIC_STEPWISE 6
-
-/* Gausian activation function.
- 0 when x = -inf, 1 when x = 0 and 0 when x = inf
- span: 0 < y < 1
- y = exp(-x*s*x*s), d = -2*x*y*s
- NOT implemented yet.
-*/
-#define FANN_GAUSSIAN 7
-
-/* Stepwise linear approximation to gaussian.
- Faster than gaussian but a bit less precise.
- NOT implemented yet.
-*/
-#define FANN_GAUSSIAN_STEPWISE 8 /* not implemented yet. */
-
-/* Fast (sigmoid like) activation function defined by David Elliott
- span: 0 < y < 1
- y = ((x*s) / 2) / (1 + |x*s|) + 0.5, d = s*1/(2*(1+|x|)*(1+|x|))
- NOT implemented yet.
-*/
-#define FANN_ELLIOT 9
-
-/* Fast (symmetric sigmoid like) activation function defined by David Elliott
- span: -1 < y < 1
- y = (x*s) / (1 + |x*s|), d = s*1/((1+|x|)*(1+|x|))
- NOT implemented yet.
-*/
-#define FANN_ELLIOT_SYMMETRIC 10
+ /* Stepwise linear approximation to symmetric sigmoid.
+ Faster than symmetric sigmoid but a bit less precise.
+ */
+ FANN_SIGMOID_SYMMETRIC_STEPWISE,
+
+ /* Gaussian activation function.
+ 0 when x = -inf, 1 when x = 0 and 0 when x = inf
+ span: 0 < y < 1
+ y = exp(-x*s*x*s), d = -2*x*y*s
+ */
+ FANN_GAUSSIAN,
+
+ /* Stepwise linear approximation to gaussian.
+ Faster than gaussian but a bit less precise.
+ NOT implemented yet.
+ */
+ FANN_GAUSSIAN_STEPWISE,
+
+ /* Fast (sigmoid-like) activation function defined by David Elliott
+ span: 0 < y < 1
+ y = ((x*s) / 2) / (1 + |x*s|) + 0.5, d = s*1/(2*(1+|x|)*(1+|x|))
+ NOT implemented yet.
+ */
+ FANN_ELLIOT,
+
+ /* Fast (symmetric sigmoid-like) activation function defined by David Elliott
+ span: -1 < y < 1
+ y = (x*s) / (1 + |x*s|), d = s*1/((1+|x|)*(1+|x|))
+ NOT implemented yet.
+ */
+ FANN_ELLIOT_SYMMETRIC
+};
+
+static char const * const FANN_ACTIVATION_NAMES[] = {
+ "FANN_LINEAR",
+ "FANN_THRESHOLD",
+ "FANN_THRESHOLD_SYMMETRIC",
+ "FANN_SIGMOID",
+ "FANN_SIGMOID_STEPWISE",
+ "FANN_SIGMOID_SYMMETRIC",
+ "FANN_SIGMOID_SYMMETRIC_STEPWISE",
+ "FANN_GAUSSIAN",
+ "FANN_GAUSSIAN_STEPWISE",
+ "FANN_ELLIOT",
+ "FANN_ELLIOT_SYMMETRIC"
+};
/* Implementation of the activation functions
*/
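Converting the scattered #defines into a dense enum is what makes the FANN_ACTIVATION_NAMES table workable: the enum values double as indices, which is exactly how the new fann_print_parameters looks up names. The array order must therefore track the enum order exactly. For example:

/* enum value used directly as an index into the names table */
printf("hidden activation: %s\n",
	FANN_ACTIVATION_NAMES[FANN_SIGMOID_SYMMETRIC]);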
diff --git a/src/include/fann_data.h b/src/include/fann_data.h
index ebe8e05..8aebe2e 100644
--- a/src/include/fann_data.h
+++ b/src/include/fann_data.h
@@ -158,14 +158,14 @@ struct fann
/* used to store outputs in */
fann_type *output;
- /* the number of data used to calculate the error.
+ /* the number of data used to calculate the mean square error.
*/
- unsigned int num_errors;
+ unsigned int num_MSE;
/* the total error value.
- the real mean square error is error_value/num_errors
+ the real mean square error is MSE_value/num_MSE
*/
- float error_value;
+ float MSE_value;
/* When using this, training is usually faster.
Makes the error used for calculating the slopes
@@ -248,14 +248,21 @@ struct fann_error
};
enum {
- /* The quickprop training algorithm */
- FANN_QUICKPROP_TRAIN = 0,
+ /* Standard backpropagation incremental or online training */
+ FANN_TRAIN_INCREMENTAL = 0,
+ /* Standard backpropagation batch training */
+ FANN_TRAIN_BATCH,
/* The iRprop- training algorithm */
- FANN_RPROP_TRAIN,
- /* Standard batch training */
- FANN_BATCH_TRAIN,
- /* Standard incremental or online training */
- FANN_INCREMENTAL_TRAIN
+ FANN_TRAIN_RPROP,
+ /* The quickprop training algorithm */
+ FANN_TRAIN_QUICKPROP
+};
+
+static char const * const FANN_TRAINING_NAMES[] = {
+ "FANN_TRAIN_INCREMENTAL",
+ "FANN_TRAIN_BATCH",
+ "FANN_TRAIN_RPROP",
+ "FANN_TRAIN_QUICKPROP"
};
#endif
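The same pattern applies to the training enum, but note the renumbering: the raw value 0 used to mean FANN_QUICKPROP_TRAIN and now means FANN_TRAIN_INCREMENTAL, so any caller that persisted the numeric value needs updating. The lookup itself mirrors what fann_print_parameters does:

/* map the current algorithm back to its printable name */
unsigned int alg = fann_get_training_algorithm(ann);
printf("training with %s\n", FANN_TRAINING_NAMES[alg]);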
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/libfann.git