[libfann] 86/242: split fann.c and fann_internal.c to several files (also rearange fann.h)
Christian Kastner
chrisk-guest at moszumanska.debian.org
Sat Oct 4 21:10:22 UTC 2014
This is an automated email from the git hooks/post-receive script.
chrisk-guest pushed a commit to tag Version2_0_0
in repository libfann.
commit e26c4c4164c4fa70aa52ce4bef1b2c4d62a5a7fd
Author: Steffen Nissen <lukesky at diku.dk>
Date: Mon Feb 16 22:42:06 2004 +0000
split fann.c and fann_internal.c to several files (also rearange fann.h)
---
autogen.sh | 8 +-
config.guess | 21 +-
config.sub | 35 +-
libtool | 11 +-
ltmain.sh | 11 +-
src/Makefile.am | 3 +-
src/Makefile.in | 16 +-
src/doublefann.c | 10 +-
src/fann.c | 823 +++++++++--------------------------------
src/fann_error.c | 170 +++++++++
src/fann_internal.c | 883 --------------------------------------------
src/fann_io.c | 409 ++++++++++++++++++++
src/fann_options.c | 307 +++++++++++++++
src/fann_train.c | 268 ++++++++++++++
src/fann_train_data.c | 330 +++++++++++++++++
src/fixedfann.c | 11 +-
src/floatfann.c | 11 +-
src/include/fann.h | 211 ++++++-----
src/include/fann_internal.h | 1 -
19 files changed, 1874 insertions(+), 1665 deletions(-)
diff --git a/autogen.sh b/autogen.sh
index b78a9ef..9b9b03c 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -1,8 +1,8 @@
#!/bin/sh
-aclocal
+aclocal-1.7
libtoolize --force
-autoheader
-autoconf
-automake --add-missing --copy
+autoheader2.50
+autoconf2.50
+automake-1.7 --add-missing --copy
./configure $@
diff --git a/config.guess b/config.guess
index 1127162..e8c6fc0 100755
--- a/config.guess
+++ b/config.guess
@@ -3,7 +3,7 @@
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
# 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
-timestamp='2003-10-07'
+timestamp='2004-01-05'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -221,6 +221,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
mvmeppc:OpenBSD:*:*)
echo powerpc-unknown-openbsd${UNAME_RELEASE}
exit 0 ;;
+ pegasos:OpenBSD:*:*)
+ echo powerpc-unknown-openbsd${UNAME_RELEASE}
+ exit 0 ;;
pmax:OpenBSD:*:*)
echo mipsel-unknown-openbsd${UNAME_RELEASE}
exit 0 ;;
@@ -307,6 +310,9 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
*:OS/390:*:*)
echo i370-ibm-openedition
exit 0 ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit 0 ;;
arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
echo arm-acorn-riscix${UNAME_RELEASE}
exit 0;;
@@ -742,6 +748,11 @@ EOF
FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit 0 ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit 0 ;;
i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
exit 0 ;;
@@ -986,6 +997,9 @@ EOF
i*86:atheos:*:*)
echo ${UNAME_MACHINE}-unknown-atheos
exit 0 ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit 0 ;;
i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
echo i386-unknown-lynxos${UNAME_RELEASE}
exit 0 ;;
@@ -1172,7 +1186,7 @@ EOF
*:QNX:*:4*)
echo i386-pc-qnx
exit 0 ;;
- NSR-[DGKLNPTVWY]:NONSTOP_KERNEL:*:*)
+ NSR-?:NONSTOP_KERNEL:*:*)
echo nsr-tandem-nsk${UNAME_RELEASE}
exit 0 ;;
*:NonStop-UX:*:*)
@@ -1216,6 +1230,9 @@ EOF
SEI:*:*:SEIUX)
echo mips-sei-seiux${UNAME_RELEASE}
exit 0 ;;
+ *:DRAGONFLY:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly${UNAME_RELEASE}
+ exit 0 ;;
esac
#echo '(No uname command or uname output not recognized.)' 1>&2
diff --git a/config.sub b/config.sub
index 79657cd..463186d 100755
--- a/config.sub
+++ b/config.sub
@@ -3,7 +3,7 @@
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
# 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
-timestamp='2003-10-07'
+timestamp='2004-01-05'
# This file is (in principle) common to ALL GNU software.
# The presence of a machine in this file suggests that SOME GNU software
@@ -118,7 +118,8 @@ esac
# Here we must recognize all the valid KERNEL-OS combinations.
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
- nto-qnx* | linux-gnu* | linux-dietlibc | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*)
+ nto-qnx* | linux-gnu* | linux-dietlibc | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | \
+ kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
;;
@@ -379,6 +380,9 @@ case $basic_machine in
amd64)
basic_machine=x86_64-pc
;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
amdahl)
basic_machine=580-amdahl
os=-sysv
@@ -743,6 +747,10 @@ case $basic_machine in
basic_machine=or32-unknown
os=-coff
;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
OSE68000 | ose68000)
basic_machine=m68000-ericsson
os=-ose
@@ -963,6 +971,10 @@ case $basic_machine in
tower | tower-32)
basic_machine=m68k-ncr
;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
udi29k)
basic_machine=a29k-amd
os=-udi
@@ -1137,13 +1149,13 @@ case $os in
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
| -chorusos* | -chorusrdb* \
| -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -uxpv* | -beos* | -mpeix* | -udk* \
+ | -mingw32* | -linux-gnu* | -linux-uclibc* | -uxpv* | -beos* | -mpeix* | -udk* \
| -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
| -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
| -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
| -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
| -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
- | -powermax* | -dnix* | -nx6 | -nx7 | -sei*)
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly*)
# Remember, each alternative MUST END IN *, to match a version number.
;;
-qnx*)
@@ -1182,6 +1194,9 @@ case $os in
-opened*)
os=-openedition
;;
+ -os400*)
+ os=-os400
+ ;;
-wince*)
os=-wince
;;
@@ -1203,6 +1218,9 @@ case $os in
-atheos*)
os=-atheos
;;
+ -syllable*)
+ os=-syllable
+ ;;
-386bsd)
os=-bsd
;;
@@ -1225,6 +1243,9 @@ case $os in
-sinix*)
os=-sysv4
;;
+ -tpf*)
+ os=-tpf
+ ;;
-triton*)
os=-sysv3
;;
@@ -1473,9 +1494,15 @@ case $basic_machine in
-mvs* | -opened*)
vendor=ibm
;;
+ -os400*)
+ vendor=ibm
+ ;;
-ptx*)
vendor=sequent
;;
+ -tpf*)
+ vendor=ibm
+ ;;
-vxsim* | -vxworks* | -windiss*)
vendor=wrs
;;
diff --git a/libtool b/libtool
index 5811571..2eb3554 100755
--- a/libtool
+++ b/libtool
@@ -402,7 +402,7 @@ modename="$progname"
PROGRAM=ltmain.sh
PACKAGE=libtool
VERSION=1.5.0a
-TIMESTAMP=" (1.1220.2.35 2003/11/12 18:51:58) Debian$Rev: 159 $"
+TIMESTAMP=" (1.1220.2.35 2003/11/12 18:51:58) Debian$Rev: 179 $"
default_mode=
help="Try \`$progname --help' for more information."
@@ -1204,7 +1204,7 @@ EOF
;;
esac
libtool_args="$nonopt"
- base_compile="$nonopt"
+ base_compile="$nonopt $@"
compile_command="$nonopt"
finalize_command="$nonopt"
@@ -1254,7 +1254,7 @@ EOF
# Only attempt this if the compiler in the base link
# command doesn't match the default compiler.
if test -n "$available_tags" && test -z "$tagname"; then
- case "$base_compile " in
+ case $base_compile in
# Blanks in the command may have been stripped by the calling shell,
# but not from the CC environment variable when configure was run.
"$CC "* | " $CC "* | "`$echo $CC` "* | " `$echo $CC` "*) ;;
@@ -1265,7 +1265,7 @@ EOF
if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$0" > /dev/null; then
# Evaluate the configuration.
eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $0`"
- case "$base_compile " in
+ case $base_compile in
"$CC "* | " $CC "* | "`$echo $CC` "* | " `$echo $CC` "*)
# The compiler in $compile_command matches
# the one in the tagged configuration.
@@ -1321,7 +1321,6 @@ EOF
# Go through the arguments, transforming them on the way.
while test "$#" -gt 0; do
arg="$1"
- base_compile="$base_compile $arg"
shift
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
@@ -5984,7 +5983,7 @@ relink_command=\"$relink_command\""
tmpdir="/tmp"
test -n "$TMPDIR" && tmpdir="$TMPDIR"
tmpdir="$tmpdir/libtool-$$"
- if $mkdir -p "$tmpdir" && chmod 700 "$tmpdir"; then :
+ if $mkdir "$tmpdir" && chmod 700 "$tmpdir"; then :
else
$echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
continue
diff --git a/ltmain.sh b/ltmain.sh
index b5afd12..99938a5 100644
--- a/ltmain.sh
+++ b/ltmain.sh
@@ -56,7 +56,7 @@ modename="$progname"
PROGRAM=ltmain.sh
PACKAGE=libtool
VERSION=1.5.0a
-TIMESTAMP=" (1.1220.2.35 2003/11/12 18:51:58) Debian$Rev: 159 $"
+TIMESTAMP=" (1.1220.2.35 2003/11/12 18:51:58) Debian$Rev: 179 $"
default_mode=
help="Try \`$progname --help' for more information."
@@ -858,7 +858,7 @@ EOF
;;
esac
libtool_args="$nonopt"
- base_compile="$nonopt"
+ base_compile="$nonopt $@"
compile_command="$nonopt"
finalize_command="$nonopt"
@@ -908,7 +908,7 @@ EOF
# Only attempt this if the compiler in the base link
# command doesn't match the default compiler.
if test -n "$available_tags" && test -z "$tagname"; then
- case "$base_compile " in
+ case $base_compile in
# Blanks in the command may have been stripped by the calling shell,
# but not from the CC environment variable when configure was run.
"$CC "* | " $CC "* | "`$echo $CC` "* | " `$echo $CC` "*) ;;
@@ -919,7 +919,7 @@ EOF
if grep "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$0" > /dev/null; then
# Evaluate the configuration.
eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $0`"
- case "$base_compile " in
+ case $base_compile in
"$CC "* | " $CC "* | "`$echo $CC` "* | " `$echo $CC` "*)
# The compiler in $compile_command matches
# the one in the tagged configuration.
@@ -975,7 +975,6 @@ EOF
# Go through the arguments, transforming them on the way.
while test "$#" -gt 0; do
arg="$1"
- base_compile="$base_compile $arg"
shift
case $arg in
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"")
@@ -5638,7 +5637,7 @@ relink_command=\"$relink_command\""
tmpdir="/tmp"
test -n "$TMPDIR" && tmpdir="$TMPDIR"
tmpdir="$tmpdir/libtool-$$"
- if $mkdir -p "$tmpdir" && chmod 700 "$tmpdir"; then :
+ if $mkdir "$tmpdir" && chmod 700 "$tmpdir"; then :
else
$echo "$modename: error: cannot create temporary directory \`$tmpdir'" 1>&2
continue
diff --git a/src/Makefile.am b/src/Makefile.am
index 4f55036..98f3247 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -8,4 +8,5 @@ AM_CFLAGS = -O9 -Wall
libfloatfann_la_SOURCES = floatfann.c
libdoublefann_la_SOURCES = doublefann.c
libfixedfann_la_SOURCES = fixedfann.c
-libfann_la_SOURCES = fann.c fann_internal.c
+libfann_la_SOURCES = fann.c fann_io.c fann_train.c fann_train_data.c fann_options.c fann_error.c
+
diff --git a/src/Makefile.in b/src/Makefile.in
index 5fdd133..a510eb1 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -139,7 +139,7 @@ AM_CFLAGS = -O9 -Wall
libfloatfann_la_SOURCES = floatfann.c
libdoublefann_la_SOURCES = doublefann.c
libfixedfann_la_SOURCES = fixedfann.c
-libfann_la_SOURCES = fann.c fann_internal.c
+libfann_la_SOURCES = fann.c fann_io.c fann_train.c fann_train_data.c fann_options.c fann_error.c
subdir = src
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
@@ -153,7 +153,8 @@ am_libdoublefann_la_OBJECTS = doublefann.lo
libdoublefann_la_OBJECTS = $(am_libdoublefann_la_OBJECTS)
libfann_la_LDFLAGS =
libfann_la_LIBADD =
-am_libfann_la_OBJECTS = fann.lo fann_internal.lo
+am_libfann_la_OBJECTS = fann.lo fann_io.lo fann_train.lo \
+ fann_train_data.lo fann_options.lo fann_error.lo
libfann_la_OBJECTS = $(am_libfann_la_OBJECTS)
libfixedfann_la_LDFLAGS =
libfixedfann_la_LIBADD =
@@ -168,7 +169,10 @@ DEFAULT_INCLUDES = -I. -I$(srcdir) -I$(top_builddir)/src/include
depcomp = $(SHELL) $(top_srcdir)/depcomp
am__depfiles_maybe = depfiles
@AMDEP_TRUE@DEP_FILES = ./$(DEPDIR)/doublefann.Plo ./$(DEPDIR)/fann.Plo \
-@AMDEP_TRUE@ ./$(DEPDIR)/fann_internal.Plo \
+@AMDEP_TRUE@ ./$(DEPDIR)/fann_error.Plo ./$(DEPDIR)/fann_io.Plo \
+@AMDEP_TRUE@ ./$(DEPDIR)/fann_options.Plo \
+@AMDEP_TRUE@ ./$(DEPDIR)/fann_train.Plo \
+@AMDEP_TRUE@ ./$(DEPDIR)/fann_train_data.Plo \
@AMDEP_TRUE@ ./$(DEPDIR)/fixedfann.Plo ./$(DEPDIR)/floatfann.Plo
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
@@ -243,7 +247,11 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/doublefann.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fann.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fann_internal.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fann_error.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fann_io.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fann_options.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fann_train.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fann_train_data.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/fixedfann.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/floatfann.Plo@am__quote@
diff --git a/src/doublefann.c b/src/doublefann.c
index fe15cf1..7769d7a 100644
--- a/src/doublefann.c
+++ b/src/doublefann.c
@@ -20,5 +20,13 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Easy way to allow for build of multiple binaries */
#include "doublefann.h"
-#include "fann.c"
+
#include "fann.h"
+#include "fann_internal.h"
+
+#include "fann.c"
+#include "fann_io.c"
+#include "fann_train.c"
+#include "fann_train_data.c"
+#include "fann_options.c"
+#include "fann_error.c"
diff --git a/src/fann.c b/src/fann.c
index 2855140..f1d5306 100644
--- a/src/fann.c
+++ b/src/fann.c
@@ -21,14 +21,35 @@
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
+#include <sys/time.h>
+#include <time.h>
#include "config.h"
-
#include "fann.h"
-
#include "fann_errno.h"
-//#define DEBUGTRAIN
+
+
+/* create a neural network.
+ */
+struct fann * fann_create(float connection_rate, float learning_rate,
+ unsigned int num_layers, /* the number of layers, including the input and output layer */
+
+
+ ...) /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
+{
+ va_list layer_sizes;
+ unsigned int layers[num_layers];
+ int i = 0;
+
+ va_start(layer_sizes, num_layers);
+ for ( i=0 ; i<num_layers ; i++ ) {
+ layers[i] = va_arg(layer_sizes, unsigned int);
+ }
+ va_end(layer_sizes);
+
+ return fann_create_array(connection_rate, learning_rate, num_layers, layers);
+}
/* create a neural network.
*/
@@ -255,533 +276,6 @@ struct fann * fann_create_array(float connection_rate, float learning_rate, unsi
return ann;
}
-struct fann * fann_create(float connection_rate, float learning_rate,
- unsigned int num_layers, /* the number of layers, including the input and output layer */
-
-
- ...) /* the number of neurons in each of the layers, starting with the input layer and ending with the output layer */
-{
- va_list layer_sizes;
- unsigned int layers[num_layers];
- int i = 0;
-
- va_start(layer_sizes, num_layers);
- for ( i=0 ; i<num_layers ; i++ ) {
- layers[i] = va_arg(layer_sizes, unsigned int);
- }
- va_end(layer_sizes);
-
- return fann_create_array(connection_rate, learning_rate, num_layers, layers);
-}
-
-/* Create a network from a configuration file.
- */
-struct fann * fann_create_from_file(const char *configuration_file)
-{
- struct fann *ann;
- FILE *conf = fopen(configuration_file, "r");
- if(!conf){
- fann_error(NULL, FANN_E_CANT_OPEN_CONFIG_R, configuration_file);
- return NULL;
- }
- ann = fann_create_from_fd(conf, configuration_file);
- fclose(conf);
- return ann;
-}
-
-/* deallocate the network.
- */
-void fann_destroy(struct fann *ann)
-{
- fann_safe_free((ann->first_layer+1)->first_neuron->weights);
- fann_safe_free((ann->first_layer+1)->first_neuron->connected_neurons);
- fann_safe_free(ann->first_layer->first_neuron);
- fann_safe_free(ann->first_layer);
- fann_safe_free(ann->output);
- fann_safe_free(ann->train_deltas);
- fann_safe_free(ann->errstr);
- fann_safe_free(ann);
-}
-
-/* Save the network.
- */
-void fann_save(struct fann *ann, const char *configuration_file)
-{
- fann_save_internal(ann, configuration_file, 0);
-}
-
-/* Save the network as fixed point data.
- */
-int fann_save_to_fixed(struct fann *ann, const char *configuration_file)
-{
- return fann_save_internal(ann, configuration_file, 1);
-}
-
-void fann_set_learning_rate(struct fann *ann, float learning_rate)
-{
- ann->learning_rate = learning_rate;
-}
-
-void fann_set_activation_function_hidden(struct fann *ann, unsigned int activation_function)
-{
- ann->activation_function_hidden = activation_function;
- fann_update_stepwise_hidden(ann);
-}
-
-void fann_set_activation_function_output(struct fann *ann, unsigned int activation_function)
-{
- ann->activation_function_output = activation_function;
- fann_update_stepwise_output(ann);
-}
-
-void fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness)
-{
- ann->activation_hidden_steepness = steepness;
- fann_update_stepwise_hidden(ann);
-}
-
-void fann_set_activation_output_steepness(struct fann *ann, fann_type steepness)
-{
- ann->activation_output_steepness = steepness;
- fann_update_stepwise_output(ann);
-}
-
-float fann_get_learning_rate(struct fann *ann)
-{
- return ann->learning_rate;
-}
-
-unsigned int fann_get_num_input(struct fann *ann)
-{
- return ann->num_input;
-}
-
-unsigned int fann_get_num_output(struct fann *ann)
-{
- return ann->num_output;
-}
-
-unsigned int fann_get_activation_function_hidden(struct fann *ann)
-{
- return ann->activation_function_hidden;
-}
-
-unsigned int fann_get_activation_function_output(struct fann *ann)
-{
- return ann->activation_function_output;
-}
-
-fann_type fann_get_activation_hidden_steepness(struct fann *ann)
-{
- return ann->activation_hidden_steepness;
-}
-
-fann_type fann_get_activation_output_steepness(struct fann *ann)
-{
- return ann->activation_output_steepness;
-}
-
-unsigned int fann_get_total_neurons(struct fann *ann)
-{
- /* -1, because there is always an unused bias neuron in the last layer */
- return ann->total_neurons - 1;
-}
-
-unsigned int fann_get_total_connections(struct fann *ann)
-{
- return ann->total_connections;
-}
-
-void fann_randomize_weights(struct fann *ann, fann_type min_weight, fann_type max_weight)
-{
- fann_type *last_weight;
- fann_type *weights = (ann->first_layer+1)->first_neuron->weights;
- last_weight = weights + ann->total_connections;
- for(;weights != last_weight; weights++){
- *weights = (fann_type)(fann_rand(min_weight, max_weight));
- }
-}
-
-#ifndef FIXEDFANN
-/* Trains the network with the backpropagation algorithm.
- */
-void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
-{
- struct fann_neuron *neuron_it, *last_neuron, *neurons;
- fann_type neuron_value, *delta_it, *delta_begin, tmp_delta;
- struct fann_layer *layer_it;
- unsigned int i, shift_prev_layer;
-
- /* store some variabels local for fast access */
- const float learning_rate = ann->learning_rate;
- const fann_type activation_output_steepness = ann->activation_output_steepness;
- const fann_type activation_hidden_steepness = ann->activation_hidden_steepness;
- const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
-
- const struct fann_neuron *last_layer_begin = (ann->last_layer-1)->first_neuron;
- const struct fann_neuron *last_layer_end = last_layer_begin + ann->num_output;
- struct fann_layer *first_layer = ann->first_layer;
- struct fann_layer *last_layer = ann->last_layer;
-
- fann_run(ann, input);
- /* if no room allocated for the delta variabels, allocate it now */
- if(ann->train_deltas == NULL){
- ann->train_deltas = (fann_type *)calloc(ann->total_neurons, sizeof(fann_type));
- if(ann->train_deltas == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
- return;
- }
- }
- delta_begin = ann->train_deltas;
-
- /* clear the delta variabels */
- memset(delta_begin, 0, (ann->total_neurons) * sizeof(fann_type));
-
-#ifdef DEBUGTRAIN
- printf("calculate deltas\n");
-#endif
- /* calculate the error and place it in the output layer */
- delta_it = delta_begin + (last_layer_begin - first_neuron);
-
- for(; last_layer_begin != last_layer_end; last_layer_begin++){
- neuron_value = last_layer_begin->value;
- switch(ann->activation_function_output){
- case FANN_LINEAR:
- *delta_it = fann_linear_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
- break;
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- *delta_it = fann_sigmoid_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- *delta_it = fann_sigmoid_symmetric_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
- break;
- default:
- fann_error((struct fann_error *)ann, FANN_E_CANT_TRAIN_ACTIVATION);
- return;
- }
-
- ann->error_value += (*desired_output - neuron_value) * (*desired_output - neuron_value);
-
-#ifdef DEBUGTRAIN
- printf("delta1[%d] = "FANNPRINTF"\n", (delta_it - delta_begin), *delta_it);
-#endif
- desired_output++;
- delta_it++;
- }
- ann->num_errors++;
-
-
- /* go through all the layers, from last to first. And propagate the error backwards */
- for(layer_it = last_layer-1; layer_it != first_layer; --layer_it){
- last_neuron = layer_it->last_neuron;
-
- /* for each connection in this layer, propagate the error backwards*/
- if(ann->connection_rate == 1){ /* optimization for fully connected networks */
- shift_prev_layer = (layer_it-1)->first_neuron - first_neuron;
- for(neuron_it = layer_it->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- tmp_delta = *(delta_begin + (neuron_it - first_neuron));
- for(i = 0; i < neuron_it->num_connections; i++){
- *(delta_begin + i + shift_prev_layer) += tmp_delta * neuron_it->weights[i];
-#ifdef DEBUGTRAIN
- printf("delta2[%d] = "FANNPRINTF" += ("FANNPRINTF" * "FANNPRINTF")\n", (i + shift_prev_layer), *(delta_begin + i + shift_prev_layer), tmp_delta, neuron_it->weights[i]);
-#endif
- }
- }
- }else{
- for(neuron_it = layer_it->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- tmp_delta = *(delta_begin + (neuron_it - first_neuron));
- for(i = 0; i < neuron_it->num_connections; i++){
- *(delta_begin + (neuron_it->connected_neurons[i] - first_neuron)) +=
- tmp_delta * neuron_it->weights[i];
- }
- }
- }
-
- /* then calculate the actual errors in the previous layer */
- delta_it = delta_begin + ((layer_it-1)->first_neuron - first_neuron);
- last_neuron = (layer_it-1)->last_neuron;
-
- switch(ann->activation_function_hidden){
- case FANN_LINEAR:
- for(neuron_it = (layer_it-1)->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- neuron_value = neuron_it->value;
- *delta_it *= fann_linear_derive(activation_hidden_steepness, neuron_value) * learning_rate;
- delta_it++;
- }
- break;
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- for(neuron_it = (layer_it-1)->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- neuron_value = neuron_it->value;
- neuron_value = fann_clip(neuron_value, 0.01, 0.99);
- *delta_it *= fann_sigmoid_derive(activation_hidden_steepness, neuron_value);
-#ifdef DEBUGTRAIN
- printf("delta3[%d] = "FANNPRINTF" *= fann_sigmoid_derive(%f, %f) * %f\n", (delta_it - delta_begin), *delta_it, activation_hidden_steepness, neuron_value, learning_rate);
-#endif
- delta_it++;
- }
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- for(neuron_it = (layer_it-1)->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- neuron_value = neuron_it->value;
- neuron_value = fann_clip(neuron_value, -0.98, 0.98);
- *delta_it *= fann_sigmoid_symmetric_derive(activation_hidden_steepness, neuron_value);
-#ifdef DEBUGTRAIN
- printf("delta3[%d] = "FANNPRINTF" *= fann_sigmoid_symmetric_derive(%f, %f) * %f\n", (delta_it - delta_begin), *delta_it, activation_hidden_steepness, neuron_value, learning_rate);
-#endif
- delta_it++;
- }
- break;
- default:
- fann_error((struct fann_error *)ann, FANN_E_CANT_TRAIN_ACTIVATION);
- return;
- }
- }
-
-#ifdef DEBUGTRAIN
- printf("\nupdate weights\n");
-#endif
-
- for(layer_it = (first_layer+1); layer_it != last_layer; layer_it++){
-#ifdef DEBUGTRAIN
- printf("layer[%d]\n", layer_it - first_layer);
-#endif
- last_neuron = layer_it->last_neuron;
- if(ann->connection_rate == 1){ /* optimization for fully connected networks */
- neurons = (layer_it-1)->first_neuron;
- for(neuron_it = layer_it->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- tmp_delta = *(delta_begin + (neuron_it - first_neuron));
- for(i = 0; i < neuron_it->num_connections; i++){
-#ifdef DEBUGTRAIN
- printf("weights[%d] += "FANNPRINTF" = %f * %f\n", i, tmp_delta * neurons[i].value, tmp_delta, neurons[i].value);
-#endif
- neuron_it->weights[i] += learning_rate * tmp_delta * neurons[i].value;
- }
- }
- }else{
- for(neuron_it = layer_it->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- tmp_delta = *(delta_begin + (neuron_it - first_neuron));
- for(i = 0; i < neuron_it->num_connections; i++){
- neuron_it->weights[i] += learning_rate * tmp_delta * neuron_it->connected_neurons[i]->value;
- }
- }
- }
- }
-}
-#endif
-
-/* Tests the network.
- */
-fann_type *fann_test(struct fann *ann, fann_type *input, fann_type *desired_output)
-{
- fann_type neuron_value;
- fann_type *output_begin = fann_run(ann, input);
- fann_type *output_it;
- const fann_type *output_end = output_begin + ann->num_output;
-
- /* calculate the error */
- for(output_it = output_begin;
- output_it != output_end; output_it++){
- neuron_value = *output_it;
-
-#ifdef FIXEDFANN
- ann->error_value += ((*desired_output - neuron_value)/(float)ann->multiplier) * ((*desired_output - neuron_value)/(float)ann->multiplier);
-#else
- ann->error_value += (*desired_output - neuron_value) * (*desired_output - neuron_value);
-#endif
-
- desired_output++;
- }
- ann->num_errors++;
-
- return output_begin;
-}
-
-/* Reads training data from a file.
- */
-struct fann_train_data* fann_read_train_from_file(char *configuration_file)
-{
- struct fann_train_data* data;
- FILE *file = fopen(configuration_file, "r");
-
- if(!file){
- fann_error(NULL, FANN_E_CANT_OPEN_CONFIG_R, configuration_file);
- return NULL;
- }
-
- data = fann_read_train_from_fd(file, configuration_file);
- fclose(file);
- return data;
-}
-
-/* Save training data to a file
- */
-void fann_save_train(struct fann_train_data* data, char *filename)
-{
- fann_save_train_internal(data, filename, 0, 0);
-}
-
-/* Save training data to a file in fixed point algebra.
- (Good for testing a network in fixed point)
-*/
-void fann_save_train_to_fixed(struct fann_train_data* data, char *filename, unsigned int decimal_point)
-{
- fann_save_train_internal(data, filename, 1, decimal_point);
-}
-
-/* deallocate the train data structure.
- */
-void fann_destroy_train(struct fann_train_data *data)
-{
- unsigned int i;
- if(data->input){
- for(i = 0; i != data->num_data; i++){
- fann_safe_free(data->input[i]);
- }
- }
-
- if(data->output){
- for(i = 0; i != data->num_data; i++){
- fann_safe_free(data->output[i]);
- }
- }
-
- fann_safe_free(data->input);
- fann_safe_free(data->output);
- fann_safe_free(data);
-}
-
-#ifndef FIXEDFANN
-
-/* Train directly on the training data.
- */
-void fann_train_on_data_callback(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (*callback)(unsigned int epochs, float error))
-{
- float error;
- unsigned int i, j;
-
- if(epochs_between_reports && callback == NULL){
- printf("Max epochs %8d. Desired error: %.10f\n", max_epochs, desired_error);
- }
-
- for(i = 1; i <= max_epochs; i++){
- /* train */
- fann_reset_MSE(ann);
-
- for(j = 0; j != data->num_data; j++){
- fann_train(ann, data->input[j], data->output[j]);
- }
-
- error = fann_get_MSE(ann);
-
- /* print current output */
- if(epochs_between_reports &&
- (i % epochs_between_reports == 0
- || i == max_epochs
- || i == 1
- || error < desired_error)){
- if (callback == NULL) {
- printf("Epochs %8d. Current error: %.10f\n", i, error);
- } else if((*callback)(i, error) == -1){
- /* you can break the training by returning -1 */
- break;
- }
- }
-
- if(error < desired_error){
- break;
- }
- }
-}
-
-void fann_train_on_data(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error)
-{
- fann_train_on_data_callback(ann, data, max_epochs, epochs_between_reports, desired_error, NULL);
-}
-
-
-/* Wrapper to make it easy to train directly on a training data file.
- */
-void fann_train_on_file_callback(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (*callback)(unsigned int epochs, float error))
-{
- struct fann_train_data *data = fann_read_train_from_file(filename);
- if(data == NULL){
- return;
- }
- fann_train_on_data_callback(ann, data, max_epochs, epochs_between_reports, desired_error, callback);
- fann_destroy_train(data);
-}
-
-void fann_train_on_file(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error)
-{
- fann_train_on_file_callback(ann, filename, max_epochs, epochs_between_reports, desired_error, NULL);
-}
-
-
-#endif
-
-/* get the mean square error.
- (obsolete will be removed at some point, use fann_get_MSE)
- */
-float fann_get_error(struct fann *ann)
-{
- return fann_get_MSE(ann);
-}
-
-/* get the mean square error.
- */
-float fann_get_MSE(struct fann *ann)
-{
- if(ann->num_errors){
- return ann->error_value/(float)ann->num_errors;
- }else{
- return 0;
- }
-}
-
-/* reset the mean square error.
- (obsolete will be removed at some point, use fann_reset_MSE)
- */
-void fann_reset_error(struct fann *ann)
-{
- fann_reset_MSE(ann);
-}
-
-/* reset the mean square error.
- */
-void fann_reset_MSE(struct fann *ann)
-{
- ann->num_errors = 0;
- ann->error_value = 0;
-}
-
-#ifdef FIXEDFANN
-/* returns the position of the fix point.
- */
-unsigned int fann_get_decimal_point(struct fann *ann)
-{
- return ann->decimal_point;
-}
-
-/* returns the multiplier that fix point data is multiplied with.
- */
-unsigned int fann_get_multiplier(struct fann *ann)
-{
- return ann->multiplier;
-}
-
-#endif
-
/* runs the network.
*/
fann_type* fann_run(struct fann *ann, fann_type *input)
@@ -999,157 +493,188 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
return ann->output;
}
-/* resets the last error number
+/* deallocate the network.
*/
-void fann_reset_errno(struct fann_error *errdat)
+void fann_destroy(struct fann *ann)
{
- errdat->errno_f = 0;
+ fann_safe_free((ann->first_layer+1)->first_neuron->weights);
+ fann_safe_free((ann->first_layer+1)->first_neuron->connected_neurons);
+ fann_safe_free(ann->first_layer->first_neuron);
+ fann_safe_free(ann->first_layer);
+ fann_safe_free(ann->output);
+ fann_safe_free(ann->train_deltas);
+ fann_safe_free(ann->errstr);
+ fann_safe_free(ann);
}
-/* resets the last errstr
- */
-void fann_reset_errstr(struct fann_error *errdat)
+void fann_randomize_weights(struct fann *ann, fann_type min_weight, fann_type max_weight)
{
- if ( errdat->errstr != NULL )
- free(errdat->errstr);
- errdat->errstr = NULL;
+ fann_type *last_weight;
+ fann_type *weights = (ann->first_layer+1)->first_neuron->weights;
+ last_weight = weights + ann->total_connections;
+ for(;weights != last_weight; weights++){
+ *weights = (fann_type)(fann_rand(min_weight, max_weight));
+ }
}
-/* returns the last error number
+/* INTERNAL FUNCTION
+ Allocates the main structure and sets some default values.
*/
-unsigned int fann_get_errno(struct fann_error *errdat)
+struct fann * fann_allocate_structure(float learning_rate, unsigned int num_layers)
{
- return errdat->errno_f;
-}
+ struct fann *ann;
+
+ if(num_layers < 2){
+#ifdef DEBUG
+ printf("less than 2 layers - ABORTING.\n");
+#endif
+ return NULL;
+ }
-/* returns the last errstr
- */
-char * fann_get_errstr(struct fann_error *errdat)
-{
- char *errstr = errdat->errstr;
+ /* allocate and initialize the main network structure */
+ ann = (struct fann *)malloc(sizeof(struct fann));
+ if(ann == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ return NULL;
+ }
- fann_reset_errno(errdat);
- fann_reset_errstr(errdat);
+ ann->learning_rate = learning_rate;
+ ann->total_neurons = 0;
+ ann->total_connections = 0;
+ ann->num_input = 0;
+ ann->num_output = 0;
+ ann->train_deltas = NULL;
+ ann->num_errors = 0;
- return errstr;
-}
+ fann_init_error_data((struct fann_error *)ann);
-/* change where errors are logged to
- */
-void fann_set_error_log(struct fann_error *errdat, FILE *log)
-{
- errdat->error_log = log;
-}
+#ifdef FIXEDFANN
+ /* these values are only boring defaults, and should really
+ never be used, since the real values are always loaded from a file. */
+ ann->decimal_point = 8;
+ ann->multiplier = 256;
+#endif
+
+ ann->activation_function_hidden = FANN_SIGMOID_STEPWISE;
+ ann->activation_function_output = FANN_SIGMOID_STEPWISE;
+#ifdef FIXEDFANN
+ ann->activation_hidden_steepness = ann->multiplier/2;
+ ann->activation_output_steepness = ann->multiplier/2;
+#else
+ ann->activation_hidden_steepness = 0.5;
+ ann->activation_output_steepness = 0.5;
+#endif
-/* prints the last error to the error log (default stderr)
- */
-void fann_print_error(struct fann_error *errdat) {
- if ( (errdat->errno_f != FANN_E_NO_ERROR) && (errdat->error_log != NULL) ){
- fputs(errdat->errstr, errdat->error_log);
+ /* allocate room for the layers */
+ ann->first_layer = (struct fann_layer *)calloc(num_layers, sizeof(struct fann_layer));
+ if(ann->first_layer == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ free(ann);
+ return NULL;
}
+
+ ann->last_layer = ann->first_layer + num_layers;
+
+ return ann;
}
-/* shuffles training data, randomizing the order
+/* INTERNAL FUNCTION
+ Allocates room for the neurons.
*/
-void fann_shuffle_train_data(struct fann_train_data *train_data) {
- int dat = train_data->num_data - 1, elem;
- unsigned int swap;
- fann_type temp;
+void fann_allocate_neurons(struct fann *ann)
+{
+ struct fann_layer *layer_it;
+ struct fann_neuron *neurons;
+ unsigned int num_neurons_so_far = 0;
+ unsigned int num_neurons = 0;
+
+ /* all the neurons is allocated in one long array */
+ neurons = (struct fann_neuron *)calloc(ann->total_neurons, sizeof(struct fann_neuron));
+ if(neurons == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return;
+ }
+
+ /* clear data, primarily to make the input neurons cleared */
+ memset(neurons, 0, ann->total_neurons * sizeof(struct fann_neuron));
+
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+ num_neurons = layer_it->last_neuron - layer_it->first_neuron;
+ layer_it->first_neuron = neurons+num_neurons_so_far;
+ layer_it->last_neuron = layer_it->first_neuron+num_neurons;
+ num_neurons_so_far += num_neurons;
+ }
- for ( ; dat >= 0 ; dat-- ) {
- swap = (unsigned int)(rand() % train_data->num_data);
- if ( swap != dat ) {
- for ( elem = train_data->num_input ; elem >= 0 ; elem-- ) {
- temp = train_data->input[dat][elem];
- train_data->input[dat][elem] = train_data->input[swap][elem];
- train_data->input[swap][elem] = temp;
- }
- for ( elem = train_data->num_output ; elem >= 0 ; elem-- ) {
- temp = train_data->output[dat][elem];
- train_data->output[dat][elem] = train_data->output[swap][elem];
- train_data->output[swap][elem] = temp;
- }
- }
+ ann->output = (fann_type *)calloc(num_neurons, sizeof(fann_type));
+ if(ann->output == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return;
}
}
-/* merges training data into a single struct.
+/* INTERNAL FUNCTION
+ Allocate room for the connections.
*/
-struct fann_train_data * fann_merge_train_data(struct fann_train_data *data1, struct fann_train_data *data2) {
- struct fann_train_data * train_data;
- int x;
-
- if ( (data1->num_input != data2->num_input) ||
- (data1->num_output != data2->num_output) ) {
- fann_error(NULL, FANN_E_TRAIN_DATA_MISMATCH);
- return NULL;
+void fann_allocate_connections(struct fann *ann)
+{
+ struct fann_layer *layer_it, *last_layer;
+ struct fann_neuron *neuron_it, *last_neuron;
+ fann_type *weights;
+ struct fann_neuron **connected_neurons = NULL;
+ unsigned int connections_so_far = 0;
+
+ weights = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+ if(weights == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return;
}
-
- train_data = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
-
- fann_init_error_data((struct fann_error *)train_data);
-
- train_data->num_data = data1->num_data + data2->num_data;
- train_data->num_input = data1->num_input;
- train_data->num_output = data1->num_output;
-
- if ( ((train_data->input = (fann_type **)calloc(train_data->num_data, sizeof(fann_type *))) == NULL) ||
- ((train_data->output = (fann_type **)calloc(train_data->num_data, sizeof(fann_type *))) == NULL) ) {
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- fann_destroy_train(train_data);
- return NULL;
+
+ /* TODO make special cases for all places where the connections
+ is used, so that it is not needed for fully connected networks.
+ */
+ connected_neurons = (struct fann_neuron **) calloc(ann->total_connections, sizeof(struct fann_neuron*));
+ if(connected_neurons == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return;
}
- for ( x = train_data->num_data - 1 ; x >= 0 ; x-- ) {
- if ( ((train_data->input[x] = (fann_type *)calloc(train_data->num_input, sizeof(fann_type))) == NULL) ||
- ((train_data->output[x] = (fann_type *)calloc(train_data->num_output, sizeof(fann_type))) == NULL) ) {
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- fann_destroy_train(train_data);
- return NULL;
+
+
+ last_layer = ann->last_layer;
+ for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
+ last_neuron = layer_it->last_neuron-1;
+ for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++){
+ neuron_it->weights = weights+connections_so_far;
+ neuron_it->connected_neurons = connected_neurons+connections_so_far;
+ connections_so_far += neuron_it->num_connections;
}
- memcpy(train_data->input[x],
- ( x < data1->num_data ) ? data1->input[x] : data2->input[x - data1->num_data],
- train_data->num_input * sizeof(fann_type));
- memcpy(train_data->output[x],
- ( x < data1->num_data ) ? data1->output[x] : data2->output[x - data1->num_data],
- train_data->num_output * sizeof(fann_type));
}
- return train_data;
+ if(connections_so_far != ann->total_connections){
+ fann_error((struct fann_error *)ann, FANN_E_WRONG_NUM_CONNECTIONS, connections_so_far, ann->total_connections);
+ return;
+ }
}
-/* return a copy of a fann_train_data struct
+/* INTERNAL FUNCTION
+ Seed the random function.
*/
-struct fann_train_data * fann_duplicate_train_data(struct fann_train_data *data) {
- struct fann_train_data * dest;
- int x;
-
- if ( (dest = malloc(sizeof(struct fann_train_data))) == NULL ) {
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- return NULL;
+void fann_seed_rand()
+{
+ FILE *fp = fopen("/dev/urandom", "r");
+ unsigned int foo;
+ struct timeval t;
+ if(!fp){
+ gettimeofday(&t, NULL);
+ foo = t.tv_usec;
+#ifdef DEBUG
+ printf("unable to open /dev/urandom\n");
+#endif
+ }else{
+ fread(&foo, sizeof(foo), 1, fp);
+ fclose(fp);
}
+ srand(foo);
+}
- fann_init_error_data((struct fann_error *)dest);
-
- dest->num_data = data->num_data;
- dest->num_input = data->num_input;
- dest->num_output = data->num_output;
- if ( ((dest->input = (fann_type **)calloc(dest->num_data, sizeof(fann_type *))) == NULL) ||
- ((dest->output = (fann_type **)calloc(dest->num_data, sizeof(fann_type *))) == NULL) ) {
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- fann_destroy_train(dest);
- return NULL;
- }
- for ( x = dest->num_data - 1 ; x >= 0 ; x-- ) {
- if ( ((dest->input[x] = (fann_type *)calloc(dest->num_input, sizeof(fann_type))) == NULL) ||
- ((dest->output[x] = (fann_type *)calloc(dest->num_output, sizeof(fann_type))) == NULL) ) {
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- fann_destroy_train(dest);
- return NULL;
- }
- memcpy(dest->input[x], data->input[x], dest->num_input * sizeof(fann_type));
- memcpy(dest->output[x], data->output[x], dest->num_output * sizeof(fann_type));
- }
- return dest;
-}
diff --git a/src/fann_error.c b/src/fann_error.c
new file mode 100644
index 0000000..6ff16c1
--- /dev/null
+++ b/src/fann_error.c
@@ -0,0 +1,170 @@
+/*
+ Fast Artificial Neural Network Library (fann)
+ Copyright (C) 2003 Steffen Nissen (lukesky at diku.dk)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "config.h"
+#include "fann.h"
+#include "fann_errno.h"
+
+/* resets the last error number
+ */
+void fann_reset_errno(struct fann_error *errdat)
+{
+ errdat->errno_f = 0;
+}
+
+/* resets the last errstr
+ */
+void fann_reset_errstr(struct fann_error *errdat)
+{
+ if ( errdat->errstr != NULL )
+ free(errdat->errstr);
+ errdat->errstr = NULL;
+}
+
+/* returns the last error number
+ */
+unsigned int fann_get_errno(struct fann_error *errdat)
+{
+ return errdat->errno_f;
+}
+
+/* returns the last errstr
+ */
+char * fann_get_errstr(struct fann_error *errdat)
+{
+ char *errstr = errdat->errstr;
+
+ fann_reset_errno(errdat);
+ fann_reset_errstr(errdat);
+
+ return errstr;
+}
+
+/* change where errors are logged to
+ */
+void fann_set_error_log(struct fann_error *errdat, FILE *log)
+{
+ errdat->error_log = log;
+}
+
+/* prints the last error to the error log (default stderr)
+ */
+void fann_print_error(struct fann_error *errdat) {
+ if ( (errdat->errno_f != FANN_E_NO_ERROR) && (errdat->error_log != NULL) ){
+ fputs(errdat->errstr, errdat->error_log);
+ }
+}
+
+/* INTERNAL FUNCTION
+ Populate the error information
+ */
+void fann_error(struct fann_error *errdat, const unsigned int errno, ...)
+{
+ va_list ap;
+ char * errstr;
+
+ if (errdat != NULL) errdat->errno_f = errno;
+
+ if(errdat != NULL && errdat->errstr != NULL){
+ errstr = errdat->errstr;
+ }else{
+ errstr = (char *)malloc(FANN_ERRSTR_MAX);
+ if(errstr == NULL){
+ fprintf(stderr, "Unable to allocate memory.\n");
+ return;
+ }
+ }
+
+ va_start(ap, errno);
+ switch ( errno ) {
+ case FANN_E_NO_ERROR:
+ break;
+ case FANN_E_CANT_OPEN_CONFIG_R:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open configuration file \"%s\" for reading.\n", ap);
+ break;
+ case FANN_E_CANT_OPEN_CONFIG_W:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open configuration file \"%s\" for writing.\n", ap);
+ break;
+ case FANN_E_WRONG_CONFIG_VERSION:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Wrong version of configuration file, aborting read of configuration file \"%s\".\n", ap);
+ break;
+ case FANN_E_CANT_READ_CONFIG:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading info from configuration file \"%s\".\n", ap);
+ break;
+ case FANN_E_CANT_READ_NEURON:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading neuron info from configuration file \"%s\".\n", ap);
+ break;
+ case FANN_E_CANT_READ_CONNECTIONS:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading connections from configuration file \"%s\".\n", ap);
+ break;
+ case FANN_E_WRONG_NUM_CONNECTIONS:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "ERROR connections_so_far=%d, total_connections=%d\n", ap);
+ break;
+ case FANN_E_CANT_OPEN_TD_W:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open train data file \"%s\" for writing.\n", ap);
+ break;
+ case FANN_E_CANT_OPEN_TD_R:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open train data file \"%s\" for writing.\n", ap);
+ break;
+ case FANN_E_CANT_READ_TD:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading info from train data file \"%s\", line: %d.\n", ap);
+ break;
+ case FANN_E_CANT_ALLOCATE_MEM:
+ snprintf(errstr, FANN_ERRSTR_MAX, "Unable to allocate memory.\n");
+ break;
+ case FANN_E_CANT_TRAIN_ACTIVATION:
+ snprintf(errstr, FANN_ERRSTR_MAX, "Unable to train with the selected activation function.\n");
+ break;
+ case FANN_E_CANT_USE_ACTIVATION:
+ snprintf(errstr, FANN_ERRSTR_MAX, "Unable to use the selected activation function.\n");
+ break;
+ case FANN_E_TRAIN_DATA_MISMATCH:
+ snprintf(errstr, FANN_ERRSTR_MAX, "Training data must be of equivalent structure.");
+ break;
+ default:
+ vsnprintf(errstr, FANN_ERRSTR_MAX, "Unknown error.\n", ap);
+ break;
+ }
+ va_end(ap);
+
+ if ( errdat == NULL ) {
+ fprintf(stderr, "FANN Error %d: %s", errno, errstr);
+ } else {
+ errdat->errstr = errstr;
+ if ( errdat->error_log != NULL ) {
+ fprintf(errdat->error_log, "FANN Error %d: %s", errno, errstr);
+ }
+ }
+}
+
+/* INTERNAL FUNCTION
+ Initialize an error data strcuture
+ */
+void fann_init_error_data(struct fann_error *errdat)
+{
+ errdat->error_value = 0;
+ errdat->errstr = NULL;
+ errdat->errno_f = 0;
+ errdat->error_log = stderr;
+}
diff --git a/src/fann_internal.c b/src/fann_internal.c
deleted file mode 100644
index aec17ba..0000000
--- a/src/fann_internal.c
+++ /dev/null
@@ -1,883 +0,0 @@
-/*
-Fast Artificial Neural Network Library (fann)
-Copyright (C) 2003 Steffen Nissen (lukesky at diku.dk)
-
-This library is free software; you can redistribute it and/or
-modify it under the terms of the GNU Lesser General Public
-License as published by the Free Software Foundation; either
-version 2.1 of the License, or (at your option) any later version.
-
-This library is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-Lesser General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public
-License along with this library; if not, write to the Free Software
-Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <sys/time.h>
-#include <time.h>
-#include <stdarg.h>
-
-#include "compat_time.h"
-#include "fann.h"
-#include "fann_internal.h"
-#include "fann_errno.h"
-
-/* Allocates the main structure and sets some default values.
- */
-struct fann * fann_allocate_structure(float learning_rate, unsigned int num_layers)
-{
- struct fann *ann;
-
- if(num_layers < 2){
-#ifdef DEBUG
- printf("less than 2 layers - ABORTING.\n");
-#endif
- return NULL;
- }
-
- /* allocate and initialize the main network structure */
- ann = (struct fann *)malloc(sizeof(struct fann));
- if(ann == NULL){
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- return NULL;
- }
-
- ann->learning_rate = learning_rate;
- ann->total_neurons = 0;
- ann->total_connections = 0;
- ann->num_input = 0;
- ann->num_output = 0;
- ann->train_deltas = NULL;
- ann->num_errors = 0;
-
- fann_init_error_data((struct fann_error *)ann);
-
-#ifdef FIXEDFANN
- /* these values are only boring defaults, and should really
- never be used, since the real values are always loaded from a file. */
- ann->decimal_point = 8;
- ann->multiplier = 256;
-#endif
-
- ann->activation_function_hidden = FANN_SIGMOID_STEPWISE;
- ann->activation_function_output = FANN_SIGMOID_STEPWISE;
-#ifdef FIXEDFANN
- ann->activation_hidden_steepness = ann->multiplier/2;
- ann->activation_output_steepness = ann->multiplier/2;
-#else
- ann->activation_hidden_steepness = 0.5;
- ann->activation_output_steepness = 0.5;
-#endif
-
- /* allocate room for the layers */
- ann->first_layer = (struct fann_layer *)calloc(num_layers, sizeof(struct fann_layer));
- if(ann->first_layer == NULL){
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- free(ann);
- return NULL;
- }
-
- ann->last_layer = ann->first_layer + num_layers;
-
- return ann;
-}
-
-/* Allocates room for the neurons.
- */
-void fann_allocate_neurons(struct fann *ann)
-{
- struct fann_layer *layer_it;
- struct fann_neuron *neurons;
- unsigned int num_neurons_so_far = 0;
- unsigned int num_neurons = 0;
-
- /* all the neurons is allocated in one long array */
- neurons = (struct fann_neuron *)calloc(ann->total_neurons, sizeof(struct fann_neuron));
- if(neurons == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
- return;
- }
-
- /* clear data, primarily to make the input neurons cleared */
- memset(neurons, 0, ann->total_neurons * sizeof(struct fann_neuron));
-
- for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
- num_neurons = layer_it->last_neuron - layer_it->first_neuron;
- layer_it->first_neuron = neurons+num_neurons_so_far;
- layer_it->last_neuron = layer_it->first_neuron+num_neurons;
- num_neurons_so_far += num_neurons;
- }
-
- ann->output = (fann_type *)calloc(num_neurons, sizeof(fann_type));
- if(ann->output == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
- return;
- }
-}
-
-/* Allocate room for the connections.
- */
-void fann_allocate_connections(struct fann *ann)
-{
- struct fann_layer *layer_it, *last_layer;
- struct fann_neuron *neuron_it, *last_neuron;
- fann_type *weights;
- struct fann_neuron **connected_neurons = NULL;
- unsigned int connections_so_far = 0;
-
- weights = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
- if(weights == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
- return;
- }
-
- /* TODO make special cases for all places where the connections
- is used, so that it is not needed for fully connected networks.
- */
- connected_neurons = (struct fann_neuron **) calloc(ann->total_connections, sizeof(struct fann_neuron*));
- if(connected_neurons == NULL){
- fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
- return;
- }
-
-
- last_layer = ann->last_layer;
- for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
- last_neuron = layer_it->last_neuron-1;
- for(neuron_it = layer_it->first_neuron; neuron_it != last_neuron; neuron_it++){
- neuron_it->weights = weights+connections_so_far;
- neuron_it->connected_neurons = connected_neurons+connections_so_far;
- connections_so_far += neuron_it->num_connections;
- }
- }
-
- if(connections_so_far != ann->total_connections){
- fann_error((struct fann_error *)ann, FANN_E_WRONG_NUM_CONNECTIONS, connections_so_far, ann->total_connections);
- return;
- }
-}
-
-/* Used to save the network to a file.
- */
-int fann_save_internal(struct fann *ann, const char *configuration_file, unsigned int save_as_fixed)
-{
- int retval;
- FILE *conf = fopen(configuration_file, "w+");
- if(!conf){
- fann_error(NULL, FANN_E_CANT_OPEN_CONFIG_W, configuration_file);
- return -1;
- }
- retval = fann_save_internal_fd(ann, conf, configuration_file, save_as_fixed);
- fclose(conf);
- return retval;
-}
-
-/* Used to save the network to a file descriptor.
- */
-int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuration_file, unsigned int save_as_fixed)
-{
- struct fann_layer *layer_it;
- int calculated_decimal_point = 0;
- struct fann_neuron *neuron_it, *first_neuron;
- fann_type *weights;
- struct fann_neuron **connected_neurons;
- unsigned int i = 0;
-#ifndef FIXEDFANN
- /* variabels for use when saving floats as fixed point variabels */
- unsigned int decimal_point = 0;
- unsigned int fixed_multiplier = 0;
- fann_type max_possible_value = 0;
- unsigned int bits_used_for_max = 0;
- fann_type current_max_value = 0;
-#endif
-
-#ifndef FIXEDFANN
- if(save_as_fixed){
- /* save the version information */
- fprintf(conf, FANN_FIX_VERSION"\n");
- }else{
- /* save the version information */
- fprintf(conf, FANN_FLO_VERSION"\n");
- }
-#else
- /* save the version information */
- fprintf(conf, FANN_FIX_VERSION"\n");
-#endif
-
-#ifndef FIXEDFANN
- if(save_as_fixed){
- /* calculate the maximal possible shift value */
-
- for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
- for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
- /* look at all connections to each neurons, and see how high a value we can get */
- current_max_value = 0;
- for(i = 0; i != neuron_it->num_connections; i++){
- current_max_value += fann_abs(neuron_it->weights[i]);
- }
-
- if(current_max_value > max_possible_value){
- max_possible_value = current_max_value;
- }
- }
- }
-
- for(bits_used_for_max = 0; max_possible_value >= 1; bits_used_for_max++){
- max_possible_value /= 2.0;
- }
-
- /* The maximum number of bits we shift the fix point, is the number
- of bits in a integer, minus one for the sign, one for the minus
- in stepwise, and minus the bits used for the maximum.
- This is devided by two, to allow multiplication of two fixed
- point numbers.
- */
- calculated_decimal_point = (sizeof(int)*8-2-bits_used_for_max)/2;
-
- if(calculated_decimal_point < 0){
- decimal_point = 0;
- }else{
- decimal_point = calculated_decimal_point;
- }
-
- fixed_multiplier = 1 << decimal_point;
-
-#ifdef DEBUG
- printf("calculated_decimal_point=%d, decimal_point=%u, bits_used_for_max=%u\n", calculated_decimal_point, decimal_point, bits_used_for_max);
-#endif
-
- /* save the decimal_point on a seperate line */
- fprintf(conf, "%u\n", decimal_point);
-
- /* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */
- fprintf(conf, "%u %f %f %u %u %d %d\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, (int)(ann->activation_hidden_steepness * fixed_multiplier), (int)(ann->activation_output_steepness * fixed_multiplier));
- }else{
- /* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */
- fprintf(conf, "%u %f %f %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);
- }
-#else
- /* save the decimal_point on a seperate line */
- fprintf(conf, "%u\n", ann->decimal_point);
-
- /* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */
- fprintf(conf, "%u %f %f %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);
-#endif
-
- for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
- /* the number of neurons in the layers (in the last layer, there is always one too many neurons, because of an unused bias) */
- fprintf(conf, "%u ", layer_it->last_neuron - layer_it->first_neuron);
- }
- fprintf(conf, "\n");
-
-
- for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
- /* the number of connections to each neuron */
- for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
- fprintf(conf, "%u ", neuron_it->num_connections);
- }
- fprintf(conf, "\n");
- }
-
- connected_neurons = (ann->first_layer+1)->first_neuron->connected_neurons;
- weights = (ann->first_layer+1)->first_neuron->weights;
- first_neuron = ann->first_layer->first_neuron;
-
- /* Now save all the connections.
- We only need to save the source and the weight,
- since the destination is given by the order.
-
- The weight is not saved binary due to differences
- in binary definition of floating point numbers.
- Especially an iPAQ does not use the same binary
- representation as an i386 machine.
- */
- for(i = 0; i < ann->total_connections; i++){
-#ifndef FIXEDFANN
- if(save_as_fixed){
- /* save the connection "(source weight) "*/
- fprintf(conf, "(%u %d) ",
- connected_neurons[i] - first_neuron,
- (int)floor((weights[i]*fixed_multiplier) + 0.5));
- }else{
- /* save the connection "(source weight) "*/
- fprintf(conf, "(%u "FANNPRINTF") ",
- connected_neurons[i] - first_neuron, weights[i]);
- }
-#else
- /* save the connection "(source weight) "*/
- fprintf(conf, "(%u "FANNPRINTF") ",
- connected_neurons[i] - first_neuron, weights[i]);
-#endif
-
- }
- fprintf(conf, "\n");
-
- return calculated_decimal_point;
-}
-
-/* Save the train data structure.
- */
-void fann_save_train_internal(struct fann_train_data* data, char *filename, unsigned int save_as_fixed, unsigned int decimal_point)
-{
- FILE *file = fopen(filename, "w");
- if(!file){
- fann_error(NULL, FANN_E_CANT_OPEN_TD_W, filename);
- return;
- }
- fann_save_train_internal_fd(data, file, filename, save_as_fixed, decimal_point);
- fclose(file);
-}
-
-/* Save the train data structure.
- */
-void fann_save_train_internal_fd(struct fann_train_data* data, FILE *file, char *filename, unsigned int save_as_fixed, unsigned int decimal_point)
-{
- unsigned int num_data = data->num_data;
- unsigned int num_input = data->num_input;
- unsigned int num_output = data->num_output;
- unsigned int i, j;
-#ifndef FIXEDFANN
- unsigned int multiplier = 1 << decimal_point;
-#endif
-
- fprintf(file, "%u %u %u\n", data->num_data, data->num_input, data->num_output);
-
- for(i = 0; i < num_data; i++){
- for(j = 0; j < num_input; j++){
-#ifndef FIXEDFANN
- if(save_as_fixed){
- fprintf(file, "%d ", (int)(data->input[i][j]*multiplier));
- }else{
- fprintf(file, FANNPRINTF" ", data->input[i][j]);
- }
-#else
- fprintf(file, FANNPRINTF" ", data->input[i][j]);
-#endif
- }
- fprintf(file, "\n");
-
- for(j = 0; j < num_output; j++){
-#ifndef FIXEDFANN
- if(save_as_fixed){
- fprintf(file, "%d ", (int)(data->output[i][j]*multiplier));
- }else{
- fprintf(file, FANNPRINTF" ", data->output[i][j]);
- }
-#else
- fprintf(file, FANNPRINTF" ", data->output[i][j]);
-#endif
- }
- fprintf(file, "\n");
- }
-}
-
-/* Adjust the steepwise functions (if used) */
-void fann_update_stepwise_hidden(struct fann *ann)
-{
- unsigned int i = 0;
-#ifndef FIXEDFANN
- /* For use in stepwise linear activation function.
- results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
- */
- switch(ann->activation_function_hidden){
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- ann->activation_hidden_results[0] = 0.005;
- ann->activation_hidden_results[1] = 0.05;
- ann->activation_hidden_results[2] = 0.25;
- ann->activation_hidden_results[3] = 0.75;
- ann->activation_hidden_results[4] = 0.95;
- ann->activation_hidden_results[5] = 0.995;
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- ann->activation_hidden_results[0] = -0.99;
- ann->activation_hidden_results[1] = -0.9;
- ann->activation_hidden_results[2] = -0.5;
- ann->activation_hidden_results[3] = 0.5;
- ann->activation_hidden_results[4] = 0.9;
- ann->activation_hidden_results[5] = 0.99;
- break;
- default:
- /* the actiavation functions which do not have a stepwise function
- should not have it calculated */
- return;
- }
-#else
- /* Calculate the parameters for the stepwise linear
- sigmoid function fixed point.
- Using a rewritten sigmoid function.
- results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
- */
- switch(ann->activation_function_hidden){
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- ann->activation_hidden_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
- ann->activation_hidden_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
- ann->activation_hidden_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
- ann->activation_hidden_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
- ann->activation_hidden_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
- ann->activation_hidden_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- ann->activation_hidden_results[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
- ann->activation_hidden_results[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
- ann->activation_hidden_results[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
- ann->activation_hidden_results[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
- ann->activation_hidden_results[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
- ann->activation_hidden_results[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
- break;
- default:
- /* the actiavation functions which do not have a stepwise function
- should not have it calculated */
- return;
- }
-#endif
-
- for(i = 0; i < 6; i++){
-#ifndef FIXEDFANN
- switch(ann->activation_function_hidden){
- case FANN_SIGMOID:
- break;
- case FANN_SIGMOID_STEPWISE:
- ann->activation_hidden_values[i] = ((log(1.0/ann->activation_hidden_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- ann->activation_hidden_values[i] = ((log((1.0-ann->activation_hidden_results[i]) / (ann->activation_hidden_results[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
- break;
- }
-#else
- switch(ann->activation_function_hidden){
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- ann->activation_hidden_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_hidden_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- ann->activation_hidden_values[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_hidden_results[i])/((float)ann->activation_hidden_results[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
- break;
- }
-#endif
- }
-}
-
-/* Adjust the steepwise functions (if used) */
-void fann_update_stepwise_output(struct fann *ann)
-{
- unsigned int i = 0;
-#ifndef FIXEDFANN
- /* For use in stepwise linear activation function.
- results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
- */
- switch(ann->activation_function_output){
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- ann->activation_output_results[0] = 0.005;
- ann->activation_output_results[1] = 0.05;
- ann->activation_output_results[2] = 0.25;
- ann->activation_output_results[3] = 0.75;
- ann->activation_output_results[4] = 0.95;
- ann->activation_output_results[5] = 0.995;
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- ann->activation_output_results[0] = -0.99;
- ann->activation_output_results[1] = -0.9;
- ann->activation_output_results[2] = -0.5;
- ann->activation_output_results[3] = 0.5;
- ann->activation_output_results[4] = 0.9;
- ann->activation_output_results[5] = 0.99;
- break;
- default:
- /* the actiavation functions which do not have a stepwise function
- should not have it calculated */
- return;
- }
-#else
- /* Calculate the parameters for the stepwise linear
- sigmoid function fixed point.
- Using a rewritten sigmoid function.
- results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
- */
- switch(ann->activation_function_output){
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- ann->activation_output_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
- ann->activation_output_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
- ann->activation_output_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
- ann->activation_output_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
- ann->activation_output_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
- ann->activation_output_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- ann->activation_output_results[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
- ann->activation_output_results[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
- ann->activation_output_results[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
- ann->activation_output_results[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
- ann->activation_output_results[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
- ann->activation_output_results[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
- break;
- default:
- /* the actiavation functions which do not have a stepwise function
- should not have it calculated */
- return;
- }
-#endif
-
- for(i = 0; i < 6; i++){
-#ifndef FIXEDFANN
- switch(ann->activation_function_output){
- case FANN_SIGMOID:
- break;
- case FANN_SIGMOID_STEPWISE:
- ann->activation_output_values[i] = ((log(1.0/ann->activation_output_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- ann->activation_output_values[i] = ((log((1.0-ann->activation_output_results[i]) / (ann->activation_output_results[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
- break;
- }
-#else
- switch(ann->activation_function_output){
- case FANN_SIGMOID:
- case FANN_SIGMOID_STEPWISE:
- ann->activation_output_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_output_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
- break;
- case FANN_SIGMOID_SYMMETRIC:
- case FANN_SIGMOID_SYMMETRIC_STEPWISE:
- ann->activation_output_values[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_output_results[i])/((float)ann->activation_output_results[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
- break;
- }
-#endif
- }
-}
-
-
-/* Seed the random function.
- */
-void fann_seed_rand()
-{
- FILE *fp = fopen("/dev/urandom", "r");
- unsigned int foo;
- struct timeval t;
- if(!fp){
- gettimeofday(&t, NULL);
- foo = t.tv_usec;
-#ifdef DEBUG
- printf("unable to open /dev/urandom\n");
-#endif
- }else{
- fread(&foo, sizeof(foo), 1, fp);
- fclose(fp);
- }
- srand(foo);
-}
-
-/* Populate the error information
- */
-void fann_error(struct fann_error *errdat, const unsigned int errno, ...)
-{
- va_list ap;
- char * errstr;
-
- if (errdat != NULL) errdat->errno_f = errno;
-
- if(errdat != NULL && errdat->errstr != NULL){
- errstr = errdat->errstr;
- }else{
- errstr = (char *)malloc(FANN_ERRSTR_MAX);
- if(errstr == NULL){
- fprintf(stderr, "Unable to allocate memory.\n");
- return;
- }
- }
-
- va_start(ap, errno);
- switch ( errno ) {
- case FANN_E_NO_ERROR:
- break;
- case FANN_E_CANT_OPEN_CONFIG_R:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open configuration file \"%s\" for reading.\n", ap);
- break;
- case FANN_E_CANT_OPEN_CONFIG_W:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open configuration file \"%s\" for writing.\n", ap);
- break;
- case FANN_E_WRONG_CONFIG_VERSION:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Wrong version of configuration file, aborting read of configuration file \"%s\".\n", ap);
- break;
- case FANN_E_CANT_READ_CONFIG:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading info from configuration file \"%s\".\n", ap);
- break;
- case FANN_E_CANT_READ_NEURON:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading neuron info from configuration file \"%s\".\n", ap);
- break;
- case FANN_E_CANT_READ_CONNECTIONS:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading connections from configuration file \"%s\".\n", ap);
- break;
- case FANN_E_WRONG_NUM_CONNECTIONS:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "ERROR connections_so_far=%d, total_connections=%d\n", ap);
- break;
- case FANN_E_CANT_OPEN_TD_W:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open train data file \"%s\" for writing.\n", ap);
- break;
- case FANN_E_CANT_OPEN_TD_R:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Unable to open train data file \"%s\" for writing.\n", ap);
- break;
- case FANN_E_CANT_READ_TD:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Error reading info from train data file \"%s\", line: %d.\n", ap);
- break;
- case FANN_E_CANT_ALLOCATE_MEM:
- snprintf(errstr, FANN_ERRSTR_MAX, "Unable to allocate memory.\n");
- break;
- case FANN_E_CANT_TRAIN_ACTIVATION:
- snprintf(errstr, FANN_ERRSTR_MAX, "Unable to train with the selected activation function.\n");
- break;
- case FANN_E_CANT_USE_ACTIVATION:
- snprintf(errstr, FANN_ERRSTR_MAX, "Unable to use the selected activation function.\n");
- break;
- case FANN_E_TRAIN_DATA_MISMATCH:
- snprintf(errstr, FANN_ERRSTR_MAX, "Training data must be of equivalent structure.");
- break;
- default:
- vsnprintf(errstr, FANN_ERRSTR_MAX, "Unknown error.\n", ap);
- break;
- }
- va_end(ap);
-
- if ( errdat == NULL ) {
- fprintf(stderr, "FANN Error %d: %s", errno, errstr);
- } else {
- errdat->errstr = errstr;
- if ( errdat->error_log != NULL ) {
- fprintf(errdat->error_log, "FANN Error %d: %s", errno, errstr);
- }
- }
-}
-
-/* Create a network from a configuration file descriptor.
- */
-struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
-{
- unsigned int num_layers, layer_size, activation_function_hidden, activation_function_output, input_neuron, i;
-#ifdef FIXEDFANN
- unsigned int decimal_point, multiplier;
-#endif
- fann_type activation_hidden_steepness, activation_output_steepness;
- float learning_rate, connection_rate;
- struct fann_neuron *first_neuron, *neuron_it, *last_neuron, **connected_neurons;
- fann_type *weights;
- struct fann_layer *layer_it;
- struct fann *ann;
-
- char *read_version;
-
- read_version = (char *)calloc(strlen(FANN_CONF_VERSION"\n"), 1);
- if(read_version == NULL){
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- return NULL;
- }
-
- fread(read_version, 1, strlen(FANN_CONF_VERSION"\n"), conf); /* reads version */
-
- /* compares the version information */
- if(strncmp(read_version, FANN_CONF_VERSION"\n", strlen(FANN_CONF_VERSION"\n")) != 0){
- fann_error(NULL, FANN_E_WRONG_CONFIG_VERSION, configuration_file);
- free(read_version);
- return NULL;
- }
-
- free(read_version);
-
-#ifdef FIXEDFANN
- if(fscanf(conf, "%u\n", &decimal_point) != 1){
- fann_error(NULL, FANN_E_CANT_READ_CONFIG, configuration_file);
- return NULL;
- }
- multiplier = 1 << decimal_point;
-#endif
-
- if(fscanf(conf, "%u %f %f %u %u "FANNSCANF" "FANNSCANF"\n", &num_layers, &learning_rate, &connection_rate, &activation_function_hidden, &activation_function_output, &activation_hidden_steepness, &activation_output_steepness) != 7){
- fann_error(NULL, FANN_E_CANT_READ_CONFIG, configuration_file);
- return NULL;
- }
-
- ann = fann_allocate_structure(learning_rate, num_layers);
- if(ann == NULL){
- return NULL;
- }
- ann->connection_rate = connection_rate;
-
-#ifdef FIXEDFANN
- ann->decimal_point = decimal_point;
- ann->multiplier = multiplier;
-#endif
-
- ann->activation_hidden_steepness = activation_hidden_steepness;
- ann->activation_output_steepness = activation_output_steepness;
- ann->activation_function_hidden = activation_function_hidden;
- ann->activation_function_output = activation_function_output;
- fann_update_stepwise_hidden(ann);
- fann_update_stepwise_output(ann);
-
-#ifdef DEBUG
- printf("creating network with learning rate %f\n", learning_rate);
- printf("input\n");
-#endif
-
- /* determine how many neurons there should be in each layer */
- for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
- if(fscanf(conf, "%u ", &layer_size) != 1){
- fann_error((struct fann_error *)ann, FANN_E_CANT_READ_NEURON, configuration_file);
- fann_destroy(ann);
- return NULL;
- }
- /* we do not allocate room here, but we make sure that
- last_neuron - first_neuron is the number of neurons */
- layer_it->first_neuron = NULL;
- layer_it->last_neuron = layer_it->first_neuron + layer_size;
- ann->total_neurons += layer_size;
-#ifdef DEBUG
- printf(" layer : %d neurons, 1 bias\n", layer_size);
-#endif
- }
-
- ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
- ann->num_output = ((ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron) - 1;
-
- /* allocate room for the actual neurons */
- fann_allocate_neurons(ann);
- if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
- fann_destroy(ann);
- return NULL;
- }
-
- last_neuron = (ann->last_layer-1)->last_neuron;
- for(neuron_it = ann->first_layer->first_neuron;
- neuron_it != last_neuron; neuron_it++){
- if(fscanf(conf, "%u ", &neuron_it->num_connections) != 1){
- fann_error((struct fann_error *)ann, FANN_E_CANT_READ_NEURON, configuration_file);
- fann_destroy(ann);
- return NULL;
- }
- ann->total_connections += neuron_it->num_connections;
- }
-
- fann_allocate_connections(ann);
- if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
- fann_destroy(ann);
- return NULL;
- }
-
- connected_neurons = (ann->first_layer+1)->first_neuron->connected_neurons;
- weights = (ann->first_layer+1)->first_neuron->weights;
- first_neuron = ann->first_layer->first_neuron;
-
- for(i = 0; i < ann->total_connections; i++){
- if(fscanf(conf, "(%u "FANNSCANF") ", &input_neuron, &weights[i]) != 2){
- fann_error((struct fann_error *)ann, FANN_E_CANT_READ_CONNECTIONS, configuration_file);
- fann_destroy(ann);
- return NULL;
- }
- connected_neurons[i] = first_neuron+input_neuron;
- }
-
-#ifdef DEBUG
- printf("output\n");
-#endif
- return ann;
-}
-
-/* Reads training data from a file descriptor.
- */
-struct fann_train_data* fann_read_train_from_fd(FILE *file, char *filename)
-{
- unsigned int num_input, num_output, num_data, i, j;
- unsigned int line = 1;
- struct fann_train_data* data = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
-
- if(data == NULL){
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- return NULL;
- }
-
- if(fscanf(file, "%u %u %u\n", &num_data, &num_input, &num_output) != 3){
- fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
- fann_destroy_train(data);
- return NULL;
- }
- line++;
-
- fann_init_error_data((struct fann_error *)data);
-
- data->num_data = num_data;
- data->num_input = num_input;
- data->num_output = num_output;
- data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
- if(data->input == NULL){
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- fann_destroy_train(data);
- return NULL;
- }
-
- data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
- if(data->output == NULL){
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- fann_destroy_train(data);
- return NULL;
- }
-
- for(i = 0; i != num_data; i++){
- data->input[i] = (fann_type *)calloc(num_input, sizeof(fann_type));
- if(data->input[i] == NULL){
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- fann_destroy_train(data);
- return NULL;
- }
-
- for(j = 0; j != num_input; j++){
- if(fscanf(file, FANNSCANF" ", &data->input[i][j]) != 1){
- fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
- fann_destroy_train(data);
- return NULL;
- }
- }
- line++;
-
- data->output[i] = (fann_type *)calloc(num_output, sizeof(fann_type));
- if(data->output[i] == NULL){
- fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
- fann_destroy_train(data);
- return NULL;
- }
-
- for(j = 0; j != num_output; j++){
- if(fscanf(file, FANNSCANF" ", &data->output[i][j]) != 1){
- fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
- fann_destroy_train(data);
- return NULL;
- }
- }
- line++;
- }
- return data;
-}
-
-/* Initialize an error data strcuture
- */
-void fann_init_error_data(struct fann_error *errdat)
-{
- errdat->error_value = 0;
- errdat->errstr = NULL;
- errdat->errno_f = 0;
- errdat->error_log = stderr;
-}
diff --git a/src/fann_io.c b/src/fann_io.c
new file mode 100644
index 0000000..c9e7611
--- /dev/null
+++ b/src/fann_io.c
@@ -0,0 +1,409 @@
+/*
+ Fast Artificial Neural Network Library (fann)
+ Copyright (C) 2003 Steffen Nissen (lukesky at diku.dk)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "config.h"
+#include "fann.h"
+#include "fann_errno.h"
+
+/* Create a network from a configuration file.
+ */
+struct fann * fann_create_from_file(const char *configuration_file)
+{
+ struct fann *ann;
+ FILE *conf = fopen(configuration_file, "r");
+ if(!conf){
+ fann_error(NULL, FANN_E_CANT_OPEN_CONFIG_R, configuration_file);
+ return NULL;
+ }
+ ann = fann_create_from_fd(conf, configuration_file);
+ fclose(conf);
+ return ann;
+}
+
+/* Save the network.
+ */
+void fann_save(struct fann *ann, const char *configuration_file)
+{
+ fann_save_internal(ann, configuration_file, 0);
+}
+
+/* Save the network as fixed point data.
+ */
+int fann_save_to_fixed(struct fann *ann, const char *configuration_file)
+{
+ return fann_save_internal(ann, configuration_file, 1);
+}
+
+/* INTERNAL FUNCTION
+ Used to save the network to a file.
+ */
+int fann_save_internal(struct fann *ann, const char *configuration_file, unsigned int save_as_fixed)
+{
+ int retval;
+ FILE *conf = fopen(configuration_file, "w+");
+ if(!conf){
+ fann_error(NULL, FANN_E_CANT_OPEN_CONFIG_W, configuration_file);
+ return -1;
+ }
+ retval = fann_save_internal_fd(ann, conf, configuration_file, save_as_fixed);
+ fclose(conf);
+ return retval;
+}
+
+/* INTERNAL FUNCTION
+ Used to save the network to a file descriptor.
+ */
+int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuration_file, unsigned int save_as_fixed)
+{
+ struct fann_layer *layer_it;
+ int calculated_decimal_point = 0;
+ struct fann_neuron *neuron_it, *first_neuron;
+ fann_type *weights;
+ struct fann_neuron **connected_neurons;
+ unsigned int i = 0;
+#ifndef FIXEDFANN
+ /* variables for use when saving floats as fixed point variables */
+ unsigned int decimal_point = 0;
+ unsigned int fixed_multiplier = 0;
+ fann_type max_possible_value = 0;
+ unsigned int bits_used_for_max = 0;
+ fann_type current_max_value = 0;
+#endif
+
+#ifndef FIXEDFANN
+ if(save_as_fixed){
+ /* save the version information */
+ fprintf(conf, FANN_FIX_VERSION"\n");
+ }else{
+ /* save the version information */
+ fprintf(conf, FANN_FLO_VERSION"\n");
+ }
+#else
+ /* save the version information */
+ fprintf(conf, FANN_FIX_VERSION"\n");
+#endif
+
+#ifndef FIXEDFANN
+ if(save_as_fixed){
+ /* calculate the maximal possible shift value */
+
+ for(layer_it = ann->first_layer+1; layer_it != ann->last_layer; layer_it++){
+ for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
+ /* look at all connections to each neurons, and see how high a value we can get */
+ current_max_value = 0;
+ for(i = 0; i != neuron_it->num_connections; i++){
+ current_max_value += fann_abs(neuron_it->weights[i]);
+ }
+
+ if(current_max_value > max_possible_value){
+ max_possible_value = current_max_value;
+ }
+ }
+ }
+
+ for(bits_used_for_max = 0; max_possible_value >= 1; bits_used_for_max++){
+ max_possible_value /= 2.0;
+ }
+
+ /* The maximum number of bits we shift the fixed point, is the number
+ of bits in an integer, minus one for the sign, one for the minus
+ in stepwise, and minus the bits used for the maximum.
+ This is divided by two, to allow multiplication of two fixed
+ point numbers.
+ */
+ calculated_decimal_point = (sizeof(int)*8-2-bits_used_for_max)/2;
+
+ if(calculated_decimal_point < 0){
+ decimal_point = 0;
+ }else{
+ decimal_point = calculated_decimal_point;
+ }
+
+ fixed_multiplier = 1 << decimal_point;
+
+#ifdef DEBUG
+ printf("calculated_decimal_point=%d, decimal_point=%u, bits_used_for_max=%u\n", calculated_decimal_point, decimal_point, bits_used_for_max);
+#endif
+
+ /* save the decimal_point on a separate line */
+ fprintf(conf, "%u\n", decimal_point);
+
+ /* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */
+ fprintf(conf, "%u %f %f %u %u %d %d\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, (int)(ann->activation_hidden_steepness * fixed_multiplier), (int)(ann->activation_output_steepness * fixed_multiplier));
+ }else{
+ /* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */
+ fprintf(conf, "%u %f %f %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);
+ }
+#else
+ /* save the decimal_point on a separate line */
+ fprintf(conf, "%u\n", ann->decimal_point);
+
+ /* save the number layers "num_layers learning_rate connection_rate activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */
+ fprintf(conf, "%u %f %f %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);
+#endif
+
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+ /* the number of neurons in the layers (in the last layer, there is always one too many neurons, because of an unused bias) */
+ fprintf(conf, "%u ", layer_it->last_neuron - layer_it->first_neuron);
+ }
+ fprintf(conf, "\n");
+
+
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+ /* the number of connections to each neuron */
+ for(neuron_it = layer_it->first_neuron; neuron_it != layer_it->last_neuron; neuron_it++){
+ fprintf(conf, "%u ", neuron_it->num_connections);
+ }
+ fprintf(conf, "\n");
+ }
+
+ connected_neurons = (ann->first_layer+1)->first_neuron->connected_neurons;
+ weights = (ann->first_layer+1)->first_neuron->weights;
+ first_neuron = ann->first_layer->first_neuron;
+
+ /* Now save all the connections.
+ We only need to save the source and the weight,
+ since the destination is given by the order.
+
+ The weight is not saved binary due to differences
+ in binary definition of floating point numbers.
+ Especially an iPAQ does not use the same binary
+ representation as an i386 machine.
+ */
+ for(i = 0; i < ann->total_connections; i++){
+#ifndef FIXEDFANN
+ if(save_as_fixed){
+ /* save the connection "(source weight) "*/
+ fprintf(conf, "(%u %d) ",
+ connected_neurons[i] - first_neuron,
+ (int)floor((weights[i]*fixed_multiplier) + 0.5));
+ }else{
+ /* save the connection "(source weight) "*/
+ fprintf(conf, "(%u "FANNPRINTF") ",
+ connected_neurons[i] - first_neuron, weights[i]);
+ }
+#else
+ /* save the connection "(source weight) "*/
+ fprintf(conf, "(%u "FANNPRINTF") ",
+ connected_neurons[i] - first_neuron, weights[i]);
+#endif
+
+ }
+ fprintf(conf, "\n");
+
+ return calculated_decimal_point;
+}
+
+/* INTERNAL FUNCTION
+ Save the train data structure.
+ */
+void fann_save_train_internal(struct fann_train_data* data, char *filename, unsigned int save_as_fixed, unsigned int decimal_point)
+{
+ FILE *file = fopen(filename, "w");
+ if(!file){
+ fann_error(NULL, FANN_E_CANT_OPEN_TD_W, filename);
+ return;
+ }
+ fann_save_train_internal_fd(data, file, filename, save_as_fixed, decimal_point);
+ fclose(file);
+}
+
+/* INTERNAL FUNCTION
+ Save the train data structure.
+ */
+void fann_save_train_internal_fd(struct fann_train_data* data, FILE *file, char *filename, unsigned int save_as_fixed, unsigned int decimal_point)
+{
+ unsigned int num_data = data->num_data;
+ unsigned int num_input = data->num_input;
+ unsigned int num_output = data->num_output;
+ unsigned int i, j;
+#ifndef FIXEDFANN
+ unsigned int multiplier = 1 << decimal_point;
+#endif
+
+ fprintf(file, "%u %u %u\n", data->num_data, data->num_input, data->num_output);
+
+ for(i = 0; i < num_data; i++){
+ for(j = 0; j < num_input; j++){
+#ifndef FIXEDFANN
+ if(save_as_fixed){
+ fprintf(file, "%d ", (int)(data->input[i][j]*multiplier));
+ }else{
+ fprintf(file, FANNPRINTF" ", data->input[i][j]);
+ }
+#else
+ fprintf(file, FANNPRINTF" ", data->input[i][j]);
+#endif
+ }
+ fprintf(file, "\n");
+
+ for(j = 0; j < num_output; j++){
+#ifndef FIXEDFANN
+ if(save_as_fixed){
+ fprintf(file, "%d ", (int)(data->output[i][j]*multiplier));
+ }else{
+ fprintf(file, FANNPRINTF" ", data->output[i][j]);
+ }
+#else
+ fprintf(file, FANNPRINTF" ", data->output[i][j]);
+#endif
+ }
+ fprintf(file, "\n");
+ }
+}
+
+/* INTERNAL FUNCTION
+ Create a network from a configuration file descriptor.
+ */
+struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
+{
+ unsigned int num_layers, layer_size, activation_function_hidden, activation_function_output, input_neuron, i;
+#ifdef FIXEDFANN
+ unsigned int decimal_point, multiplier;
+#endif
+ fann_type activation_hidden_steepness, activation_output_steepness;
+ float learning_rate, connection_rate;
+ struct fann_neuron *first_neuron, *neuron_it, *last_neuron, **connected_neurons;
+ fann_type *weights;
+ struct fann_layer *layer_it;
+ struct fann *ann;
+
+ char *read_version;
+
+ read_version = (char *)calloc(strlen(FANN_CONF_VERSION"\n"), 1);
+ if(read_version == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ return NULL;
+ }
+
+ fread(read_version, 1, strlen(FANN_CONF_VERSION"\n"), conf); /* reads version */
+
+ /* compares the version information */
+ if(strncmp(read_version, FANN_CONF_VERSION"\n", strlen(FANN_CONF_VERSION"\n")) != 0){
+ fann_error(NULL, FANN_E_WRONG_CONFIG_VERSION, configuration_file);
+ free(read_version);
+ return NULL;
+ }
+
+ free(read_version);
+
+#ifdef FIXEDFANN
+ if(fscanf(conf, "%u\n", &decimal_point) != 1){
+ fann_error(NULL, FANN_E_CANT_READ_CONFIG, configuration_file);
+ return NULL;
+ }
+ multiplier = 1 << decimal_point;
+#endif
+
+ if(fscanf(conf, "%u %f %f %u %u "FANNSCANF" "FANNSCANF"\n", &num_layers, &learning_rate, &connection_rate, &activation_function_hidden, &activation_function_output, &activation_hidden_steepness, &activation_output_steepness) != 7){
+ fann_error(NULL, FANN_E_CANT_READ_CONFIG, configuration_file);
+ return NULL;
+ }
+
+ ann = fann_allocate_structure(learning_rate, num_layers);
+ if(ann == NULL){
+ return NULL;
+ }
+ ann->connection_rate = connection_rate;
+
+#ifdef FIXEDFANN
+ ann->decimal_point = decimal_point;
+ ann->multiplier = multiplier;
+#endif
+
+ ann->activation_hidden_steepness = activation_hidden_steepness;
+ ann->activation_output_steepness = activation_output_steepness;
+ ann->activation_function_hidden = activation_function_hidden;
+ ann->activation_function_output = activation_function_output;
+ fann_update_stepwise_hidden(ann);
+ fann_update_stepwise_output(ann);
+
+#ifdef DEBUG
+ printf("creating network with learning rate %f\n", learning_rate);
+ printf("input\n");
+#endif
+
+ /* determine how many neurons there should be in each layer */
+ for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
+ if(fscanf(conf, "%u ", &layer_size) != 1){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_READ_NEURON, configuration_file);
+ fann_destroy(ann);
+ return NULL;
+ }
+ /* we do not allocate room here, but we make sure that
+ last_neuron - first_neuron is the number of neurons */
+ layer_it->first_neuron = NULL;
+ layer_it->last_neuron = layer_it->first_neuron + layer_size;
+ ann->total_neurons += layer_size;
+#ifdef DEBUG
+ printf(" layer : %d neurons, 1 bias\n", layer_size);
+#endif
+ }
+
+ ann->num_input = ann->first_layer->last_neuron - ann->first_layer->first_neuron - 1;
+ ann->num_output = ((ann->last_layer-1)->last_neuron - (ann->last_layer-1)->first_neuron) - 1;
+
+ /* allocate room for the actual neurons */
+ fann_allocate_neurons(ann);
+ if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
+ fann_destroy(ann);
+ return NULL;
+ }
+
+ last_neuron = (ann->last_layer-1)->last_neuron;
+ for(neuron_it = ann->first_layer->first_neuron;
+ neuron_it != last_neuron; neuron_it++){
+ if(fscanf(conf, "%u ", &neuron_it->num_connections) != 1){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_READ_NEURON, configuration_file);
+ fann_destroy(ann);
+ return NULL;
+ }
+ ann->total_connections += neuron_it->num_connections;
+ }
+
+ fann_allocate_connections(ann);
+ if(ann->errno_f == FANN_E_CANT_ALLOCATE_MEM){
+ fann_destroy(ann);
+ return NULL;
+ }
+
+ connected_neurons = (ann->first_layer+1)->first_neuron->connected_neurons;
+ weights = (ann->first_layer+1)->first_neuron->weights;
+ first_neuron = ann->first_layer->first_neuron;
+
+ for(i = 0; i < ann->total_connections; i++){
+ if(fscanf(conf, "(%u "FANNSCANF") ", &input_neuron, &weights[i]) != 2){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_READ_CONNECTIONS, configuration_file);
+ fann_destroy(ann);
+ return NULL;
+ }
+ connected_neurons[i] = first_neuron+input_neuron;
+ }
+
+#ifdef DEBUG
+ printf("output\n");
+#endif
+ return ann;
+}
+
diff --git a/src/fann_options.c b/src/fann_options.c
new file mode 100644
index 0000000..238c872
--- /dev/null
+++ b/src/fann_options.c
@@ -0,0 +1,307 @@
+/*
+ Fast Artificial Neural Network Library (fann)
+ Copyright (C) 2003 Steffen Nissen (lukesky at diku.dk)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "config.h"
+#include "fann.h"
+#include "fann_errno.h"
+
+void fann_set_learning_rate(struct fann *ann, float learning_rate)
+{
+ ann->learning_rate = learning_rate;
+}
+
+void fann_set_activation_function_hidden(struct fann *ann, unsigned int activation_function)
+{
+ ann->activation_function_hidden = activation_function;
+ fann_update_stepwise_hidden(ann);
+}
+
+void fann_set_activation_function_output(struct fann *ann, unsigned int activation_function)
+{
+ ann->activation_function_output = activation_function;
+ fann_update_stepwise_output(ann);
+}
+
+void fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness)
+{
+ ann->activation_hidden_steepness = steepness;
+ fann_update_stepwise_hidden(ann);
+}
+
+void fann_set_activation_output_steepness(struct fann *ann, fann_type steepness)
+{
+ ann->activation_output_steepness = steepness;
+ fann_update_stepwise_output(ann);
+}
+
+float fann_get_learning_rate(struct fann *ann)
+{
+ return ann->learning_rate;
+}
+
+unsigned int fann_get_num_input(struct fann *ann)
+{
+ return ann->num_input;
+}
+
+unsigned int fann_get_num_output(struct fann *ann)
+{
+ return ann->num_output;
+}
+
+unsigned int fann_get_activation_function_hidden(struct fann *ann)
+{
+ return ann->activation_function_hidden;
+}
+
+unsigned int fann_get_activation_function_output(struct fann *ann)
+{
+ return ann->activation_function_output;
+}
+
+fann_type fann_get_activation_hidden_steepness(struct fann *ann)
+{
+ return ann->activation_hidden_steepness;
+}
+
+fann_type fann_get_activation_output_steepness(struct fann *ann)
+{
+ return ann->activation_output_steepness;
+}
+
+unsigned int fann_get_total_neurons(struct fann *ann)
+{
+ /* -1, because there is always an unused bias neuron in the last layer */
+ return ann->total_neurons - 1;
+}
+
+unsigned int fann_get_total_connections(struct fann *ann)
+{
+ return ann->total_connections;
+}
+
+#ifdef FIXEDFANN
+/* returns the position of the fixed point.
+ */
+unsigned int fann_get_decimal_point(struct fann *ann)
+{
+ return ann->decimal_point;
+}
+
+/* returns the multiplier that fixed-point data is multiplied with.
+ */
+unsigned int fann_get_multiplier(struct fann *ann)
+{
+ return ann->multiplier;
+}
+
+#endif
+
+/* INTERNAL FUNCTION
+   Adjust the stepwise functions (if used)
+*/
+void fann_update_stepwise_hidden(struct fann *ann)
+{
+ unsigned int i = 0;
+#ifndef FIXEDFANN
+ /* For use in stepwise linear activation function.
+ results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
+ */
+ switch(ann->activation_function_hidden){
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ ann->activation_hidden_results[0] = 0.005;
+ ann->activation_hidden_results[1] = 0.05;
+ ann->activation_hidden_results[2] = 0.25;
+ ann->activation_hidden_results[3] = 0.75;
+ ann->activation_hidden_results[4] = 0.95;
+ ann->activation_hidden_results[5] = 0.995;
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ ann->activation_hidden_results[0] = -0.99;
+ ann->activation_hidden_results[1] = -0.9;
+ ann->activation_hidden_results[2] = -0.5;
+ ann->activation_hidden_results[3] = 0.5;
+ ann->activation_hidden_results[4] = 0.9;
+ ann->activation_hidden_results[5] = 0.99;
+ break;
+ default:
+			/* the activation functions which do not have a stepwise function
+ should not have it calculated */
+ return;
+ }
+#else
+ /* Calculate the parameters for the stepwise linear
+ sigmoid function fixed point.
+ Using a rewritten sigmoid function.
+ results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
+ */
+ switch(ann->activation_function_hidden){
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ ann->activation_hidden_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
+ ann->activation_hidden_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
+ ann->activation_hidden_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
+ ann->activation_hidden_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
+ ann->activation_hidden_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
+ ann->activation_hidden_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ ann->activation_hidden_results[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
+ ann->activation_hidden_results[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
+ ann->activation_hidden_results[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
+ ann->activation_hidden_results[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
+ ann->activation_hidden_results[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
+ ann->activation_hidden_results[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
+ break;
+ default:
+			/* the activation functions which do not have a stepwise function
+ should not have it calculated */
+ return;
+ }
+#endif
+
+ for(i = 0; i < 6; i++){
+#ifndef FIXEDFANN
+ switch(ann->activation_function_hidden){
+ case FANN_SIGMOID:
+ break;
+ case FANN_SIGMOID_STEPWISE:
+ ann->activation_hidden_values[i] = ((log(1.0/ann->activation_hidden_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ ann->activation_hidden_values[i] = ((log((1.0-ann->activation_hidden_results[i]) / (ann->activation_hidden_results[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
+ break;
+ }
+#else
+ switch(ann->activation_function_hidden){
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ ann->activation_hidden_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_hidden_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ ann->activation_hidden_values[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_hidden_results[i])/((float)ann->activation_hidden_results[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
+ break;
+ }
+#endif
+ }
+}
+
+/* INTERNAL FUNCTION
+   Adjust the stepwise functions (if used)
+*/
+void fann_update_stepwise_output(struct fann *ann)
+{
+ unsigned int i = 0;
+#ifndef FIXEDFANN
+ /* For use in stepwise linear activation function.
+ results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
+ */
+ switch(ann->activation_function_output){
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ ann->activation_output_results[0] = 0.005;
+ ann->activation_output_results[1] = 0.05;
+ ann->activation_output_results[2] = 0.25;
+ ann->activation_output_results[3] = 0.75;
+ ann->activation_output_results[4] = 0.95;
+ ann->activation_output_results[5] = 0.995;
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ ann->activation_output_results[0] = -0.99;
+ ann->activation_output_results[1] = -0.9;
+ ann->activation_output_results[2] = -0.5;
+ ann->activation_output_results[3] = 0.5;
+ ann->activation_output_results[4] = 0.9;
+ ann->activation_output_results[5] = 0.99;
+ break;
+ default:
+			/* the activation functions which do not have a stepwise function
+ should not have it calculated */
+ return;
+ }
+#else
+ /* Calculate the parameters for the stepwise linear
+ sigmoid function fixed point.
+ Using a rewritten sigmoid function.
+ results 0.005, 0.05, 0.25, 0.75, 0.95, 0.995
+ */
+ switch(ann->activation_function_output){
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ ann->activation_output_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
+ ann->activation_output_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
+ ann->activation_output_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
+ ann->activation_output_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
+ ann->activation_output_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
+ ann->activation_output_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ ann->activation_output_results[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
+ ann->activation_output_results[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
+ ann->activation_output_results[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
+ ann->activation_output_results[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
+ ann->activation_output_results[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
+ ann->activation_output_results[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
+ break;
+ default:
+			/* the activation functions which do not have a stepwise function
+ should not have it calculated */
+ return;
+ }
+#endif
+
+ for(i = 0; i < 6; i++){
+#ifndef FIXEDFANN
+ switch(ann->activation_function_output){
+ case FANN_SIGMOID:
+ break;
+ case FANN_SIGMOID_STEPWISE:
+ ann->activation_output_values[i] = ((log(1.0/ann->activation_output_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ ann->activation_output_values[i] = ((log((1.0-ann->activation_output_results[i]) / (ann->activation_output_results[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
+ break;
+ }
+#else
+ switch(ann->activation_function_output){
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ ann->activation_output_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_output_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ ann->activation_output_values[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_output_results[i])/((float)ann->activation_output_results[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
+ break;
+ }
+#endif
+ }
+}
diff --git a/src/fann_train.c b/src/fann_train.c
new file mode 100644
index 0000000..7091894
--- /dev/null
+++ b/src/fann_train.c
@@ -0,0 +1,268 @@
+/*
+ Fast Artificial Neural Network Library (fann)
+ Copyright (C) 2003 Steffen Nissen (lukesky at diku.dk)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "config.h"
+#include "fann.h"
+#include "fann_errno.h"
+
+#ifndef FIXEDFANN
+/* Trains the network with the backpropagation algorithm.
+ */
+void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
+{
+ struct fann_neuron *neuron_it, *last_neuron, *neurons;
+ fann_type neuron_value, *delta_it, *delta_begin, tmp_delta;
+ struct fann_layer *layer_it;
+ unsigned int i, shift_prev_layer;
+
+	/* store some variables local for fast access */
+ const float learning_rate = ann->learning_rate;
+ const fann_type activation_output_steepness = ann->activation_output_steepness;
+ const fann_type activation_hidden_steepness = ann->activation_hidden_steepness;
+ const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
+
+ const struct fann_neuron *last_layer_begin = (ann->last_layer-1)->first_neuron;
+ const struct fann_neuron *last_layer_end = last_layer_begin + ann->num_output;
+ struct fann_layer *first_layer = ann->first_layer;
+ struct fann_layer *last_layer = ann->last_layer;
+
+ fann_run(ann, input);
+	/* if no room allocated for the delta variables, allocate it now */
+ if(ann->train_deltas == NULL){
+ ann->train_deltas = (fann_type *)calloc(ann->total_neurons, sizeof(fann_type));
+ if(ann->train_deltas == NULL){
+ fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+ return;
+ }
+ }
+ delta_begin = ann->train_deltas;
+
+	/* clear the delta variables */
+ memset(delta_begin, 0, (ann->total_neurons) * sizeof(fann_type));
+
+#ifdef DEBUGTRAIN
+ printf("calculate deltas\n");
+#endif
+ /* calculate the error and place it in the output layer */
+ delta_it = delta_begin + (last_layer_begin - first_neuron);
+
+ for(; last_layer_begin != last_layer_end; last_layer_begin++){
+ neuron_value = last_layer_begin->value;
+ switch(ann->activation_function_output){
+ case FANN_LINEAR:
+ *delta_it = fann_linear_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
+ break;
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ *delta_it = fann_sigmoid_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ *delta_it = fann_sigmoid_symmetric_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
+ break;
+ default:
+ fann_error((struct fann_error *)ann, FANN_E_CANT_TRAIN_ACTIVATION);
+ return;
+ }
+
+ ann->error_value += (*desired_output - neuron_value) * (*desired_output - neuron_value);
+
+#ifdef DEBUGTRAIN
+ printf("delta1[%d] = "FANNPRINTF"\n", (delta_it - delta_begin), *delta_it);
+#endif
+ desired_output++;
+ delta_it++;
+ }
+ ann->num_errors++;
+
+
+ /* go through all the layers, from last to first. And propagate the error backwards */
+ for(layer_it = last_layer-1; layer_it != first_layer; --layer_it){
+ last_neuron = layer_it->last_neuron;
+
+ /* for each connection in this layer, propagate the error backwards*/
+ if(ann->connection_rate == 1){ /* optimization for fully connected networks */
+ shift_prev_layer = (layer_it-1)->first_neuron - first_neuron;
+ for(neuron_it = layer_it->first_neuron;
+ neuron_it != last_neuron; neuron_it++){
+ tmp_delta = *(delta_begin + (neuron_it - first_neuron));
+ for(i = 0; i < neuron_it->num_connections; i++){
+ *(delta_begin + i + shift_prev_layer) += tmp_delta * neuron_it->weights[i];
+#ifdef DEBUGTRAIN
+ printf("delta2[%d] = "FANNPRINTF" += ("FANNPRINTF" * "FANNPRINTF")\n", (i + shift_prev_layer), *(delta_begin + i + shift_prev_layer), tmp_delta, neuron_it->weights[i]);
+#endif
+ }
+ }
+ }else{
+ for(neuron_it = layer_it->first_neuron;
+ neuron_it != last_neuron; neuron_it++){
+ tmp_delta = *(delta_begin + (neuron_it - first_neuron));
+ for(i = 0; i < neuron_it->num_connections; i++){
+ *(delta_begin + (neuron_it->connected_neurons[i] - first_neuron)) +=
+ tmp_delta * neuron_it->weights[i];
+ }
+ }
+ }
+
+ /* then calculate the actual errors in the previous layer */
+ delta_it = delta_begin + ((layer_it-1)->first_neuron - first_neuron);
+ last_neuron = (layer_it-1)->last_neuron;
+
+ switch(ann->activation_function_hidden){
+ case FANN_LINEAR:
+ for(neuron_it = (layer_it-1)->first_neuron;
+ neuron_it != last_neuron; neuron_it++){
+ neuron_value = neuron_it->value;
+ *delta_it *= fann_linear_derive(activation_hidden_steepness, neuron_value) * learning_rate;
+ delta_it++;
+ }
+ break;
+ case FANN_SIGMOID:
+ case FANN_SIGMOID_STEPWISE:
+ for(neuron_it = (layer_it-1)->first_neuron;
+ neuron_it != last_neuron; neuron_it++){
+ neuron_value = neuron_it->value;
+ neuron_value = fann_clip(neuron_value, 0.01, 0.99);
+ *delta_it *= fann_sigmoid_derive(activation_hidden_steepness, neuron_value);
+#ifdef DEBUGTRAIN
+ printf("delta3[%d] = "FANNPRINTF" *= fann_sigmoid_derive(%f, %f) * %f\n", (delta_it - delta_begin), *delta_it, activation_hidden_steepness, neuron_value, learning_rate);
+#endif
+ delta_it++;
+ }
+ break;
+ case FANN_SIGMOID_SYMMETRIC:
+ case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+ for(neuron_it = (layer_it-1)->first_neuron;
+ neuron_it != last_neuron; neuron_it++){
+ neuron_value = neuron_it->value;
+ neuron_value = fann_clip(neuron_value, -0.98, 0.98);
+ *delta_it *= fann_sigmoid_symmetric_derive(activation_hidden_steepness, neuron_value);
+#ifdef DEBUGTRAIN
+ printf("delta3[%d] = "FANNPRINTF" *= fann_sigmoid_symmetric_derive(%f, %f) * %f\n", (delta_it - delta_begin), *delta_it, activation_hidden_steepness, neuron_value, learning_rate);
+#endif
+ delta_it++;
+ }
+ break;
+ default:
+ fann_error((struct fann_error *)ann, FANN_E_CANT_TRAIN_ACTIVATION);
+ return;
+ }
+ }
+
+#ifdef DEBUGTRAIN
+ printf("\nupdate weights\n");
+#endif
+
+ for(layer_it = (first_layer+1); layer_it != last_layer; layer_it++){
+#ifdef DEBUGTRAIN
+ printf("layer[%d]\n", layer_it - first_layer);
+#endif
+ last_neuron = layer_it->last_neuron;
+ if(ann->connection_rate == 1){ /* optimization for fully connected networks */
+ neurons = (layer_it-1)->first_neuron;
+ for(neuron_it = layer_it->first_neuron;
+ neuron_it != last_neuron; neuron_it++){
+ tmp_delta = *(delta_begin + (neuron_it - first_neuron));
+ for(i = 0; i < neuron_it->num_connections; i++){
+#ifdef DEBUGTRAIN
+ printf("weights[%d] += "FANNPRINTF" = %f * %f\n", i, tmp_delta * neurons[i].value, tmp_delta, neurons[i].value);
+#endif
+ neuron_it->weights[i] += learning_rate * tmp_delta * neurons[i].value;
+ }
+ }
+ }else{
+ for(neuron_it = layer_it->first_neuron;
+ neuron_it != last_neuron; neuron_it++){
+ tmp_delta = *(delta_begin + (neuron_it - first_neuron));
+ for(i = 0; i < neuron_it->num_connections; i++){
+ neuron_it->weights[i] += learning_rate * tmp_delta * neuron_it->connected_neurons[i]->value;
+ }
+ }
+ }
+ }
+}
+#endif
+
+/* Tests the network.
+ */
+fann_type *fann_test(struct fann *ann, fann_type *input, fann_type *desired_output)
+{
+ fann_type neuron_value;
+ fann_type *output_begin = fann_run(ann, input);
+ fann_type *output_it;
+ const fann_type *output_end = output_begin + ann->num_output;
+
+ /* calculate the error */
+ for(output_it = output_begin;
+ output_it != output_end; output_it++){
+ neuron_value = *output_it;
+
+#ifdef FIXEDFANN
+ ann->error_value += ((*desired_output - neuron_value)/(float)ann->multiplier) * ((*desired_output - neuron_value)/(float)ann->multiplier);
+#else
+ ann->error_value += (*desired_output - neuron_value) * (*desired_output - neuron_value);
+#endif
+
+ desired_output++;
+ }
+ ann->num_errors++;
+
+ return output_begin;
+}
+
+/* get the mean square error.
+   (obsolete; will be removed at some point — use fann_get_MSE)
+ */
+float fann_get_error(struct fann *ann)
+{
+ return fann_get_MSE(ann);
+}
+
+/* get the mean square error.
+ */
+float fann_get_MSE(struct fann *ann)
+{
+ if(ann->num_errors){
+ return ann->error_value/(float)ann->num_errors;
+ }else{
+ return 0;
+ }
+}
+
+/* reset the mean square error.
+   (obsolete; will be removed at some point — use fann_reset_MSE)
+ */
+void fann_reset_error(struct fann *ann)
+{
+ fann_reset_MSE(ann);
+}
+
+/* reset the mean square error.
+ */
+void fann_reset_MSE(struct fann *ann)
+{
+ ann->num_errors = 0;
+ ann->error_value = 0;
+}
+
diff --git a/src/fann_train_data.c b/src/fann_train_data.c
new file mode 100644
index 0000000..86f5a36
--- /dev/null
+++ b/src/fann_train_data.c
@@ -0,0 +1,330 @@
+/*
+ Fast Artificial Neural Network Library (fann)
+ Copyright (C) 2003 Steffen Nissen (lukesky at diku.dk)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "config.h"
+#include "fann.h"
+#include "fann_errno.h"
+
+/* Reads training data from a file.
+ */
+struct fann_train_data* fann_read_train_from_file(char *configuration_file)
+{
+ struct fann_train_data* data;
+ FILE *file = fopen(configuration_file, "r");
+
+ if(!file){
+ fann_error(NULL, FANN_E_CANT_OPEN_CONFIG_R, configuration_file);
+ return NULL;
+ }
+
+ data = fann_read_train_from_fd(file, configuration_file);
+ fclose(file);
+ return data;
+}
+
+/* Save training data to a file
+ */
+void fann_save_train(struct fann_train_data* data, char *filename)
+{
+ fann_save_train_internal(data, filename, 0, 0);
+}
+
+/* Save training data to a file in fixed point algebra.
+ (Good for testing a network in fixed point)
+*/
+void fann_save_train_to_fixed(struct fann_train_data* data, char *filename, unsigned int decimal_point)
+{
+ fann_save_train_internal(data, filename, 1, decimal_point);
+}
+
+/* deallocate the train data structure.
+ */
+void fann_destroy_train(struct fann_train_data *data)
+{
+ unsigned int i;
+ if(data->input){
+ for(i = 0; i != data->num_data; i++){
+ fann_safe_free(data->input[i]);
+ }
+ }
+
+ if(data->output){
+ for(i = 0; i != data->num_data; i++){
+ fann_safe_free(data->output[i]);
+ }
+ }
+
+ fann_safe_free(data->input);
+ fann_safe_free(data->output);
+ fann_safe_free(data);
+}
+
+#ifndef FIXEDFANN
+
+/* Train directly on the training data.
+ */
+void fann_train_on_data_callback(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (*callback)(unsigned int epochs, float error))
+{
+ float error;
+ unsigned int i, j;
+
+ if(epochs_between_reports && callback == NULL){
+ printf("Max epochs %8d. Desired error: %.10f\n", max_epochs, desired_error);
+ }
+
+ for(i = 1; i <= max_epochs; i++){
+ /* train */
+ fann_reset_MSE(ann);
+
+ for(j = 0; j != data->num_data; j++){
+ fann_train(ann, data->input[j], data->output[j]);
+ }
+
+ error = fann_get_MSE(ann);
+
+ /* print current output */
+ if(epochs_between_reports &&
+ (i % epochs_between_reports == 0
+ || i == max_epochs
+ || i == 1
+ || error < desired_error)){
+ if (callback == NULL) {
+ printf("Epochs %8d. Current error: %.10f\n", i, error);
+ } else if((*callback)(i, error) == -1){
+ /* you can break the training by returning -1 */
+ break;
+ }
+ }
+
+ if(error < desired_error){
+ break;
+ }
+ }
+}
+
+void fann_train_on_data(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error)
+{
+ fann_train_on_data_callback(ann, data, max_epochs, epochs_between_reports, desired_error, NULL);
+}
+
+
+/* Wrapper to make it easy to train directly on a training data file.
+ */
+void fann_train_on_file_callback(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (*callback)(unsigned int epochs, float error))
+{
+ struct fann_train_data *data = fann_read_train_from_file(filename);
+ if(data == NULL){
+ return;
+ }
+ fann_train_on_data_callback(ann, data, max_epochs, epochs_between_reports, desired_error, callback);
+ fann_destroy_train(data);
+}
+
+void fann_train_on_file(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error)
+{
+ fann_train_on_file_callback(ann, filename, max_epochs, epochs_between_reports, desired_error, NULL);
+}
+
+#endif
+
+/* shuffles training data, randomizing the order
+ */
+void fann_shuffle_train_data(struct fann_train_data *train_data) {
+ int dat = train_data->num_data - 1, elem;
+ unsigned int swap;
+ fann_type temp;
+
+ for ( ; dat >= 0 ; dat-- ) {
+ swap = (unsigned int)(rand() % train_data->num_data);
+ if ( swap != dat ) {
+ for ( elem = train_data->num_input ; elem >= 0 ; elem-- ) {
+ temp = train_data->input[dat][elem];
+ train_data->input[dat][elem] = train_data->input[swap][elem];
+ train_data->input[swap][elem] = temp;
+ }
+ for ( elem = train_data->num_output ; elem >= 0 ; elem-- ) {
+ temp = train_data->output[dat][elem];
+ train_data->output[dat][elem] = train_data->output[swap][elem];
+ train_data->output[swap][elem] = temp;
+ }
+ }
+ }
+}
+
+/* merges training data into a single struct.
+ */
+struct fann_train_data * fann_merge_train_data(struct fann_train_data *data1, struct fann_train_data *data2) {
+ struct fann_train_data * train_data;
+ int x;
+
+ if ( (data1->num_input != data2->num_input) ||
+ (data1->num_output != data2->num_output) ) {
+ fann_error(NULL, FANN_E_TRAIN_DATA_MISMATCH);
+ return NULL;
+ }
+
+ train_data = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
+
+ fann_init_error_data((struct fann_error *)train_data);
+
+ train_data->num_data = data1->num_data + data2->num_data;
+ train_data->num_input = data1->num_input;
+ train_data->num_output = data1->num_output;
+
+ if ( ((train_data->input = (fann_type **)calloc(train_data->num_data, sizeof(fann_type *))) == NULL) ||
+ ((train_data->output = (fann_type **)calloc(train_data->num_data, sizeof(fann_type *))) == NULL) ) {
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ fann_destroy_train(train_data);
+ return NULL;
+ }
+ for ( x = train_data->num_data - 1 ; x >= 0 ; x-- ) {
+ if ( ((train_data->input[x] = (fann_type *)calloc(train_data->num_input, sizeof(fann_type))) == NULL) ||
+ ((train_data->output[x] = (fann_type *)calloc(train_data->num_output, sizeof(fann_type))) == NULL) ) {
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ fann_destroy_train(train_data);
+ return NULL;
+ }
+ memcpy(train_data->input[x],
+ ( x < data1->num_data ) ? data1->input[x] : data2->input[x - data1->num_data],
+ train_data->num_input * sizeof(fann_type));
+ memcpy(train_data->output[x],
+ ( x < data1->num_data ) ? data1->output[x] : data2->output[x - data1->num_data],
+ train_data->num_output * sizeof(fann_type));
+ }
+
+ return train_data;
+}
+
+/* return a copy of a fann_train_data struct
+ */
+struct fann_train_data * fann_duplicate_train_data(struct fann_train_data *data) {
+ struct fann_train_data * dest;
+ int x;
+
+ if ( (dest = malloc(sizeof(struct fann_train_data))) == NULL ) {
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ return NULL;
+ }
+
+ fann_init_error_data((struct fann_error *)dest);
+
+ dest->num_data = data->num_data;
+ dest->num_input = data->num_input;
+ dest->num_output = data->num_output;
+
+ if ( ((dest->input = (fann_type **)calloc(dest->num_data, sizeof(fann_type *))) == NULL) ||
+ ((dest->output = (fann_type **)calloc(dest->num_data, sizeof(fann_type *))) == NULL) ) {
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ fann_destroy_train(dest);
+ return NULL;
+ }
+
+ for ( x = dest->num_data - 1 ; x >= 0 ; x-- ) {
+ if ( ((dest->input[x] = (fann_type *)calloc(dest->num_input, sizeof(fann_type))) == NULL) ||
+ ((dest->output[x] = (fann_type *)calloc(dest->num_output, sizeof(fann_type))) == NULL) ) {
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ fann_destroy_train(dest);
+ return NULL;
+ }
+ memcpy(dest->input[x], data->input[x], dest->num_input * sizeof(fann_type));
+ memcpy(dest->output[x], data->output[x], dest->num_output * sizeof(fann_type));
+ }
+ return dest;
+}
+
+/* INTERNAL FUNCTION
+ Reads training data from a file descriptor.
+ */
+struct fann_train_data* fann_read_train_from_fd(FILE *file, char *filename)
+{
+ unsigned int num_input, num_output, num_data, i, j;
+ unsigned int line = 1;
+ struct fann_train_data* data = (struct fann_train_data *)malloc(sizeof(struct fann_train_data));
+
+ if(data == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ return NULL;
+ }
+
+ if(fscanf(file, "%u %u %u\n", &num_data, &num_input, &num_output) != 3){
+ fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
+ fann_destroy_train(data);
+ return NULL;
+ }
+ line++;
+
+ fann_init_error_data((struct fann_error *)data);
+
+ data->num_data = num_data;
+ data->num_input = num_input;
+ data->num_output = num_output;
+ data->input = (fann_type **)calloc(num_data, sizeof(fann_type *));
+ if(data->input == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ fann_destroy_train(data);
+ return NULL;
+ }
+
+ data->output = (fann_type **)calloc(num_data, sizeof(fann_type *));
+ if(data->output == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ fann_destroy_train(data);
+ return NULL;
+ }
+
+ for(i = 0; i != num_data; i++){
+ data->input[i] = (fann_type *)calloc(num_input, sizeof(fann_type));
+ if(data->input[i] == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ fann_destroy_train(data);
+ return NULL;
+ }
+
+ for(j = 0; j != num_input; j++){
+ if(fscanf(file, FANNSCANF" ", &data->input[i][j]) != 1){
+ fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
+ fann_destroy_train(data);
+ return NULL;
+ }
+ }
+ line++;
+
+ data->output[i] = (fann_type *)calloc(num_output, sizeof(fann_type));
+ if(data->output[i] == NULL){
+ fann_error(NULL, FANN_E_CANT_ALLOCATE_MEM);
+ fann_destroy_train(data);
+ return NULL;
+ }
+
+ for(j = 0; j != num_output; j++){
+ if(fscanf(file, FANNSCANF" ", &data->output[i][j]) != 1){
+ fann_error(NULL, FANN_E_CANT_READ_TD, filename, line);
+ fann_destroy_train(data);
+ return NULL;
+ }
+ }
+ line++;
+ }
+ return data;
+}
diff --git a/src/fixedfann.c b/src/fixedfann.c
index 67bcfb5..b0ee0f0 100644
--- a/src/fixedfann.c
+++ b/src/fixedfann.c
@@ -20,6 +20,13 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Easy way to allow for build of multiple binaries */
#include "fixedfann.h"
-#include "fann.c"
-#include "fann_internal.c"
+
#include "fann.h"
+#include "fann_internal.h"
+
+#include "fann.c"
+#include "fann_io.c"
+#include "fann_train.c"
+#include "fann_train_data.c"
+#include "fann_options.c"
+#include "fann_error.c"
diff --git a/src/floatfann.c b/src/floatfann.c
index 87d5494..1e47bc6 100644
--- a/src/floatfann.c
+++ b/src/floatfann.c
@@ -20,6 +20,13 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Easy way to allow for build of multiple binaries */
#include "floatfann.h"
-#include "fann.c"
-#include "fann_internal.c"
+
#include "fann.h"
+#include "fann_internal.h"
+
+#include "fann.c"
+#include "fann_io.c"
+#include "fann_train.c"
+#include "fann_train_data.c"
+#include "fann_options.c"
+#include "fann_error.c"
diff --git a/src/include/fann.h b/src/include/fann.h
index 2b00532..10c8f22 100644
--- a/src/include/fann.h
+++ b/src/include/fann.h
@@ -47,8 +47,10 @@ extern "C" {
#ifndef NULL
#define NULL 0
#endif /* NULL */
+
+
-/* ----- Initialisation and configuration ----- */
+/* ----- Implemented in fann.c Creation, running and destruction of ANNs ----- */
/* Constructs a backpropagation neural network, from an connection rate,
a learning rate, the number of layers and the number of neurons in each
@@ -75,14 +77,26 @@ struct fann * fann_create(float connection_rate, float learning_rate,
struct fann * fann_create_array(float connection_rate, float learning_rate,
unsigned int num_layers, unsigned int * layers);
-/* Constructs a backpropagation neural network from a configuration file.
+/* Runs a input through the network, and returns the output.
*/
-struct fann * fann_create_from_file(const char *configuration_file);
+fann_type* fann_run(struct fann *ann, fann_type *input);
/* Destructs the entire network.
Be sure to call this function after finished using the network.
*/
void fann_destroy(struct fann *ann);
+
+/* Randomize weights (from the beginning the weights are random between -0.1 and 0.1)
+ */
+void fann_randomize_weights(struct fann *ann, fann_type min_weight, fann_type max_weight);
+
+
+
+/* ----- Implemented in fann_io.c Saving and loading of ANNs ----- */
+
+/* Constructs a backpropagation neural network from a configuration file.
+ */
+struct fann * fann_create_from_file(const char *configuration_file);
/* Save the entire network to a configuration file.
*/
@@ -115,73 +129,8 @@ void fann_save(struct fann *ann, const char *configuration_file);
*/
int fann_save_to_fixed(struct fann *ann, const char *configuration_file);
-/* ----- Some stuff to set options on the network on the fly. ----- */
-
-/* Set the learning rate.
- */
-void fann_set_learning_rate(struct fann *ann, float learning_rate);
-
-/* Set the activation function for the hidden layers.
- */
-void fann_set_activation_function_hidden(struct fann *ann, unsigned int activation_function);
-
-/* Set the activation function for the output layer.
- */
-void fann_set_activation_function_output(struct fann *ann, unsigned int activation_function);
-
-/* Set the steepness of the sigmoid function used in the hidden layers.
- Only usefull if sigmoid function is used in the hidden layers (default 0.5).
- */
-void fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness);
-
-/* Set the steepness of the sigmoid function used in the output layer.
- Only usefull if sigmoid function is used in the output layer (default 0.5).
- */
-void fann_set_activation_output_steepness(struct fann *ann, fann_type steepness);
-
-/* ----- Some stuff to read network options from the network. ----- */
-
-/* Get the learning rate.
- */
-float fann_get_learning_rate(struct fann *ann);
-
-/* Get the number of input neurons.
- */
-unsigned int fann_get_num_input(struct fann *ann);
-
-/* Get the number of output neurons.
- */
-unsigned int fann_get_num_output(struct fann *ann);
-
-/* Get the activation function used in the hidden layers.
- */
-unsigned int fann_get_activation_function_hidden(struct fann *ann);
-
-/* Get the activation function used in the output layer.
- */
-unsigned int fann_get_activation_function_output(struct fann *ann);
-
-/* Get the steepness parameter for the sigmoid function used in the hidden layers.
- */
-fann_type fann_get_activation_hidden_steepness(struct fann *ann);
-
-/* Get the steepness parameter for the sigmoid function used in the output layer.
- */
-fann_type fann_get_activation_output_steepness(struct fann *ann);
-
-/* Get the total number of neurons in the entire network.
- */
-unsigned int fann_get_total_neurons(struct fann *ann);
-
-/* Get the total number of connections in the entire network.
- */
-unsigned int fann_get_total_connections(struct fann *ann);
-
-/* Randomize weights (from the beginning the weights are random between -0.1 and 0.1)
- */
-void fann_randomize_weights(struct fann *ann, fann_type min_weight, fann_type max_weight);
-/* ----- Training ----- */
+/* ----- Implemented in fann_train.c Training and testing of ANNs ----- */
#ifndef FIXEDFANN
/* Train one iteration with a set of inputs, and a set of desired outputs.
@@ -195,6 +144,26 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output);
*/
fann_type *fann_test(struct fann *ann, fann_type *input, fann_type *desired_output);
+/* Reads the mean square error from the network.
+ (obsolete will be removed at some point, use fann_get_MSE)
+ */
+float fann_get_error(struct fann *ann);
+
+/* Reads the mean square error from the network.
+ */
+float fann_get_MSE(struct fann *ann);
+
+/* Resets the mean square error from the network.
+ (obsolete will be removed at some point, use fann_reset_MSE)
+ */
+void fann_reset_error(struct fann *ann);
+
+/* Resets the mean square error from the network.
+ */
+void fann_reset_MSE(struct fann *ann);
+
+/* ----- Implemented in fann_train_data.c Data for training of ANNs ----- */
+
/* Reads a file that stores training data, in the format:
num_train_data num_input num_output\n
inputdata seperated by space\n
@@ -235,6 +204,11 @@ void fann_train_on_data_callback(struct fann *ann, struct fann_train_data *data,
*/
void fann_train_on_file(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error);
+/* Does the same as train_on_data_callback, but
+ reads the data directly from a file.
+ */
+void fann_train_on_file_callback(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (*callback)(unsigned int epochs, float error));
+
/* shuffles training data, randomizing the order
*/
void fann_shuffle_train_data(struct fann_train_data *train_data);
@@ -247,11 +221,6 @@ struct fann_train_data * fann_merge_train_data(struct fann_train_data *data1, st
*/
struct fann_train_data * fann_duplicate_train_data(struct fann_train_data *data);
-/* Does the same as train_on_data_callback, but
- reads the data directly from a file.
- */
-void fann_train_on_file_callback(struct fann *ann, char *filename, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (*callback)(unsigned int epochs, float error));
-
#endif /* NOT FIXEDFANN */
/* Save the training structure to a file.
@@ -263,54 +232,67 @@ void fann_save_train(struct fann_train_data* data, char *filename);
*/
void fann_save_train_to_fixed(struct fann_train_data* data, char *filename, unsigned int decimal_point);
-/* Reads the mean square error from the network.
- (obsolete will be removed at some point, use fann_get_MSE)
+
+
+/* ----- Implemented in fann_options.c Get and set options for the ANNs ----- */
+
+/* Get the learning rate.
*/
-float fann_get_error(struct fann *ann);
+float fann_get_learning_rate(struct fann *ann);
-/* Reads the mean square error from the network.
+/* Set the learning rate.
*/
-float fann_get_MSE(struct fann *ann);
+void fann_set_learning_rate(struct fann *ann, float learning_rate);
-/* Resets the mean square error from the network.
- (obsolete will be removed at some point, use fann_reset_MSE)
+/* Get the activation function used in the hidden layers.
*/
-void fann_reset_error(struct fann *ann);
+unsigned int fann_get_activation_function_hidden(struct fann *ann);
-/* Resets the mean square error from the network.
+/* Set the activation function for the hidden layers.
*/
-void fann_reset_MSE(struct fann *ann);
+void fann_set_activation_function_hidden(struct fann *ann, unsigned int activation_function);
-/* resets the last error number
+/* Get the activation function used in the output layer.
*/
-void fann_reset_errno(struct fann_error *errdat);
+unsigned int fann_get_activation_function_output(struct fann *ann);
-/* resets the last error string
+/* Set the activation function for the output layer.
*/
-void fann_reset_errstr(struct fann_error *errdat);
+void fann_set_activation_function_output(struct fann *ann, unsigned int activation_function);
-/* change where errors are logged to
+/* Get the steepness parameter for the sigmoid function used in the hidden layers.
*/
-void fann_set_error_log(struct fann_error *errdat, FILE *log);
+fann_type fann_get_activation_hidden_steepness(struct fann *ann);
+
+/* Set the steepness of the sigmoid function used in the hidden layers.
+ Only usefull if sigmoid function is used in the hidden layers (default 0.5).
+ */
+void fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness);
-/* returns the last error number
+/* Get the steepness parameter for the sigmoid function used in the output layer.
*/
-unsigned int fann_get_errno(struct fann_error *errdat);
+fann_type fann_get_activation_output_steepness(struct fann *ann);
+
+/* Set the steepness of the sigmoid function used in the output layer.
+ Only usefull if sigmoid function is used in the output layer (default 0.5).
+ */
+void fann_set_activation_output_steepness(struct fann *ann, fann_type steepness);
-/* returns the last errstr.
- * This function calls fann_reset_errno and fann_reset_errstr
+/* Get the number of input neurons.
*/
-char * fann_get_errstr(struct fann_error *errdat);
+unsigned int fann_get_num_input(struct fann *ann);
-/* prints the last error to stderr
+/* Get the number of output neurons.
*/
-void fann_print_error(struct fann_error *errdat);
+unsigned int fann_get_num_output(struct fann *ann);
-/* ----- Running ----- */
+/* Get the total number of neurons in the entire network.
+ */
+unsigned int fann_get_total_neurons(struct fann *ann);
-/* Runs a input through the network, and returns the output.
+/* Get the total number of connections in the entire network.
*/
-fann_type* fann_run(struct fann *ann, fann_type *input);
+unsigned int fann_get_total_connections(struct fann *ann);
#ifdef FIXEDFANN
@@ -322,6 +304,35 @@ unsigned int fann_get_decimal_point(struct fann *ann);
*/
unsigned int fann_get_multiplier(struct fann *ann);
#endif /* FIXEDFANN */
+
+
+
+/* ----- Implemented in fann_error.c Access error information about the ANN ----- */
+
+/* change where errors are logged to
+ */
+void fann_set_error_log(struct fann_error *errdat, FILE *log);
+
+/* returns the last error number
+ */
+unsigned int fann_get_errno(struct fann_error *errdat);
+
+/* resets the last error number
+ */
+void fann_reset_errno(struct fann_error *errdat);
+
+/* resets the last error string
+ */
+void fann_reset_errstr(struct fann_error *errdat);
+
+/* returns the last errstr.
+ * This function calls fann_reset_errno and fann_reset_errstr
+ */
+char * fann_get_errstr(struct fann_error *errdat);
+
+/* prints the last error to stderr
+ */
+void fann_print_error(struct fann_error *errdat);
#ifdef __cplusplus
}
diff --git a/src/include/fann_internal.h b/src/include/fann_internal.h
index 9b85353..033c5ad 100644
--- a/src/include/fann_internal.h
+++ b/src/include/fann_internal.h
@@ -45,7 +45,6 @@ int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuratio
void fann_save_train_internal(struct fann_train_data* data, char *filename, unsigned int save_as_fixed, unsigned int decimal_point);
void fann_save_train_internal_fd(struct fann_train_data* data, FILE *file, char *filename, unsigned int save_as_fixed, unsigned int decimal_point);
-int fann_compare_connections(const void* c1, const void* c2);
void fann_seed_rand();
void fann_update_stepwise_hidden(struct fann *ann);
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/libfann.git
More information about the debian-science-commits
mailing list