[libfann] 135/242: merge from lukesky branch

Christian Kastner chrisk-guest at moszumanska.debian.org
Sat Oct 4 21:10:31 UTC 2014


This is an automated email from the git hooks/post-receive script.

chrisk-guest pushed a commit to tag Version2_0_0
in repository libfann.

commit dbbd185bd12bf3480405ff6dc200864ebb6c85dc
Author: Steffen Nissen <lukesky at diku.dk>
Date:   Tue May 25 18:02:06 2004 +0000

    merge from lukesky branch
---
 Makefile.am                   |   7 +-
 Makefile.in                   |   2 +-
 TODO                          |   5 -
 aclocal.m4                    |  21 +-
 configure                     |  98 +++-----
 debian/changelog              |  10 +-
 examples/Makefile             |   1 +
 examples/mushroom.c           |   9 +-
 examples/robot.c              |   8 +-
 examples/simple_train.c       |   5 +-
 examples/steepness_train.c    |  35 ++-
 examples/xor_train.c          |  14 +-
 src/doublefann.c              |   3 -
 src/fann.c                    | 100 +++++---
 src/fann_cascade.c            | 145 +++++++++++
 src/fann_io.c                 |  20 +-
 src/fann_options.c            | 255 +++++++++++++++-----
 src/fann_train.c              | 548 ++++++++++++++++++++++++++++++++----------
 src/fann_train_data.c         | 130 +++++++++-
 src/fixedfann.c               |   3 -
 src/floatfann.c               |   3 -
 src/include/fann.h            |  95 +++++++-
 src/include/fann_activation.h |   6 +
 src/include/fann_data.h       |  88 ++++++-
 src/include/fann_internal.h   |  15 ++
 25 files changed, 1241 insertions(+), 385 deletions(-)

diff --git a/Makefile.am b/Makefile.am
index d8cedea..5595b44 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -3,7 +3,12 @@ SUBDIRS = src
 pkgconfigdir = $(libdir)/pkgconfig
 pkgconfig_DATA = fann.pc
 
-EXTRA_DIST = benchmarks doc examples python MSVC++ src/include/config.h
+EXTRA_DIST = benchmarks doc examples python MSVC++ src/include/config.h \
+	debian/changelog debian/compat debian/control debian/copyright \
+	debian/docs debian/libfann1-dev.dirs debian/libfann1-dev.examples \
+	debian/libfann1-dev.files debian/libfann1-dev.install \
+	debian/libfann1.dirs debian/libfann1.files debian/libfann1.install \
+	debian/rules 
 
 dist-hook:
 	rm -rf `find $(distdir) -name "*~" -o -name ".#*"`
diff --git a/Makefile.in b/Makefile.in
index 1a3b77f..2e8a40d 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -576,7 +576,7 @@ dist-hook:
 	rm -rf `find $(distdir)/benchmarks -name CVS`
 	(cd $(distdir)/python && make clean)
 	rm -rf `find $(distdir)/python -name CVS`
-	zip -r $(distdir).zip $(distdir)
+	zip -9 -r $(distdir).zip $(distdir)
 	tar chojf $(distdir).tar.bz2 $(distdir)
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/TODO b/TODO
index 678c72d..f58778c 100644
--- a/TODO
+++ b/TODO
@@ -1,12 +1,7 @@
-* Implement quickprop training
-* Implement rprop training.
 * Implement cascade correlation.
 * Implement SuperSAB
 * Implement the activation functions defined in fann_activation.h
 * More checks to see if train data is properly formatted in the file (some have experienced nasty problems because a number was missing in one line).
-* Make it easy to use in windows compilers as Borland C++ and MS Visual C++.
-* Good reference manual.
-* Convert existing comments to doxygen-friendly comments.
 * C++ wrapper.
 * C# wrapper.
 * Implement optimal brain damage.
diff --git a/aclocal.m4 b/aclocal.m4
index 03622fd..c6636e6 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -862,7 +862,7 @@ AU_DEFUN([AM_CONFIG_HEADER], [AC_CONFIG_HEADERS($@)])
 # libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
 
 # serial 47 AC_PROG_LIBTOOL
-# Debian $Rev: 149 $
+# Debian $Rev: 192 $
 
 
 # AC_PROVIDE_IFELSE(MACRO-NAME, IF-PROVIDED, IF-NOT-PROVIDED)
@@ -1768,13 +1768,6 @@ AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
    mkdir out
    printf "$lt_simple_compile_test_code" > conftest.$ac_ext
 
-   # According to Tom Tromey, Ian Lance Taylor reported there are C compilers
-   # that will create temporary files in the current directory regardless of
-   # the output directory.  Thus, making CWD read-only will cause this test
-   # to fail, enabling locking or at least warning the user not to do parallel
-   # builds.
-   chmod -w .
-
    lt_compiler_flag="-o out/conftest2.$ac_objext"
    # Insert the option either (1) after the last *FLAGS variable, or
    # (2) before a word containing "conftest.", or (3) at the end.
@@ -2324,7 +2317,7 @@ netbsd*)
     finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
     dynamic_linker='NetBSD (a.out) ld.so'
   else
-    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}'
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
     soname_spec='${libname}${release}${shared_ext}$major'
     dynamic_linker='NetBSD ld.elf_so'
   fi
@@ -3033,7 +3026,7 @@ linux*)
   lt_cv_deplibs_check_method=pass_all
   ;;
 
-netbsd*)
+netbsd* | knetbsd*-gnu)
   if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
     lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
   else
@@ -4027,7 +4020,7 @@ case $host_os in
 	;;
     esac
     ;;
-  netbsd*)
+  netbsd* | knetbsd*-gnu)
     if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
       _LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
       wlarc=
@@ -5517,7 +5510,7 @@ AC_MSG_CHECKING([for $compiler option to produce PIC])
 	    ;;
 	esac
 	;;
-      netbsd*)
+      netbsd* | knetbsd*-gnu)
 	;;
       osf3* | osf4* | osf5*)
 	case $cc_basename in
@@ -5957,7 +5950,7 @@ EOF
       fi
       ;;
 
-    netbsd*)
+    netbsd* | knetbsd*-gnu)
       if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
 	_LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
 	wlarc=
@@ -6374,7 +6367,7 @@ $echo "local: *; };" >> $output_objdir/$libname.ver~
       _LT_AC_TAGVAR(link_all_deplibs, $1)=yes
       ;;
 
-    netbsd*)
+    netbsd* | knetbsd*-gnu)
       if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
 	_LT_AC_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
       else
diff --git a/configure b/configure
index 336ad90..41bfde7 100755
--- a/configure
+++ b/configure
@@ -3550,7 +3550,7 @@ linux*)
   lt_cv_deplibs_check_method=pass_all
   ;;
 
-netbsd*)
+netbsd* | knetbsd*-gnu)
   if echo __ELF__ | $CC -E - | grep __ELF__ > /dev/null; then
     lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
   else
@@ -6540,13 +6540,6 @@ else
    mkdir out
    printf "$lt_simple_compile_test_code" > conftest.$ac_ext
 
-   # According to Tom Tromey, Ian Lance Taylor reported there are C compilers
-   # that will create temporary files in the current directory regardless of
-   # the output directory.  Thus, making CWD read-only will cause this test
-   # to fail, enabling locking or at least warning the user not to do parallel
-   # builds.
-   chmod -w .
-
    lt_compiler_flag="-o out/conftest2.$ac_objext"
    # Insert the option either (1) after the last *FLAGS variable, or
    # (2) before a word containing "conftest.", or (3) at the end.
@@ -6556,11 +6549,11 @@ else
    -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:6559: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:6552: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:6563: \$? = $ac_status" >&5
+   echo "$as_me:6556: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -6733,7 +6726,7 @@ EOF
       fi
       ;;
 
-    netbsd*)
+    netbsd* | knetbsd*-gnu)
       if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
 	archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
 	wlarc=
@@ -7250,7 +7243,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
       link_all_deplibs=yes
       ;;
 
-    netbsd*)
+    netbsd* | knetbsd*-gnu)
       if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
 	archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
       else
@@ -7920,7 +7913,7 @@ netbsd*)
     finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
     dynamic_linker='NetBSD (a.out) ld.so'
   else
-    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}'
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
     soname_spec='${libname}${release}${shared_ext}$major'
     dynamic_linker='NetBSD ld.elf_so'
   fi
@@ -8731,7 +8724,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 8734 "configure"
+#line 8727 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -8829,7 +8822,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 8832 "configure"
+#line 8825 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -10340,7 +10333,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
 	;;
     esac
     ;;
-  netbsd*)
+  netbsd* | knetbsd*-gnu)
     if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
       archive_cmds_CXX='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
       wlarc=
@@ -10895,7 +10888,7 @@ echo $ECHO_N "checking for $compiler option to produce PIC... $ECHO_C" >&6
 	    ;;
 	esac
 	;;
-      netbsd*)
+      netbsd* | knetbsd*-gnu)
 	;;
       osf3* | osf4* | osf5*)
 	case $cc_basename in
@@ -11006,11 +10999,11 @@ else
    -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:11009: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:11002: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:11013: \$? = $ac_status" >&5
+   echo "$as_me:11006: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings
@@ -11057,13 +11050,6 @@ else
    mkdir out
    printf "$lt_simple_compile_test_code" > conftest.$ac_ext
 
-   # According to Tom Tromey, Ian Lance Taylor reported there are C compilers
-   # that will create temporary files in the current directory regardless of
-   # the output directory.  Thus, making CWD read-only will cause this test
-   # to fail, enabling locking or at least warning the user not to do parallel
-   # builds.
-   chmod -w .
-
    lt_compiler_flag="-o out/conftest2.$ac_objext"
    # Insert the option either (1) after the last *FLAGS variable, or
    # (2) before a word containing "conftest.", or (3) at the end.
@@ -11073,11 +11059,11 @@ else
    -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:11076: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:11062: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:11080: \$? = $ac_status" >&5
+   echo "$as_me:11066: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -11616,7 +11602,7 @@ netbsd*)
     finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
     dynamic_linker='NetBSD (a.out) ld.so'
   else
-    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}'
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
     soname_spec='${libname}${release}${shared_ext}$major'
     dynamic_linker='NetBSD ld.elf_so'
   fi
@@ -12427,7 +12413,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 12430 "configure"
+#line 12416 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -12525,7 +12511,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 12528 "configure"
+#line 12514 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -13352,11 +13338,11 @@ else
    -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:13355: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:13341: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:13359: \$? = $ac_status" >&5
+   echo "$as_me:13345: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings
@@ -13403,13 +13389,6 @@ else
    mkdir out
    printf "$lt_simple_compile_test_code" > conftest.$ac_ext
 
-   # According to Tom Tromey, Ian Lance Taylor reported there are C compilers
-   # that will create temporary files in the current directory regardless of
-   # the output directory.  Thus, making CWD read-only will cause this test
-   # to fail, enabling locking or at least warning the user not to do parallel
-   # builds.
-   chmod -w .
-
    lt_compiler_flag="-o out/conftest2.$ac_objext"
    # Insert the option either (1) after the last *FLAGS variable, or
    # (2) before a word containing "conftest.", or (3) at the end.
@@ -13419,11 +13398,11 @@ else
    -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:13422: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:13401: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:13426: \$? = $ac_status" >&5
+   echo "$as_me:13405: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -13596,7 +13575,7 @@ EOF
       fi
       ;;
 
-    netbsd*)
+    netbsd* | knetbsd*-gnu)
       if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
 	archive_cmds_F77='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
 	wlarc=
@@ -14093,7 +14072,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
       link_all_deplibs_F77=yes
       ;;
 
-    netbsd*)
+    netbsd* | knetbsd*-gnu)
       if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
 	archive_cmds_F77='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
       else
@@ -14763,7 +14742,7 @@ netbsd*)
     finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
     dynamic_linker='NetBSD (a.out) ld.so'
   else
-    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}'
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
     soname_spec='${libname}${release}${shared_ext}$major'
     dynamic_linker='NetBSD ld.elf_so'
   fi
@@ -15451,11 +15430,11 @@ else
    -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:15454: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:15433: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:15458: \$? = $ac_status" >&5
+   echo "$as_me:15437: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings
@@ -15684,11 +15663,11 @@ else
    -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:15687: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:15666: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:15691: \$? = $ac_status" >&5
+   echo "$as_me:15670: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings
@@ -15735,13 +15714,6 @@ else
    mkdir out
    printf "$lt_simple_compile_test_code" > conftest.$ac_ext
 
-   # According to Tom Tromey, Ian Lance Taylor reported there are C compilers
-   # that will create temporary files in the current directory regardless of
-   # the output directory.  Thus, making CWD read-only will cause this test
-   # to fail, enabling locking or at least warning the user not to do parallel
-   # builds.
-   chmod -w .
-
    lt_compiler_flag="-o out/conftest2.$ac_objext"
    # Insert the option either (1) after the last *FLAGS variable, or
    # (2) before a word containing "conftest.", or (3) at the end.
@@ -15751,11 +15723,11 @@ else
    -e 's:.*FLAGS}? :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:15754: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:15726: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:15758: \$? = $ac_status" >&5
+   echo "$as_me:15730: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -15928,7 +15900,7 @@ EOF
       fi
       ;;
 
-    netbsd*)
+    netbsd* | knetbsd*-gnu)
       if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
 	archive_cmds_GCJ='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
 	wlarc=
@@ -16445,7 +16417,7 @@ if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
       link_all_deplibs_GCJ=yes
       ;;
 
-    netbsd*)
+    netbsd* | knetbsd*-gnu)
       if echo __ELF__ | $CC -E - | grep __ELF__ >/dev/null; then
 	archive_cmds_GCJ='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
       else
@@ -17115,7 +17087,7 @@ netbsd*)
     finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
     dynamic_linker='NetBSD (a.out) ld.so'
   else
-    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} ${libname}${shared_ext}'
+    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
     soname_spec='${libname}${release}${shared_ext}$major'
     dynamic_linker='NetBSD ld.elf_so'
   fi
@@ -17926,7 +17898,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 17929 "configure"
+#line 17901 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -18024,7 +17996,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 18027 "configure"
+#line 17999 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
diff --git a/debian/changelog b/debian/changelog
index 47ea82a..df11e1d 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,8 +1,8 @@
-libfann1 (1.1.0.1-1) unstable; urgency=low
-
-  * rebuild with a clean tar.gz. All issues with debian/ dir in cvs
-    that caused a changing orig.gz should be sorted out now.
-
+libfann1 (1.1.0.1-1) unstable; urgency=low
+
+  * rebuild with a clean tar.gz. All issues with debian/ dir in cvs
+    that caused a changing orig.gz should be sorted out now.
+
  -- Steffen Nissen <lukesky at diku.dk>  Mon, 17 May 2004 09:44:38 +0200
 
 libfann1 (1.1.0-3) unstable; urgency=low
diff --git a/examples/Makefile b/examples/Makefile
index 7f70a81..6180d5e 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -48,6 +48,7 @@ rundebugtest: $(DEBUG_TARGETS)
 #compiletest is used to test whether the library will compile easily in other compilers
 compiletest:
 	gcc -O3 -ggdb -lm -DDEBUG -Wall -Wformat-security -Wfloat-equal -Wshadow -Wpointer-arith -Wcast-qual -Wsign-compare -pedantic -ansi -I../src/ -I../src/include/ ../src/floatfann.c xor_train.c -o xor_train
+	gcc -O3 -ggdb -lm -DDEBUG -Wall -Wformat-security -Wfloat-equal -Wshadow -Wpointer-arith -Wcast-qual -Wsign-compare -pedantic -ansi -DFIXEDFANN -I../src/ -I../src/include/ ../src/fixedfann.c xor_test.c -o xor_test
 	g++ -O3 -ggdb -lm -DDEBUG -Wall -Wformat-security -Wfloat-equal -Wpointer-arith -Wcast-qual -Wsign-compare -pedantic -ansi -I../src/ -I../src/include/ ../src/floatfann.c xor_train.c -o xor_train
 
 debug: $(DEBUG_TARGETS)
diff --git a/examples/mushroom.c b/examples/mushroom.c
index 546c33f..4ba4396 100644
--- a/examples/mushroom.c
+++ b/examples/mushroom.c
@@ -30,11 +30,11 @@ int print_callback(unsigned int epochs, float error)
 int main()
 {
 	const float connection_rate = 1;
-	const float learning_rate = (const float)0.4;
+	const float learning_rate = (const float)0.7;
 	const unsigned int num_layers = 3;
 	const unsigned int num_neurons_hidden = 32;
 	const float desired_error = (const float)0.0001;
-	const unsigned int max_iterations = 300000;
+	const unsigned int max_iterations = 300;
 	const unsigned int iterations_between_reports = 1;
 	struct fann *ann;
 	struct fann_train_data *train_data, *test_data;
@@ -55,11 +55,10 @@ int main()
 	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
 	fann_set_activation_function_output(ann, FANN_SIGMOID_STEPWISE);
 
+	fann_set_training_algorithm(ann, FANN_INCREMENTAL_TRAIN);
+	
 	fann_train_on_data(ann, train_data, max_iterations, iterations_between_reports, desired_error);
 	
-	/*fann_train_on_data_callback(ann, data, max_iterations, iterations_between_reports, desired_error, print_callback);*/
-
-
 	printf("Testing network.\n");
 
 	test_data = fann_read_train_from_file("../benchmarks/datasets/mushroom.test");
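
[Annotation: the mushroom example now picks its training algorithm explicitly instead of relying on the default. A minimal sketch of that pattern, using only the calls visible in this diff (fann_set_training_algorithm with the FANN_*_TRAIN constants; FANN_RPROP_TRAIN is the default set in fann_allocate_structure below):

    /* Sketch: choose the per-pattern incremental algorithm before
       training; without this call the network now trains with RPROP. */
    fann_set_training_algorithm(ann, FANN_INCREMENTAL_TRAIN);
    fann_train_on_data(ann, train_data, max_iterations,
                       iterations_between_reports, desired_error);
]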
diff --git a/examples/robot.c b/examples/robot.c
index 44fc080..d346cbd 100644
--- a/examples/robot.c
+++ b/examples/robot.c
@@ -34,8 +34,6 @@ int main()
 	const unsigned int num_layers = 3;
 	const unsigned int num_neurons_hidden = 96;
 	const float desired_error = (const float)0.001;
-	const unsigned int max_iterations = 300000;
-	const unsigned int iterations_between_reports = 10;
 	struct fann *ann;
 	struct fann_train_data *train_data, *test_data;
 	
@@ -52,11 +50,11 @@ int main()
 
 	printf("Training network.\n");
 
-	fann_train_on_data(ann, train_data, max_iterations, iterations_between_reports, desired_error);
+	fann_set_training_algorithm(ann, FANN_INCREMENTAL_TRAIN);
+	
+	fann_train_on_data(ann, train_data, 100000, 10, desired_error);
 	
 	/*fann_train_on_data_callback(ann, data, max_iterations, iterations_between_reports, desired_error, print_callback);*/
-
-
 	printf("Testing network.\n");
 
 	test_data = fann_read_train_from_file("../benchmarks/datasets/robot.test");
diff --git a/examples/simple_train.c b/examples/simple_train.c
index 1a6e44a..35f4119 100644
--- a/examples/simple_train.c
+++ b/examples/simple_train.c
@@ -27,13 +27,16 @@ int main()
 	const unsigned int num_output = 1;
 	const unsigned int num_layers = 3;
 	const unsigned int num_neurons_hidden = 5;
-	const float desired_error = (const float)0.0001;
+	const float desired_error = (const float)0.001;
 	const unsigned int max_iterations = 500000;
 	const unsigned int iterations_between_reports = 1000;
 
 	struct fann *ann = fann_create(connection_rate, learning_rate, num_layers,
 		num_input, num_neurons_hidden, num_output);
 
+	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
+	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC);
+	
 	fann_train_on_file(ann, "xor.data", max_iterations,
 		iterations_between_reports, desired_error);
 	
diff --git a/examples/steepness_train.c b/examples/steepness_train.c
index 2a2d687..8ce3464 100644
--- a/examples/steepness_train.c
+++ b/examples/steepness_train.c
@@ -26,7 +26,7 @@ void train_on_steepness_file(struct fann *ann, char *filename,
 	float steepness_step, float steepness_end)
 {
 	float error;
-	unsigned int i, j;
+	unsigned int i;
 
 	struct fann_train_data *data = fann_read_train_from_file(filename);
 	if(epochs_between_reports){
@@ -34,17 +34,11 @@ void train_on_steepness_file(struct fann *ann, char *filename,
 			max_epochs, desired_error);
 	}
 
-	fann_set_activation_hidden_steepness(ann, steepness_start);
-	fann_set_activation_output_steepness(ann, steepness_start);
+	fann_set_activation_steepness_hidden(ann, steepness_start);
+	fann_set_activation_steepness_output(ann, steepness_start);
 	for(i = 1; i <= max_epochs; i++){
 		/* train */
-		fann_reset_MSE(ann);
-
-		for(j = 0; j != data->num_data; j++){
-			fann_train(ann, data->input[j], data->output[j]);
-		}
-
-		error = fann_get_MSE(ann);
+		error = fann_train_epoch(ann, data);
 
 		/* print current output */
 		if(epochs_between_reports &&
@@ -59,8 +53,8 @@ void train_on_steepness_file(struct fann *ann, char *filename,
 			steepness_start += steepness_step;
 			if(steepness_start <= steepness_end){
 				printf("Steepness: %f\n", steepness_start);
-				fann_set_activation_hidden_steepness(ann, steepness_start);
-				fann_set_activation_output_steepness(ann, steepness_start);
+				fann_set_activation_steepness_hidden(ann, steepness_start);
+				fann_set_activation_steepness_output(ann, steepness_start);
 			}else{
 				break;
 			}
@@ -76,8 +70,8 @@ int main()
 	const unsigned int num_input = 2;
 	const unsigned int num_output = 1;
 	const unsigned int num_layers = 3;
-	const unsigned int num_neurons_hidden = 4;
-	const float desired_error = (const float)0.0001;
+	const unsigned int num_neurons_hidden = 2;
+	const float desired_error = (const float)0.001;
 	const unsigned int max_iterations = 500000;
 	const unsigned int iterations_between_reports = 1000;
 	unsigned int i;
@@ -88,14 +82,19 @@ int main()
 	struct fann *ann = fann_create(connection_rate,
 		learning_rate, num_layers,
 		num_input, num_neurons_hidden, num_output);
-
+	
 	data = fann_read_train_from_file("xor.data");
 	
+	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC);
+	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC);
+
+	fann_set_training_algorithm(ann, FANN_QUICKPROP_TRAIN);
+	
 	train_on_steepness_file(ann, "xor.data", max_iterations,
-		iterations_between_reports, desired_error, (float)0.5, (float)0.1, (float)20.0);
+		iterations_between_reports, desired_error, (float)1.0, (float)0.1, (float)20.0);
 
-	fann_set_activation_function_hidden(ann, FANN_THRESHOLD);
-	fann_set_activation_function_output(ann, FANN_THRESHOLD);
+	fann_set_activation_function_hidden(ann, FANN_THRESHOLD_SYMMETRIC);
+	fann_set_activation_function_output(ann, FANN_THRESHOLD_SYMMETRIC);
 
 	for(i = 0; i != data->num_data; i++){
 		calc_out = fann_run(ann, data->input[i]);
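
[Annotation: the hand-rolled epoch loop is gone from train_on_steepness_file; the new fann_train_epoch helper runs one epoch over the data and returns the resulting MSE. A before/after sketch, using only calls that appear in this diff:

    /* Before: one epoch trained manually, pattern by pattern. */
    fann_reset_MSE(ann);
    for(j = 0; j != data->num_data; j++){
        fann_train(ann, data->input[j], data->output[j]);
    }
    error = fann_get_MSE(ann);

    /* After: one call runs the epoch with the currently selected
       training algorithm and returns the mean square error. */
    error = fann_train_epoch(ann, data);
]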
diff --git a/examples/xor_train.c b/examples/xor_train.c
index f8edd21..28e2c8b 100644
--- a/examples/xor_train.c
+++ b/examples/xor_train.c
@@ -31,11 +31,11 @@ int main()
 {
 	fann_type *calc_out;
 	const float connection_rate = 1;
-	const float learning_rate = (const float)0.4;
+	const float learning_rate = (const float)0.7;
 	const unsigned int num_input = 2;
 	const unsigned int num_output = 1;
 	const unsigned int num_layers = 3;
-	const unsigned int num_neurons_hidden = 6;
+	const unsigned int num_neurons_hidden = 2;
 	const float desired_error = (const float)0.001;
 	const unsigned int max_iterations = 300000;
 	const unsigned int iterations_between_reports = 1000;
@@ -56,8 +56,16 @@ int main()
 
 	data = fann_read_train_from_file("xor.data");
 
-	fann_init_weights(ann, data);
+
+	fann_set_activation_steepness_hidden(ann, 1.0);
+	fann_set_activation_steepness_output(ann, 1.0);
+	
+	fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
+	fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
 	
+	fann_init_weights(ann, data);
+
+	/* fann_set_training_algorithm(ann, FANN_QUICKPROP_TRAIN); */
 	fann_train_on_data(ann, data, max_iterations, iterations_between_reports, desired_error);
 	
 	/*fann_train_on_data_callback(ann, data, max_iterations, iterations_between_reports, desired_error, print_callback);*/
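
[Annotation: xor_train.c now sets the steepness and activation functions before calling fann_init_weights, so the weight initialization runs against the final network configuration. A condensed sketch of the new setup order, assuming the FANN 1.x calls used in this diff:

    struct fann *ann = fann_create(connection_rate, learning_rate,
                                   num_layers, num_input, num_neurons_hidden, num_output);
    struct fann_train_data *data = fann_read_train_from_file("xor.data");

    /* Configure the network first ... */
    fann_set_activation_steepness_hidden(ann, 1.0);
    fann_set_activation_steepness_output(ann, 1.0);
    fann_set_activation_function_hidden(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);
    fann_set_activation_function_output(ann, FANN_SIGMOID_SYMMETRIC_STEPWISE);

    /* ... then initialize the weights from the training data. */
    fann_init_weights(ann, data);
]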
diff --git a/src/doublefann.c b/src/doublefann.c
index 7769d7a..57d5efd 100644
--- a/src/doublefann.c
+++ b/src/doublefann.c
@@ -21,9 +21,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
 #include "doublefann.h"
 
-#include "fann.h"
-#include "fann_internal.h"
-
 #include "fann.c"
 #include "fann_io.c"
 #include "fann_train.c"
diff --git a/src/fann.c b/src/fann.c
index d829d33..7b0744e 100644
--- a/src/fann.c
+++ b/src/fann.c
@@ -177,7 +177,7 @@ struct fann * fann_create_array(float connection_rate, float learning_rate, unsi
 			printf("  layer       : %d neurons, 1 bias\n", prev_layer_size-1);
 #endif
 		}
-	}else{
+	} else {
 		/* make connections for a network that is not fully connected */
 		
 		/* generally, what we do is first to connect all the input
@@ -437,15 +437,15 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 	/* store some variables locally for fast access */
 #ifndef FIXEDFANN
 	fann_type steepness;
-	const fann_type activation_output_steepness = ann->activation_output_steepness;
-	const fann_type activation_hidden_steepness = ann->activation_hidden_steepness;
+	const fann_type activation_steepness_output = ann->activation_steepness_output;
+	const fann_type activation_steepness_hidden = ann->activation_steepness_hidden;
 #endif
 	
 	unsigned int activation_function_output = ann->activation_function_output;
 	unsigned int activation_function_hidden = ann->activation_function_hidden;
 	struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
 #ifdef FIXEDFANN
-	unsigned int multiplier = ann->multiplier;
+	int multiplier = ann->multiplier;
 	unsigned int decimal_point = ann->decimal_point;
 #endif
 	
@@ -463,20 +463,20 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 		case FANN_SIGMOID_STEPWISE:
 		case FANN_SIGMOID_SYMMETRIC_STEPWISE:			
 			/* the hidden results */
-			rh1 = ann->activation_hidden_results[0];
-			rh2 = ann->activation_hidden_results[1];
-			rh3 = ann->activation_hidden_results[2];
-			rh4 = ann->activation_hidden_results[3];
-			rh5 = ann->activation_hidden_results[4];
-			rh6 = ann->activation_hidden_results[5];
+			rh1 = ann->activation_results_hidden[0];
+			rh2 = ann->activation_results_hidden[1];
+			rh3 = ann->activation_results_hidden[2];
+			rh4 = ann->activation_results_hidden[3];
+			rh5 = ann->activation_results_hidden[4];
+			rh6 = ann->activation_results_hidden[5];
 			
 			/* the hidden parameters */
-			h1 = ann->activation_hidden_values[0];
-			h2 = ann->activation_hidden_values[1];
-			h3 = ann->activation_hidden_values[2];
-			h4 = ann->activation_hidden_values[3];
-			h5 = ann->activation_hidden_values[4];
-			h6 = ann->activation_hidden_values[5];
+			h1 = ann->activation_values_hidden[0];
+			h2 = ann->activation_values_hidden[1];
+			h3 = ann->activation_values_hidden[2];
+			h4 = ann->activation_values_hidden[3];
+			h5 = ann->activation_values_hidden[4];
+			h6 = ann->activation_values_hidden[5];
 			break;
 		default:
 			break;
@@ -490,20 +490,20 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 		case FANN_SIGMOID_STEPWISE:
 		case FANN_SIGMOID_SYMMETRIC_STEPWISE:			
 			/* the output results */
-			ro1 = ann->activation_output_results[0];
-			ro2 = ann->activation_output_results[1];
-			ro3 = ann->activation_output_results[2];
-			ro4 = ann->activation_output_results[3];
-			ro5 = ann->activation_output_results[4];
-			ro6 = ann->activation_output_results[5];
+			ro1 = ann->activation_results_output[0];
+			ro2 = ann->activation_results_output[1];
+			ro3 = ann->activation_results_output[2];
+			ro4 = ann->activation_results_output[3];
+			ro5 = ann->activation_results_output[4];
+			ro6 = ann->activation_results_output[5];
 			
 			/* the output parameters */
-			o1 = ann->activation_output_values[0];
-			o2 = ann->activation_output_values[1];
-			o3 = ann->activation_output_values[2];
-			o4 = ann->activation_output_values[3];
-			o5 = ann->activation_output_values[4];
-			o6 = ann->activation_output_values[5];
+			o1 = ann->activation_values_output[0];
+			o2 = ann->activation_values_output[1];
+			o3 = ann->activation_values_output[2];
+			o4 = ann->activation_values_output[3];
+			o5 = ann->activation_values_output[4];
+			o6 = ann->activation_values_output[5];
 			break;
 		default:
 			break;
@@ -530,7 +530,7 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 		((layer_it-1)->last_neuron-1)->value = 1;
 		
 		steepness = (layer_it == last_layer-1) ? 
-			activation_output_steepness : activation_hidden_steepness;
+			activation_steepness_output : activation_steepness_hidden;
 #endif
 		
 		activation_function = (layer_it == last_layer-1) ?
@@ -672,6 +672,9 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
 				case FANN_THRESHOLD:
 					neuron_it->value = (fann_type)((neuron_value < 0) ? 0 : 1);
 					break;
+				case FANN_THRESHOLD_SYMMETRIC:
+					neuron_it->value = (fann_type)((neuron_value < 0) ? -1 : 1);
+					break;
 				default:
 					fann_error((struct fann_error *)ann, FANN_E_CANT_USE_ACTIVATION);
 			}
@@ -692,12 +695,12 @@ fann_type* fann_run(struct fann *ann, fann_type *input)
  */
 void fann_destroy(struct fann *ann)
 {
-	fann_safe_free((ann->first_layer+1)->first_neuron->weights);
-	fann_safe_free((ann->first_layer+1)->first_neuron->connected_neurons);
+	fann_safe_free(fann_get_weights(ann));
+	fann_safe_free(fann_get_connections(ann));
 	fann_safe_free(ann->first_layer->first_neuron);
 	fann_safe_free(ann->first_layer);
 	fann_safe_free(ann->output);
-	fann_safe_free(ann->train_deltas);
+	fann_safe_free(ann->train_errors);
 	fann_safe_free(ann->errstr);
 	fann_safe_free(ann);
 }
@@ -831,14 +834,37 @@ struct fann * fann_allocate_structure(float learning_rate, unsigned int num_laye
 		return NULL;
 	}
 
+	ann->errno_f = 0;
+	ann->error_log = NULL;
+	ann->errstr = NULL;
 	ann->learning_rate = learning_rate;
 	ann->total_neurons = 0;
 	ann->total_connections = 0;
 	ann->num_input = 0;
 	ann->num_output = 0;
-	ann->train_deltas = NULL;
+	ann->train_errors = NULL;
+	ann->train_slopes = NULL;
+	ann->prev_steps = NULL;
+	ann->prev_train_slopes = NULL;
+	ann->training_algorithm = FANN_RPROP_TRAIN;
 	ann->num_errors = 0;
+	ann->error_value = 0;
 	ann->forward_connections = 0;
+	ann->use_tanh_error_function = 1;
+
+	/* variables used for cascade correlation (reasonable defaults) */
+	/*ann->change_fraction = 0.01;
+	  ann->stagnation_epochs = 12;*/
+
+	/* Variables for use with Quickprop training (reasonable defaults) */
+	ann->quickprop_decay = -0.0001;
+	ann->quickprop_mu = 1.75;
+
+	/* Variables for use with RPROP training (reasonable defaults) */
+	ann->rprop_increase_factor = 1.2;
+	ann->rprop_decrease_factor = 0.5;
+	ann->rprop_delta_min = 0.0;
+	ann->rprop_delta_max = 50.0;
 
 	fann_init_error_data((struct fann_error *)ann);
 
@@ -852,11 +878,11 @@ struct fann * fann_allocate_structure(float learning_rate, unsigned int num_laye
 	ann->activation_function_hidden = FANN_SIGMOID_STEPWISE;
 	ann->activation_function_output = FANN_SIGMOID_STEPWISE;
 #ifdef FIXEDFANN
-	ann->activation_hidden_steepness = ann->multiplier/2;
-	ann->activation_output_steepness = ann->multiplier/2;
+	ann->activation_steepness_hidden = ann->multiplier/2;
+	ann->activation_steepness_output = ann->multiplier/2;
 #else
-	ann->activation_hidden_steepness = 0.5;
-	ann->activation_output_steepness = 0.5;
+	ann->activation_steepness_hidden = 0.5;
+	ann->activation_steepness_output = 0.5;
 #endif
 
 	/* allocate room for the layers */
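
[Annotation: fann_destroy now frees through the new fann_get_weights/fann_get_connections accessors (added in fann_options.c further down), which expose the flat weight and connection arrays. A small sketch of what they hand back, assuming fann_get_total_connections (also in fann_options.c) gives the matching array length:

    /* Sketch: inspect all weights through the new accessors. */
    fann_type *weights = fann_get_weights(ann);
    unsigned int n = fann_get_total_connections(ann);
    unsigned int i;
    for(i = 0; i < n; i++){
        printf("weight %u = " FANNPRINTF "\n", i, weights[i]);
    }
]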
diff --git a/src/fann_cascade.c b/src/fann_cascade.c
new file mode 100644
index 0000000..c53beae
--- /dev/null
+++ b/src/fann_cascade.c
@@ -0,0 +1,145 @@
+/*
+  Fast Artificial Neural Network Library (fann)
+  Copyright (C) 2003 Steffen Nissen (lukesky at diku.dk)
+  
+  This library is free software; you can redistribute it and/or
+  modify it under the terms of the GNU Lesser General Public
+  License as published by the Free Software Foundation; either
+  version 2.1 of the License, or (at your option) any later version.
+  
+  This library is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  Lesser General Public License for more details.
+  
+  You should have received a copy of the GNU Lesser General Public
+  License along with this library; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#include "fann.h"
+
+void fann_cascadetrain_on_data_callback(struct fann *ann, struct fann_train_data *data, float desired_error, int (*callback)(unsigned int epochs, float error), unsigned int max_out_epochs, unsigned int max_neurons, unsigned int neurons_between_reports);
+
+int fann_train_outputs(struct fann *ann, struct fann_train_data *data, float desired_error, unsigned int max_epochs);
+
+float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data);
+
+/* Train directly on the training data.
+ */
+void fann_cascadetrain_on_data_callback(struct fann *ann, struct fann_train_data *data, float desired_error, int (*callback)(unsigned int epochs, float error), unsigned int max_out_epochs, unsigned int max_neurons, unsigned int neurons_between_reports)
+{
+	float error;
+	unsigned int i;
+	unsigned int total_epochs = 0;
+	
+	if(neurons_between_reports && callback == NULL){
+		printf("Max neurons %6d. Desired error: %.6f\n", max_neurons, desired_error);
+	}
+	
+	for(i = 1; i <= max_neurons; i++){
+		/* train */
+		
+		total_epochs += fann_train_outputs(ann, data, desired_error, max_out_epochs);
+
+		error = fann_get_MSE(ann);
+				
+		/* print current error */
+		if(neurons_between_reports &&
+			(i % neurons_between_reports == 0
+				|| i == max_neurons
+				|| i == 1
+				|| error < desired_error)){
+			if (callback == NULL) {
+				printf("Neurons     %6d. Current error: %.6f. Epochs %6d\n", i, error, total_epochs);
+			} else if((*callback)(i, error) == -1){
+				/* you can break the training by returning -1 */
+				break;
+			}
+		}
+		
+		if(error < desired_error){
+			break;
+		}
+
+		/* fann_train_candidate */
+		/* fann_install_candidate */
+	}
+}
+
+int fann_train_outputs(struct fann *ann, struct fann_train_data *data, float desired_error, unsigned int max_epochs)
+{
+	float error, initial_error, error_improvement;
+	float target_improvement = 0.0;
+	float backslide_improvement = 0.0;
+	unsigned int i;
+	unsigned int stagnation = max_epochs;
+
+	fann_clear_train_arrays(ann);
+	
+	/* run an initial epoch to set the initial error */
+	initial_error = fann_train_outputs_epoch(ann, data);
+
+	if(initial_error < desired_error){
+		return 1;
+	}
+	
+	for(i = 1; i < max_epochs; i++){
+		error = fann_train_outputs_epoch(ann, data);
+
+		if(error < desired_error){
+			return i+1;
+		}
+		
+		/* Improvement since start of train */
+		error_improvement = initial_error - error;
+		
+		/* After any significant change, set a new goal and
+		   allow a new quota of epochs to reach it */  
+		if ((error_improvement > target_improvement) ||
+			(error_improvement < backslide_improvement))
+		{
+			/*printf("error_improvement=%f, target_improvement=%f, backslide_improvement=%f, stagnation=%d\n", error_improvement, target_improvement, backslide_improvement, stagnation);*/
+
+			target_improvement = error_improvement * (ann->change_fraction + 1);
+			backslide_improvement = error_improvement * (ann->change_fraction - 1);
+			stagnation = i + ann->stagnation_epochs;
+		}
+		
+		/* No improvement in allotted period, so quit */
+		if (i >= stagnation)
+		{
+			return i+1;
+		}
+	}
+
+	return max_epochs;
+}
+
+float fann_train_outputs_epoch(struct fann *ann, struct fann_train_data *data)
+{
+	unsigned int i;
+	fann_reset_MSE(ann);
+	
+	for(i = 0; i < data->num_data; i++){
+		/* TODO this should be real quickprop training and only on the output layer */
+		/*fann_train(ann, data->input[i], data->output[i]);*/
+
+		fann_run(ann, data->input[i]);
+		fann_compute_MSE(ann, data->output[i]);
+		fann_backpropagate_MSE(ann);
+		/*fann_update_weights(ann);*/
+		fann_update_slopes_batch(ann);
+	}
+	fann_update_weights_quickprop(ann, data->num_data);
+	/*fann_update_weights_batch(ann, data->num_data);*/
+
+	/*fann_update_output_weights(ann);*/
+
+	return fann_get_MSE(ann);
+}
+
+void fann_update_output_weights(struct fann *ann)
+{
+	printf("fann_update_output_weights not implemented\n");
+}
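
[Annotation: fann_cascade.c is new and, judging by the commented-out fann_train_candidate/fann_install_candidate calls, still a work in progress: only the output-training phase is implemented, and the change_fraction/stagnation_epochs defaults it reads are still commented out in fann_allocate_structure. A speculative usage sketch based on the prototype declared above; argument values are illustrative, not library defaults:

    /* Hypothetical progress callback; returning -1 stops training. */
    int report(unsigned int neurons, float error)
    {
        printf("neurons %6u, MSE %.6f\n", neurons, error);
        return 0;
    }

    ...

    fann_cascadetrain_on_data_callback(ann, data,
        0.001f,   /* desired_error */
        report,   /* callback; NULL falls back to printf reporting */
        150,      /* max_out_epochs */
        30,       /* max_neurons */
        1);       /* neurons_between_reports */
]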
diff --git a/src/fann_io.c b/src/fann_io.c
index eec5053..748f0d8 100644
--- a/src/fann_io.c
+++ b/src/fann_io.c
@@ -149,18 +149,18 @@ int fann_save_internal_fd(struct fann *ann, FILE *conf, const char *configuratio
 		/* save the decimal_point on a separate line */
 		fprintf(conf, "%u\n", decimal_point);
 		
-		/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
-		fprintf(conf, "%u %f %f %u %u %u %d %d\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, (int)(ann->activation_hidden_steepness * fixed_multiplier), (int)(ann->activation_output_steepness * fixed_multiplier));
+		/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
+		fprintf(conf, "%u %f %f %u %u %u %d %d\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, (int)(ann->activation_steepness_hidden * fixed_multiplier), (int)(ann->activation_steepness_output * fixed_multiplier));
 	}else{
-		/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
-		fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);
+		/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
+		fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_steepness_hidden, ann->activation_steepness_output);
 	}
 #else
 	/* save the decimal_point on a separate line */
 	fprintf(conf, "%u\n", ann->decimal_point);
 	
-	/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_hidden_steepness activation_output_steepness" */	
-	fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_hidden_steepness, ann->activation_output_steepness);	
+	/* save the number layers "num_layers learning_rate connection_rate forward_connections activation_function_hidden activation_function_output activation_steepness_hidden activation_steepness_output" */	
+	fprintf(conf, "%u %f %f %u %u %u "FANNPRINTF" "FANNPRINTF"\n", ann->last_layer - ann->first_layer, ann->learning_rate, ann->connection_rate, ann->forward_connections, ann->activation_function_hidden, ann->activation_function_output, ann->activation_steepness_hidden, ann->activation_steepness_output);	
 #endif
 
 	for(layer_it = ann->first_layer; layer_it != ann->last_layer; layer_it++){
@@ -282,7 +282,7 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 #ifdef FIXEDFANN
 	unsigned int decimal_point, multiplier;
 #endif
-	fann_type activation_hidden_steepness, activation_output_steepness;
+	fann_type activation_steepness_hidden, activation_steepness_output;
 	float learning_rate, connection_rate;
 	struct fann_neuron *first_neuron, *neuron_it, *last_neuron, **connected_neurons;
 	fann_type *weights;
@@ -316,7 +316,7 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 	multiplier = 1 << decimal_point;
 #endif
 	
-	if(fscanf(conf, "%u %f %f %u %u %u "FANNSCANF" "FANNSCANF"\n", &num_layers, &learning_rate, &connection_rate, &forward_connections, &activation_function_hidden, &activation_function_output, &activation_hidden_steepness, &activation_output_steepness) != 8){
+	if(fscanf(conf, "%u %f %f %u %u %u "FANNSCANF" "FANNSCANF"\n", &num_layers, &learning_rate, &connection_rate, &forward_connections, &activation_function_hidden, &activation_function_output, &activation_steepness_hidden, &activation_steepness_output) != 8){
 		fann_error(NULL, FANN_E_CANT_READ_CONFIG, configuration_file);
 		return NULL;
 	}
@@ -333,8 +333,8 @@ struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file)
 	ann->multiplier = multiplier;
 #endif
 
-	ann->activation_hidden_steepness = activation_hidden_steepness;
-	ann->activation_output_steepness = activation_output_steepness;
+	ann->activation_steepness_hidden = activation_steepness_hidden;
+	ann->activation_steepness_output = activation_steepness_output;
 	ann->activation_function_hidden = activation_function_hidden;
 	ann->activation_function_output = activation_function_output;
 	fann_update_stepwise_hidden(ann);
diff --git a/src/fann_options.c b/src/fann_options.c
index 5201d61..5334504 100644
--- a/src/fann_options.c
+++ b/src/fann_options.c
@@ -26,6 +26,16 @@
 #include "fann.h"
 #include "fann_errno.h"
 
+unsigned int fann_get_training_algorithm(struct fann *ann)
+{
+	return ann->training_algorithm;
+}
+
+void fann_set_training_algorithm(struct fann *ann, unsigned int training_algorithm)
+{
+	ann->training_algorithm = training_algorithm;
+}
+
 void fann_set_learning_rate(struct fann *ann, float learning_rate)
 {
 	ann->learning_rate = learning_rate;
@@ -43,18 +53,28 @@ void fann_set_activation_function_output(struct fann *ann, unsigned int activati
 	fann_update_stepwise_output(ann);
 }
 
-void fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness)
+void fann_set_activation_steepness_hidden(struct fann *ann, fann_type steepness)
 {
-	ann->activation_hidden_steepness = steepness;
+	ann->activation_steepness_hidden = steepness;
 	fann_update_stepwise_hidden(ann);
 }
 
-void fann_set_activation_output_steepness(struct fann *ann, fann_type steepness)
+void fann_set_activation_steepness_output(struct fann *ann, fann_type steepness)
 {
-	ann->activation_output_steepness = steepness;
+	ann->activation_steepness_output = steepness;
 	fann_update_stepwise_output(ann);
 }
 
+void fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness)
+{
+	fann_set_activation_steepness_hidden(ann, steepness);
+}
+
+void fann_set_activation_output_steepness(struct fann *ann, fann_type steepness)
+{
+	fann_set_activation_steepness_output(ann, steepness);
+}
+
 float fann_get_learning_rate(struct fann *ann)
 {
 	return ann->learning_rate;
@@ -82,12 +102,22 @@ unsigned int fann_get_activation_function_output(struct fann *ann)
 
 fann_type fann_get_activation_hidden_steepness(struct fann *ann)
 {
-	return ann->activation_hidden_steepness;
+	return ann->activation_steepness_hidden;
 }
 
 fann_type fann_get_activation_output_steepness(struct fann *ann)
 {
-	return ann->activation_output_steepness;
+	return ann->activation_steepness_output;
+}
+
+fann_type fann_get_activation_steepness_hidden(struct fann *ann)
+{
+	return ann->activation_steepness_hidden;
+}
+
+fann_type fann_get_activation_steepness_output(struct fann *ann)
+{
+	return ann->activation_steepness_output;
 }
 
 unsigned int fann_get_total_neurons(struct fann *ann)
@@ -101,6 +131,107 @@ unsigned int fann_get_total_connections(struct fann *ann)
 	return ann->total_connections;
 }
 
+fann_type* fann_get_weights(struct fann *ann)
+{
+	return (ann->first_layer+1)->first_neuron->weights;
+}
+
+struct fann_neuron** fann_get_connections(struct fann *ann)
+{
+	return (ann->first_layer+1)->first_neuron->connected_neurons;
+}
+
+
+/* When enabled, training is usually faster (default 1).
+   Makes the error used for calculating the slopes
+   higher when the difference is higher.
+ */
+void fann_set_use_tanh_error_function(struct fann *ann, unsigned int use_tanh_error_function)
+{
+	ann->use_tanh_error_function = use_tanh_error_function;
+}
+
+/* Decay is used to keep the weights from growing too large (default -0.0001). */
+void fann_set_quickprop_decay(struct fann *ann, float quickprop_decay)
+{
+	ann->quickprop_decay = quickprop_decay;
+}
+	
+/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
+void fann_set_quickprop_mu(struct fann *ann, float quickprop_mu)
+{
+	ann->quickprop_mu = quickprop_mu;
+}
+
+/* Tells how much the stepsize should increase during learning (default 1.2). */
+void fann_set_rprop_increase_factor(struct fann *ann, float rprop_increase_factor)
+{
+	ann->rprop_increase_factor = rprop_increase_factor;
+}
+
+/* Tells how much the stepsize should decrease during learning (default 0.5). */
+void fann_set_rprop_decrease_factor(struct fann *ann, float rprop_decrease_factor)
+{
+	ann->rprop_decrease_factor = rprop_decrease_factor;
+}
+
+/* The minimum stepsize (default 0.0). */
+void fann_set_rprop_delta_min(struct fann *ann, float rprop_delta_min)
+{
+	ann->rprop_delta_min = rprop_delta_min;
+}
+
+/* The maximum stepsize (default 50.0). */
+void fann_set_rprop_delta_max(struct fann *ann, float rprop_delta_max)
+{
+	ann->rprop_delta_max = rprop_delta_max;
+}
+
+/* When enabled, training is usually faster (default 1).
+   Makes the error used for calculating the slopes
+   higher when the difference is higher.
+ */
+unsigned int fann_get_use_tanh_error_function(struct fann *ann)
+{
+	return ann->use_tanh_error_function;
+}
+
+/* Decay is used to keep the weights from growing too large (default -0.0001). */
+float fann_get_quickprop_decay(struct fann *ann)
+{
+	return ann->quickprop_decay;
+}
+	
+/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
+float fann_get_quickprop_mu(struct fann *ann)
+{
+	return ann->quickprop_mu;
+}
+
+/* Tells how much the stepsize should increase during learning (default 1.2). */
+float fann_get_rprop_increase_factor(struct fann *ann)
+{
+	return ann->rprop_increase_factor;
+}
+
+/* Tells how much the stepsize should decrease during learning (default 0.5). */
+float fann_get_rprop_decrease_factor(struct fann *ann)
+{
+	return ann->rprop_decrease_factor;
+}
+
+/* The minimum stepsize (default 0.0). */
+float fann_get_rprop_delta_min(struct fann *ann)
+{
+	return ann->rprop_delta_min;
+}
+
+/* The maximum stepsize (default 50.0). */
+float fann_get_rprop_delta_max(struct fann *ann)
+{
+	return ann->rprop_delta_max;
+}
+
 #ifdef FIXEDFANN
 /* returns the position of the fix point.
  */
@@ -131,21 +262,21 @@ void fann_update_stepwise_hidden(struct fann *ann)
 	switch(ann->activation_function_hidden){
 		case FANN_SIGMOID:
 		case FANN_SIGMOID_STEPWISE:
-			ann->activation_hidden_results[0] = (fann_type)0.005;
-			ann->activation_hidden_results[1] = (fann_type)0.05;
-			ann->activation_hidden_results[2] = (fann_type)0.25;
-			ann->activation_hidden_results[3] = (fann_type)0.75;
-			ann->activation_hidden_results[4] = (fann_type)0.95;
-			ann->activation_hidden_results[5] = (fann_type)0.995;	
+			ann->activation_results_hidden[0] = (fann_type)0.005;
+			ann->activation_results_hidden[1] = (fann_type)0.05;
+			ann->activation_results_hidden[2] = (fann_type)0.25;
+			ann->activation_results_hidden[3] = (fann_type)0.75;
+			ann->activation_results_hidden[4] = (fann_type)0.95;
+			ann->activation_results_hidden[5] = (fann_type)0.995;	
 			break;
 		case FANN_SIGMOID_SYMMETRIC:
 		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-			ann->activation_hidden_results[0] = (fann_type)-0.99;
-			ann->activation_hidden_results[1] = (fann_type)-0.9;
-			ann->activation_hidden_results[2] = (fann_type)-0.5;
-			ann->activation_hidden_results[3] = (fann_type)0.5;
-			ann->activation_hidden_results[4] = (fann_type)0.9;
-			ann->activation_hidden_results[5] = (fann_type)0.99;
+			ann->activation_results_hidden[0] = (fann_type)-0.99;
+			ann->activation_results_hidden[1] = (fann_type)-0.9;
+			ann->activation_results_hidden[2] = (fann_type)-0.5;
+			ann->activation_results_hidden[3] = (fann_type)0.5;
+			ann->activation_results_hidden[4] = (fann_type)0.9;
+			ann->activation_results_hidden[5] = (fann_type)0.99;
 			break;
 		default:
 			/* the activation functions which do not have a stepwise function
@@ -161,21 +292,21 @@ void fann_update_stepwise_hidden(struct fann *ann)
 	switch(ann->activation_function_hidden){
 		case FANN_SIGMOID:
 		case FANN_SIGMOID_STEPWISE:
-			ann->activation_hidden_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
-			ann->activation_hidden_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
-			ann->activation_hidden_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
-			ann->activation_hidden_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
-			ann->activation_hidden_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
-			ann->activation_hidden_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
+			ann->activation_results_hidden[0] = (fann_type)(ann->multiplier/200.0+0.5);
+			ann->activation_results_hidden[1] = (fann_type)(ann->multiplier/20.0+0.5);
+			ann->activation_results_hidden[2] = (fann_type)(ann->multiplier/4.0+0.5);
+			ann->activation_results_hidden[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
+			ann->activation_results_hidden[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
+			ann->activation_results_hidden[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
 			break;
 		case FANN_SIGMOID_SYMMETRIC:
 		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-			ann->activation_hidden_results[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
-			ann->activation_hidden_results[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
-			ann->activation_hidden_results[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
-			ann->activation_hidden_results[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
-			ann->activation_hidden_results[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
-			ann->activation_hidden_results[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
+			ann->activation_results_hidden[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
+			ann->activation_results_hidden[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
+			ann->activation_results_hidden[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
+			ann->activation_results_hidden[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
+			ann->activation_results_hidden[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
+			ann->activation_results_hidden[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
 			break;
 		default:
 			/* the activation functions which do not have a stepwise function
@@ -190,22 +321,22 @@ void fann_update_stepwise_hidden(struct fann *ann)
 			case FANN_SIGMOID:
 				break;
 			case FANN_SIGMOID_STEPWISE:
-				ann->activation_hidden_values[i] = (fann_type)((log(1.0/ann->activation_hidden_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
+				ann->activation_values_hidden[i] = (fann_type)((log(1.0/ann->activation_results_hidden[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_steepness_hidden);
 				break;
 			case FANN_SIGMOID_SYMMETRIC:
 			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-				ann->activation_hidden_values[i] = (fann_type)((log((1.0-ann->activation_hidden_results[i]) / (ann->activation_hidden_results[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_hidden_steepness);
+				ann->activation_values_hidden[i] = (fann_type)((log((1.0-ann->activation_results_hidden[i]) / (ann->activation_results_hidden[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_steepness_hidden);
 				break;
 		}
 #else
 		switch(ann->activation_function_hidden){
 			case FANN_SIGMOID:
 			case FANN_SIGMOID_STEPWISE:
-				ann->activation_hidden_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_hidden_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
+				ann->activation_values_hidden[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_results_hidden[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_steepness_hidden);
 				break;
 			case FANN_SIGMOID_SYMMETRIC:
 			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-				ann->activation_hidden_values[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_hidden_results[i])/((float)ann->activation_hidden_results[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_hidden_steepness);
+				ann->activation_values_hidden[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_results_hidden[i])/((float)ann->activation_results_hidden[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_steepness_hidden);
 				break;
 		}
 #endif
@@ -225,21 +356,21 @@ void fann_update_stepwise_output(struct fann *ann)
 	switch(ann->activation_function_output){
 		case FANN_SIGMOID:
 		case FANN_SIGMOID_STEPWISE:
-			ann->activation_output_results[0] = (fann_type)0.005;
-			ann->activation_output_results[1] = (fann_type)0.05;
-			ann->activation_output_results[2] = (fann_type)0.25;
-			ann->activation_output_results[3] = (fann_type)0.75;
-			ann->activation_output_results[4] = (fann_type)0.95;
-			ann->activation_output_results[5] = (fann_type)0.995;	
+			ann->activation_results_output[0] = (fann_type)0.005;
+			ann->activation_results_output[1] = (fann_type)0.05;
+			ann->activation_results_output[2] = (fann_type)0.25;
+			ann->activation_results_output[3] = (fann_type)0.75;
+			ann->activation_results_output[4] = (fann_type)0.95;
+			ann->activation_results_output[5] = (fann_type)0.995;	
 			break;
 		case FANN_SIGMOID_SYMMETRIC:
 		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-			ann->activation_output_results[0] = (fann_type)-0.99;
-			ann->activation_output_results[1] = (fann_type)-0.9;
-			ann->activation_output_results[2] = (fann_type)-0.5;
-			ann->activation_output_results[3] = (fann_type)0.5;
-			ann->activation_output_results[4] = (fann_type)0.9;
-			ann->activation_output_results[5] = (fann_type)0.99;
+			ann->activation_results_output[0] = (fann_type)-0.99;
+			ann->activation_results_output[1] = (fann_type)-0.9;
+			ann->activation_results_output[2] = (fann_type)-0.5;
+			ann->activation_results_output[3] = (fann_type)0.5;
+			ann->activation_results_output[4] = (fann_type)0.9;
+			ann->activation_results_output[5] = (fann_type)0.99;
 			break;
 		default:
 			/* the activation functions which do not have a stepwise function
@@ -255,21 +386,21 @@ void fann_update_stepwise_output(struct fann *ann)
 	switch(ann->activation_function_output){
 		case FANN_SIGMOID:
 		case FANN_SIGMOID_STEPWISE:
-			ann->activation_output_results[0] = (fann_type)(ann->multiplier/200.0+0.5);
-			ann->activation_output_results[1] = (fann_type)(ann->multiplier/20.0+0.5);
-			ann->activation_output_results[2] = (fann_type)(ann->multiplier/4.0+0.5);
-			ann->activation_output_results[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
-			ann->activation_output_results[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
-			ann->activation_output_results[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
+			ann->activation_results_output[0] = (fann_type)(ann->multiplier/200.0+0.5);
+			ann->activation_results_output[1] = (fann_type)(ann->multiplier/20.0+0.5);
+			ann->activation_results_output[2] = (fann_type)(ann->multiplier/4.0+0.5);
+			ann->activation_results_output[3] = ann->multiplier - (fann_type)(ann->multiplier/4.0+0.5);
+			ann->activation_results_output[4] = ann->multiplier - (fann_type)(ann->multiplier/20.0+0.5);
+			ann->activation_results_output[5] = ann->multiplier - (fann_type)(ann->multiplier/200.0+0.5);
 			break;
 		case FANN_SIGMOID_SYMMETRIC:
 		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-			ann->activation_output_results[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
-			ann->activation_output_results[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
-			ann->activation_output_results[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
-			ann->activation_output_results[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
-			ann->activation_output_results[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
-			ann->activation_output_results[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
+			ann->activation_results_output[0] = (fann_type)((ann->multiplier/100.0) - ann->multiplier + 0.5);
+			ann->activation_results_output[1] = (fann_type)((ann->multiplier/10.0) - ann->multiplier + 0.5);
+			ann->activation_results_output[2] = (fann_type)((ann->multiplier/2.0) - ann->multiplier + 0.5);
+			ann->activation_results_output[3] = ann->multiplier - (fann_type)(ann->multiplier/2.0+0.5);
+			ann->activation_results_output[4] = ann->multiplier - (fann_type)(ann->multiplier/10.0+0.5);
+			ann->activation_results_output[5] = ann->multiplier - (fann_type)(ann->multiplier/100.0+0.5);
 			break;
 		default:
 			/* the activation functions which do not have a stepwise function
@@ -284,22 +415,22 @@ void fann_update_stepwise_output(struct fann *ann)
 			case FANN_SIGMOID:
 				break;
 			case FANN_SIGMOID_STEPWISE:
-				ann->activation_output_values[i] = (fann_type)((log(1.0/ann->activation_output_results[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
+				ann->activation_values_output[i] = (fann_type)((log(1.0/ann->activation_results_output[i] -1.0) * 1.0/-2.0) * 1.0/ann->activation_steepness_output);
 				break;
 			case FANN_SIGMOID_SYMMETRIC:
 			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-				ann->activation_output_values[i] = (fann_type)((log((1.0-ann->activation_output_results[i]) / (ann->activation_output_results[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_output_steepness);
+				ann->activation_values_output[i] = (fann_type)((log((1.0-ann->activation_results_output[i]) / (ann->activation_results_output[i]+1.0)) * 1.0/-2.0) * 1.0/ann->activation_steepness_output);
 				break;
 		}
 #else
 		switch(ann->activation_function_output){
 			case FANN_SIGMOID:
 			case FANN_SIGMOID_STEPWISE:
-				ann->activation_output_values[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_output_results[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
+				ann->activation_values_output[i] = (fann_type)((((log(ann->multiplier/(float)ann->activation_results_output[i] -1)*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_steepness_output);
 				break;
 			case FANN_SIGMOID_SYMMETRIC:
 			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-				ann->activation_output_values[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_output_results[i])/((float)ann->activation_output_results[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_output_steepness);
+				ann->activation_values_output[i] = (fann_type)((((log((ann->multiplier - (float)ann->activation_results_output[i])/((float)ann->activation_results_output[i] + ann->multiplier))*(float)ann->multiplier) / -2.0)*(float)ann->multiplier) / ann->activation_steepness_output);
 				break;
 		}
 #endif
diff --git a/src/fann_train.c b/src/fann_train.c
index 32569c7..cc42e12 100644
--- a/src/fann_train.c
+++ b/src/fann_train.c
@@ -26,109 +26,232 @@
 #include "fann.h"
 #include "fann_errno.h"
 
+/*#define DEBUGTRAIN*/
+
+/* INTERNAL FUNCTION
+  Calculates the derivative of a value, given an activation function
+   and a steepness
+*/
+static fann_type fann_activation_derived(unsigned int activation_function,
+	fann_type steepness, fann_type value)
+{
+	switch(activation_function){
+		case FANN_LINEAR:
+			return (fann_type)fann_linear_derive(steepness, value);
+		case FANN_SIGMOID:
+		case FANN_SIGMOID_STEPWISE:
+			value = fann_clip(value, 0.01f, 0.99f);
+			return (fann_type)fann_sigmoid_derive(steepness, value);
+		case FANN_SIGMOID_SYMMETRIC:
+		case FANN_SIGMOID_SYMMETRIC_STEPWISE:
+			value = fann_clip(value, -0.98f, 0.98f);
+			return (fann_type)fann_sigmoid_symmetric_derive(steepness, value);
+		default:
+			return 0;
+	}
+}
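Note that these derivatives are computed from the neuron's output value rather than from its input sum, which is why the sigmoid outputs are clipped away from their asymptotes first: at the extremes the derivative vanishes and training would stall.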
+
 #ifndef FIXEDFANN
 /* Trains the network with the backpropagation algorithm.
  */
 void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 {
-	struct fann_neuron *neuron_it, *last_neuron, *neurons;
-	fann_type neuron_value, *delta_it, *delta_begin, tmp_delta;
-	struct fann_layer *layer_it;
-	unsigned int i, shift_prev_layer;
-	
-	/* store some variabels local for fast access */
-	const float learning_rate = ann->learning_rate;
-	const fann_type activation_output_steepness = ann->activation_output_steepness;
-	const fann_type activation_hidden_steepness = ann->activation_hidden_steepness;
-	const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
+	fann_run(ann, input);
+
+	fann_compute_MSE(ann, desired_output);
+
+	fann_backpropagate_MSE(ann);
+
+	fann_update_weights(ann);
+}
+#endif
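For illustration, a minimal sketch of driving this four-step pipeline (run, compute MSE, backpropagate, update weights) through the public API in a float build; the connection rate, learning rate and layer sizes below are arbitrary example values, not recommendations:

#include "fann.h"

int main()
{
	/* fully connected 2-3-1 network; rates and sizes are illustrative */
	struct fann *ann = fann_create(1.0f, 0.7f, 3, 2, 3, 1);
	fann_type input[2] = {0, 1};
	fann_type desired[1] = {1};

	/* one incremental step: run, compute MSE, backpropagate, update */
	fann_train(ann, input, desired);

	fann_destroy(ann);
	return 0;
}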
+
+/* Tests the network.
+ */
+fann_type *fann_test(struct fann *ann, fann_type *input, fann_type *desired_output)
+{
+	fann_type neuron_value;
+	fann_type *output_begin = fann_run(ann, input);
+	fann_type *output_it;
+	const fann_type *output_end = output_begin + ann->num_output;
+	fann_type neuron_diff;
+
+	/* calculate the error */
+	for(output_it = output_begin;
+		output_it != output_end; output_it++){
+		neuron_value = *output_it;
+
+		neuron_diff = (*desired_output - neuron_value);
+		
+		if(ann->activation_function_output == FANN_SIGMOID_SYMMETRIC ||
+			ann->activation_function_output == FANN_SIGMOID_SYMMETRIC_STEPWISE){
+			neuron_diff /= (fann_type)2;
+		}
+		
+#ifdef FIXEDFANN
+		ann->error_value += (neuron_diff/(float)ann->multiplier) * (neuron_diff/(float)ann->multiplier);
+#else
+		ann->error_value += neuron_diff * neuron_diff;
+#endif
+		
+		desired_output++;
+	}
+	ann->num_errors++;
 	
-	const struct fann_neuron *last_layer_begin = (ann->last_layer-1)->first_neuron;
+	return output_begin;
+}
+
+/* get the mean square error.
+   (obsolete, will be removed at some point; use fann_get_MSE)
+ */
+float fann_get_error(struct fann *ann)
+{
+	return fann_get_MSE(ann);
+}
+
+/* get the mean square error.
+ */
+float fann_get_MSE(struct fann *ann)
+{
+	if(ann->num_errors){
+		return ann->error_value/(float)ann->num_errors;
+	}else{
+		return 0;
+	}
+}
+
+/* reset the mean square error.
+   (obsolete, will be removed at some point; use fann_reset_MSE)
+ */
+void fann_reset_error(struct fann *ann)
+{
+	fann_reset_MSE(ann);
+}
+
+/* reset the mean square error.
+ */
+void fann_reset_MSE(struct fann *ann)
+{
+	ann->num_errors = 0;
+	ann->error_value = 0;
+}
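A sketch of how these calls are meant to compose when measuring performance on a data set; test_mse is a hypothetical helper, not part of the library:

float test_mse(struct fann *ann, struct fann_train_data *data)
{
	unsigned int i;
	fann_reset_MSE(ann);	/* clear error_value and num_errors */
	for(i = 0; i < data->num_data; i++){
		/* each call accumulates the squared output error */
		fann_test(ann, data->input[i], data->output[i]);
	}
	return fann_get_MSE(ann);	/* error_value / num_errors */
}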
+
+/* INTERNAL FUNCTION
+   Computes the error at the network output (usually after forward
+   propagation of an input vector with fann_run). The error is a sum
+   of squares over all output units; a counter is also incremented,
+   because the MSE is an average of such errors.
+
+	After this train_errors in the output layer will be set to:
+	neuron_value_derived * (desired_output - neuron_value)
+ */
+void fann_compute_MSE(struct fann *ann, fann_type *desired_output)
+{
+	fann_type neuron_value, neuron_diff, *error_it = 0, *error_begin = 0;
+	struct fann_neuron *last_layer_begin = (ann->last_layer-1)->first_neuron;
 	const struct fann_neuron *last_layer_end = last_layer_begin + ann->num_output;
-	struct fann_layer *first_layer = ann->first_layer;
-	struct fann_layer *last_layer = ann->last_layer;
-	
-	fann_run(ann, input);
-	/* if no room allocated for the delta variabels, allocate it now */
-	if(ann->train_deltas == NULL){
-		ann->train_deltas = (fann_type *)calloc(ann->total_neurons, sizeof(fann_type));
-		if(ann->train_deltas == NULL){
+	const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
+
+	/* if no room allocated for the error variables, allocate it now */
+	if(ann->train_errors == NULL){
+		ann->train_errors = (fann_type *)calloc(ann->total_neurons, sizeof(fann_type));
+		if(ann->train_errors == NULL){
 			fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
 			return;
 		}
 	}
-	delta_begin = ann->train_deltas;
 	
-	/* clear the delta variabels */
-	memset(delta_begin, 0, (ann->total_neurons) * sizeof(fann_type));
+	/* clear the error variables */
+	memset(ann->train_errors, 0, (ann->total_neurons) * sizeof(fann_type));
+	error_begin = ann->train_errors;
 	
 #ifdef DEBUGTRAIN
-	printf("calculate deltas\n");
+	printf("\ncalculate errors\n");
 #endif
 	/* calculate the error and place it in the output layer */
-	delta_it = delta_begin + (last_layer_begin - first_neuron);
+	error_it = error_begin + (last_layer_begin - first_neuron);
 
 	for(; last_layer_begin != last_layer_end; last_layer_begin++){
 		neuron_value = last_layer_begin->value;
-		switch(ann->activation_function_output){
-			case FANN_LINEAR:
-				*delta_it = (fann_type)fann_linear_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
-				break;
-			case FANN_SIGMOID:
-			case FANN_SIGMOID_STEPWISE:
-				*delta_it = (fann_type)fann_sigmoid_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
-				break;
-			case FANN_SIGMOID_SYMMETRIC:
-			case FANN_SIGMOID_SYMMETRIC_STEPWISE:
-				*delta_it = (fann_type)fann_sigmoid_symmetric_derive(activation_output_steepness, neuron_value) * (*desired_output - neuron_value);
-				break;
-			default:
-				fann_error((struct fann_error *)ann, FANN_E_CANT_TRAIN_ACTIVATION);
-				return;
+		neuron_diff = *desired_output - neuron_value;
+
+		if(ann->activation_function_output == FANN_SIGMOID_SYMMETRIC ||
+			ann->activation_function_output == FANN_SIGMOID_SYMMETRIC_STEPWISE){
+			neuron_diff /= 2.0;
 		}
 		
-		ann->error_value += (*desired_output - neuron_value) * (*desired_output - neuron_value);
+		ann->error_value += neuron_diff * neuron_diff;
+
+		if(ann->use_tanh_error_function){
+			if ( neuron_diff < -.9999999 )
+				neuron_diff = -17.0;
+			else if ( neuron_diff > .9999999 )
+				neuron_diff = 17.0;
+			else
+				neuron_diff =  log ( (1.0+neuron_diff) / (1.0-neuron_diff) );
+		}
+	
+		*error_it = fann_activation_derived(ann->activation_function_output,
+			ann->activation_steepness_output, neuron_value) * neuron_diff;
+
 		
-#ifdef DEBUGTRAIN
-		printf("delta1[%d] = "FANNPRINTF"\n", (delta_it - delta_begin), *delta_it);
-#endif
 		desired_output++;
-		delta_it++;
+		error_it++;
 	}
 	ann->num_errors++;
+}
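The tanh error function above amplifies large output differences before they enter the slopes; as a standalone sketch (math.h assumed), the transform is 2*atanh(diff), clamped near its singularities:

#include <math.h>

double tanh_error(double diff)
{
	if(diff < -0.9999999) return -17.0;	/* clamp: log blows up at diff = -1 */
	if(diff >  0.9999999) return  17.0;	/* clamp: log blows up at diff = +1 */
	return log((1.0 + diff) / (1.0 - diff));	/* == 2 * atanh(diff) */
}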
+
+/* INTERNAL FUNCTION
+   Propagate the error backwards from the output layer.
+
+   After this, the train_errors in the hidden layers will be:
+   neuron_value_derived * sum(outgoing_weights * errors_of_connected_neurons)
+*/
+void fann_backpropagate_MSE(struct fann *ann)
+{
+	fann_type neuron_value, tmp_error;
+	unsigned int i;
+	struct fann_layer *layer_it;
+	struct fann_neuron *neuron_it, *last_neuron;
 	
-	
-	/* go through all the layers, from last to first. And propagate the error backwards */
-	for(layer_it = last_layer-1; layer_it != first_layer; --layer_it){
+	fann_type *error_begin = ann->train_errors;
+	fann_type *error_prev_layer;
+	const fann_type activation_steepness_hidden = ann->activation_steepness_hidden;
+	const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
+	const struct fann_layer *second_layer = ann->first_layer + 1;
+	struct fann_layer *last_layer = ann->last_layer;
+
+	/* go through all the layers, from last to first.
+	   And propagate the error backwards */
+	for(layer_it = last_layer-1; layer_it > second_layer; --layer_it){
 		last_neuron = layer_it->last_neuron;
-		
+
 		/* for each connection in this layer, propagate the error backwards*/
 		if(ann->connection_rate >= 1 && !ann->forward_connections){
 			/* optimization for fully connected networks */
 			/* but not forward connected networks */
-			shift_prev_layer = (layer_it-1)->first_neuron - first_neuron;
+			error_prev_layer = error_begin + ((layer_it-1)->first_neuron - first_neuron);
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
-				tmp_delta = *(delta_begin + (neuron_it - first_neuron));
+				
+				tmp_error = error_begin[neuron_it - first_neuron];
 				for(i = neuron_it->num_connections ; i-- ; ){
-					*(delta_begin + i + shift_prev_layer) += tmp_delta * neuron_it->weights[i];
-#ifdef DEBUGTRAIN
-					printf("delta2[%d] = "FANNPRINTF" += ("FANNPRINTF" * "FANNPRINTF")\n", (i + shift_prev_layer), *(delta_begin + i + shift_prev_layer), tmp_delta, neuron_it->weights[i]);
-#endif
+					error_prev_layer[i] += tmp_error * neuron_it->weights[i];
 				}
 			}
 		}else{
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
-				tmp_delta = *(delta_begin + (neuron_it - first_neuron));
+				
+				tmp_error = error_begin[neuron_it - first_neuron];
 				for(i = neuron_it->num_connections ; i-- ; ){
-					*(delta_begin + (neuron_it->connected_neurons[i] - first_neuron)) +=
-						tmp_delta * neuron_it->weights[i];
+					error_begin[neuron_it->connected_neurons[i] - first_neuron] += tmp_error * neuron_it->weights[i];
 				}
 			}
 		}
-		
+
 		/* then calculate the actual errors in the previous layer */
-		delta_it = delta_begin + ((layer_it-1)->first_neuron - first_neuron);
+		error_prev_layer = error_begin + ((layer_it-1)->first_neuron - first_neuron);
 		last_neuron = (layer_it-1)->last_neuron;
 		
 		switch(ann->activation_function_hidden){
@@ -136,8 +259,8 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 				for(neuron_it = (layer_it-1)->first_neuron;
 					neuron_it != last_neuron; neuron_it++){
 					neuron_value = neuron_it->value;
-					*delta_it *= (fann_type)fann_linear_derive(activation_hidden_steepness, neuron_value) * learning_rate;
-					delta_it++;
+					*error_prev_layer *= (fann_type)fann_linear_derive(activation_steepness_hidden, neuron_value);
+					error_prev_layer++;
 				}
 				break;
 			case FANN_SIGMOID:
@@ -146,11 +269,8 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 					neuron_it != last_neuron; neuron_it++){
 					neuron_value = neuron_it->value;
 					neuron_value = fann_clip(neuron_value, 0.01f, 0.99f);
-					*delta_it *= (fann_type)fann_sigmoid_derive(activation_hidden_steepness, neuron_value);
-#ifdef DEBUGTRAIN
-					printf("delta3[%d] = "FANNPRINTF" *= fann_sigmoid_derive(%f, %f) * %f\n", (delta_it - delta_begin), *delta_it, activation_hidden_steepness, neuron_value, learning_rate);
-#endif
-					delta_it++;
+					*error_prev_layer *= (fann_type)fann_sigmoid_derive(activation_steepness_hidden, neuron_value);
+					error_prev_layer++;
 				}
 				break;
 			case FANN_SIGMOID_SYMMETRIC:
@@ -159,11 +279,8 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 					neuron_it != last_neuron; neuron_it++){
 					neuron_value = neuron_it->value;
 					neuron_value = fann_clip(neuron_value, -0.98f, 0.98f);
-					*delta_it *= (fann_type)fann_sigmoid_symmetric_derive(activation_hidden_steepness, neuron_value);
-#ifdef DEBUGTRAIN
-					printf("delta3[%d] = "FANNPRINTF" *= fann_sigmoid_symmetric_derive(%f, %f) * %f\n", (delta_it - delta_begin), *delta_it, activation_hidden_steepness, neuron_value, learning_rate);
-#endif
-					delta_it++;
+					*error_prev_layer *= (fann_type)fann_sigmoid_symmetric_derive(activation_steepness_hidden, neuron_value);
+					error_prev_layer++;
 				}
 				break;
 			default:
@@ -171,7 +288,25 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 				return;
 		}
 	}
+}
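Note the loop condition layer_it > second_layer: errors are only propagated down to the first hidden layer, since the input neurons have no incoming weights and therefore never need error values of their own.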
+
+/* INTERNAL FUNCTION
+   Update weights for incremental training
+*/
+void fann_update_weights(struct fann *ann)
+{
+	struct fann_neuron *neuron_it, *last_neuron, *prev_neurons;
+	fann_type tmp_error;
+	struct fann_layer *layer_it;
+	unsigned int i;
 	
+	/* store some variables locally for fast access */
+	const float learning_rate = ann->learning_rate;
+	const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
+	struct fann_layer *first_layer = ann->first_layer;
+	const struct fann_layer *last_layer = ann->last_layer;
+	fann_type *error_begin = ann->train_errors;	
+
 #ifdef DEBUGTRAIN
 	printf("\nupdate weights\n");
 #endif
@@ -184,89 +319,260 @@ void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output)
 		if(ann->connection_rate >= 1 && !ann->forward_connections){
 			/* optimization for fully connected networks */
 			/* but not forward connected networks */			
-			neurons = (layer_it-1)->first_neuron;
+			prev_neurons = (layer_it-1)->first_neuron;
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
-				tmp_delta = *(delta_begin + (neuron_it - first_neuron)) * learning_rate;
+				tmp_error = error_begin[neuron_it - first_neuron] * learning_rate;
 				for(i = neuron_it->num_connections ; i-- ; ){
-#ifdef DEBUGTRAIN
-					printf("weights[%d] += "FANNPRINTF" = %f * %f\n", i, tmp_delta * neurons[i].value, tmp_delta, neurons[i].value);
-#endif
-					neuron_it->weights[i] += tmp_delta * neurons[i].value;
+					neuron_it->weights[i] += tmp_error * prev_neurons[i].value;
 				}
 			}
 		}else{
 			for(neuron_it = layer_it->first_neuron;
 				neuron_it != last_neuron; neuron_it++){
-				tmp_delta = *(delta_begin + (neuron_it - first_neuron)) * learning_rate;
+				tmp_error = error_begin[neuron_it - first_neuron] * learning_rate;
 				for(i = neuron_it->num_connections ; i-- ; ){
-					neuron_it->weights[i] += tmp_delta * neuron_it->connected_neurons[i]->value;
+					neuron_it->weights[i] += tmp_error * neuron_it->connected_neurons[i]->value;
 				}
 			}
 		}
 	}
 }
-#endif
 
-/* Tests the network.
- */
-fann_type *fann_test(struct fann *ann, fann_type *input, fann_type *desired_output)
+/* INTERNAL FUNCTION
+   Update slopes for batch training
+*/
+void fann_update_slopes_batch(struct fann *ann)
 {
-	fann_type neuron_value;
-	fann_type *output_begin = fann_run(ann, input);
-	fann_type *output_it;
-	const fann_type *output_end = output_begin + ann->num_output;
+	struct fann_neuron *neuron_it, *last_neuron, *prev_neurons;
+	fann_type tmp_error, *weights_begin;
+	struct fann_layer *layer_it;
+	unsigned int i;
 	
-	/* calculate the error */
-	for(output_it = output_begin;
-		output_it != output_end; output_it++){
-		neuron_value = *output_it;
-		
-#ifdef FIXEDFANN
-		ann->error_value += ((*desired_output - neuron_value)/(float)ann->multiplier) * ((*desired_output - neuron_value)/(float)ann->multiplier);
-#else
-		ann->error_value += (*desired_output - neuron_value) * (*desired_output - neuron_value);
-#endif
-		
-		desired_output++;
+	/* store some variables locally for fast access */
+	const struct fann_neuron *first_neuron = ann->first_layer->first_neuron;
+	struct fann_layer *first_layer = ann->first_layer;
+	const struct fann_layer *last_layer = ann->last_layer;
+	fann_type *error_begin = ann->train_errors;
+	fann_type *slope_begin, *neuron_slope;
+
+	/* if no room allocated for the slope variables, allocate it now */
+	if(ann->train_slopes == NULL){
+		ann->train_slopes = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+		if(ann->train_slopes == NULL){
+			fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+			return;
+		}
+		memset(ann->train_slopes, 0, (ann->total_connections) * sizeof(fann_type));	
 	}
-	ann->num_errors++;
 	
-	return output_begin;
+	slope_begin = ann->train_slopes;
+	weights_begin = fann_get_weights(ann);
+	
+#ifdef DEBUGTRAIN
+	printf("\nupdate slopes\n");
+#endif
+	
+	for(layer_it = (first_layer+1); layer_it != last_layer; layer_it++){
+#ifdef DEBUGTRAIN
+		printf("layer[%d]\n", layer_it - first_layer);
+#endif
+		last_neuron = layer_it->last_neuron;
+		if(ann->connection_rate >= 1 && !ann->forward_connections){
+			/* optimization for fully connected networks */
+			/* but not forward connected networks */			
+			prev_neurons = (layer_it-1)->first_neuron;
+			for(neuron_it = layer_it->first_neuron;
+				neuron_it != last_neuron; neuron_it++){
+				tmp_error = error_begin[neuron_it - first_neuron];
+				neuron_slope = slope_begin + (neuron_it->weights - weights_begin);
+				for(i = neuron_it->num_connections ; i-- ; ){
+					neuron_slope[i] += tmp_error * prev_neurons[i].value;
+				}
+			}
+		}else{
+			for(neuron_it = layer_it->first_neuron;
+				neuron_it != last_neuron; neuron_it++){
+				tmp_error = error_begin[neuron_it - first_neuron];
+				neuron_slope = slope_begin + (neuron_it->weights - weights_begin);
+				for(i = neuron_it->num_connections ; i-- ; ){
+					neuron_slope[i] += tmp_error * neuron_it->connected_neurons[i]->value;
+				}
+			}
+		}
+	}
 }
 
-/* get the mean square error.
-   (obsolete will be removed at some point, use fann_get_MSE)
+/* INTERNAL FUNCTION
+   Clears arrays used for training before a new training session.
+   Also creates the arrays that do not exist yet.
  */
-float fann_get_error(struct fann *ann)
+void fann_clear_train_arrays(struct fann *ann)
 {
-	return fann_get_MSE(ann);
+	unsigned int i;
+	
+	/* if no room allocated for the slope variables, allocate it now */
+	if(ann->train_slopes == NULL){
+		ann->train_slopes = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+		if(ann->train_slopes == NULL){
+			fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+			return;
+		}
+	}
+	memset(ann->train_slopes, 0, (ann->total_connections) * sizeof(fann_type));	
+	/* if no room allocated for the variables, allocate it now */
+	if(ann->prev_steps == NULL){
+		ann->prev_steps = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+		if(ann->prev_steps == NULL){
+			fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+			return;
+		}
+	}
+	memset(ann->prev_steps, 0, (ann->total_connections) * sizeof(fann_type));	
+	
+	/* if no room allocated for the variables, allocate it now */
+	if(ann->prev_train_slopes == NULL){
+		ann->prev_train_slopes = (fann_type *)calloc(ann->total_connections, sizeof(fann_type));
+		if(ann->prev_train_slopes == NULL){
+			fann_error((struct fann_error *)ann, FANN_E_CANT_ALLOCATE_MEM);
+			return;
+		}
+	}	
+
+	if(ann->training_algorithm == FANN_RPROP_TRAIN){
+		for(i = 0; i < ann->total_connections; i++){
+			ann->prev_train_slopes[i] = 0.0125;
+		}
+	} else {
+		memset(ann->prev_train_slopes, 0, (ann->total_connections) * sizeof(fann_type));
+	}
 }
 
-/* get the mean square error.
+/* INTERNAL FUNCTION
+   Update weights for batch training
  */
-float fann_get_MSE(struct fann *ann)
+void fann_update_weights_batch(struct fann *ann, unsigned int num_data)
 {
-	if(ann->num_errors){
-		return ann->error_value/(float)ann->num_errors;
-	}else{
-		return 0;
+	fann_type *train_slopes = ann->train_slopes;
+	fann_type *weights = fann_get_weights(ann);
+	const float epsilon = ann->learning_rate/num_data;
+	unsigned int i = ann->total_connections;
+	while(i--){
+		weights[i] += train_slopes[i] * epsilon;
+		train_slopes[i] = 0.0;
 	}
 }
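Here epsilon = learning_rate/num_data turns the slopes accumulated over a whole epoch into an average-gradient step: with, say, a learning rate of 0.7 and 4 training patterns, each weight moves by 0.7/4 = 0.175 times its accumulated slope.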
 
-/* reset the mean square error.
-   (obsolete will be removed at some point, use fann_reset_MSE)
+/* INTERNAL FUNCTION
+   The quickprop training algorithm
  */
-void fann_reset_error(struct fann *ann)
+void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data)
 {
-	fann_reset_MSE(ann);
+	fann_type *train_slopes = ann->train_slopes;
+	fann_type *weights = fann_get_weights(ann);
+	fann_type *prev_steps = ann->prev_steps;
+	fann_type *prev_train_slopes = ann->prev_train_slopes;
+
+	fann_type w, prev_step, slope, prev_slope, next_step;
+	
+	float epsilon = ann->learning_rate/num_data;
+	float decay = ann->quickprop_decay; /*-0.0001;*/
+	float mu = ann->quickprop_mu; /*1.75;*/
+	float shrink_factor = mu / (1.0 + mu); 
+
+	unsigned int i = ann->total_connections;
+	while(i--){
+		w = weights[i];
+		prev_step = prev_steps[i];
+		slope = train_slopes[i] +  decay * w;
+		prev_slope = prev_train_slopes[i];
+		next_step = 0.0;
+	
+		/* The step must always be in the direction opposite to the slope. */
+	
+		if(prev_step > 0.001) {
+			/* If last step was positive...  */
+			if(slope > 0.0) {
+				/*  Add in linear term if current slope is still positive.*/
+				next_step += epsilon * slope;
+			}
+		
+			/* If current slope is close to or larger than prev slope... */
+			if(slope > (shrink_factor * prev_slope)) {
+				next_step += mu * prev_step;      /* Take maximum size positive step. */
+			} else {
+				next_step += prev_step * slope / (prev_slope - slope); /* Else, use quadratic estimate. */
+			}
+		} else if(prev_step < -0.001){
+			/* If last step was negative...  */  
+			if(slope < 0.0){
+				/*  Add in linear term if current slope is still negative.*/
+				next_step += epsilon * slope;
+			}
+		
+			/* If current slope is close to or more neg than prev slope... */
+			if(slope < (shrink_factor * prev_slope)){
+				next_step += mu * prev_step;      /* Take maximum size negative step. */
+			} else {
+				next_step += prev_step * slope / (prev_slope - slope); /* Else, use quadratic estimate. */
+			}
+		} else {
+			/* Last step was zero, so use only linear term. */
+			next_step += epsilon * slope;
+		}
+
+
+		/* update global data arrays */
+		prev_steps[i] = next_step;
+		weights[i] = w + next_step;
+		prev_train_slopes[i] = slope;
+		train_slopes[i] = 0.0;
+	}
 }
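The quadratic estimate prev_step * slope / (prev_slope - slope) is the step a parabola fitted through the two slope samples predicts will reach zero slope; for example, with prev_step = 0.1, prev_slope = 0.5 and slope = 0.3, the quadratic part alone gives 0.1 * 0.3 / (0.5 - 0.3) = 0.15, continuing in the same direction while the slope keeps shrinking.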
 
-/* reset the mean square error.
- */
-void fann_reset_MSE(struct fann *ann)
+/* INTERNAL FUNCTION
+   The iRprop- algorithm
+*/
+void fann_update_weights_irpropm(struct fann *ann, unsigned int num_data)
 {
-	ann->num_errors = 0;
-	ann->error_value = 0;
-}
+	fann_type *train_slopes = ann->train_slopes;
+	fann_type *weights = fann_get_weights(ann);
+	fann_type *prev_steps = ann->prev_steps;
+	fann_type *prev_train_slopes = ann->prev_train_slopes;
+
+	fann_type prev_step, slope, prev_slope, next_step, same_sign;
+
+	/* These are read from the ann struct, settable via the fann_set_rprop_* functions */
+	float increase_factor = ann->rprop_increase_factor;/*1.2;*/
+	float decrease_factor = ann->rprop_decrease_factor;/*0.5;*/
+	float delta_min = ann->rprop_delta_min;/*0.0;*/
+	float delta_max = ann->rprop_delta_max;/*50.0;*/
+
+	unsigned int i = ann->total_connections;
+	while(i--){	
+		prev_step = prev_steps[i];
+		slope = train_slopes[i];
+		prev_slope = prev_train_slopes[i];
+		next_step = 0.0;
 
+		same_sign = prev_slope * slope;
+	
+		if(same_sign > 0.0) {
+			next_step = fann_min(prev_step * increase_factor, delta_max);
+		} else if(same_sign < 0.0) {
+			next_step = fann_max(prev_step * decrease_factor, delta_min);
+			slope = 0;
+		}
+
+		if(slope > 0){
+			weights[i] -= next_step;
+		}else{
+			weights[i] += next_step;
+		}
+	
+		/* update global data arrays */
+		prev_steps[i] = next_step;
+		prev_train_slopes[i] = slope;
+		train_slopes[i] = 0.0;
+	}
+}
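Here same_sign = prev_slope * slope is positive when the gradient keeps its direction, so the step size grows (capped at delta_max); on a sign change it shrinks (floored at delta_min) and the slope is zeroed, which is the defining tweak of iRprop- over the original Rprop.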
diff --git a/src/fann_train_data.c b/src/fann_train_data.c
index 5123d13..0baefc3 100644
--- a/src/fann_train_data.c
+++ b/src/fann_train_data.c
@@ -82,26 +82,138 @@ void fann_destroy_train(struct fann_train_data *data)
 
 #ifndef FIXEDFANN
 
+float fann_train_epoch_quickprop(struct fann *ann, struct fann_train_data *data)
+{
+	unsigned int i;
+
+	if(ann->prev_train_slopes == NULL){
+		fann_clear_train_arrays(ann);
+	}
+	
+	fann_reset_MSE(ann);
+	
+	for(i = 0; i < data->num_data; i++){
+		fann_run(ann, data->input[i]);
+		fann_compute_MSE(ann, data->output[i]);
+		fann_backpropagate_MSE(ann);
+		fann_update_slopes_batch(ann);
+	}
+	fann_update_weights_quickprop(ann, data->num_data);
+
+	return fann_get_MSE(ann);
+}
+
+float fann_train_epoch_irpropm(struct fann *ann, struct fann_train_data *data)
+{
+	unsigned int i;
+
+	if(ann->prev_train_slopes == NULL){
+		fann_clear_train_arrays(ann);
+	}
+	
+	fann_reset_MSE(ann);
+	
+	for(i = 0; i < data->num_data; i++){
+		fann_run(ann, data->input[i]);
+		fann_compute_MSE(ann, data->output[i]);
+		fann_backpropagate_MSE(ann);
+		fann_update_slopes_batch(ann);
+	}
+	fann_update_weights_irpropm(ann, data->num_data);
+
+	return fann_get_MSE(ann);
+}
+
+float fann_train_epoch_batch(struct fann *ann, struct fann_train_data *data)
+{
+	unsigned int i;
+	fann_reset_MSE(ann);
+	
+	for(i = 0; i < data->num_data; i++){
+		fann_run(ann, data->input[i]);
+		fann_compute_MSE(ann, data->output[i]);
+		fann_backpropagate_MSE(ann);
+		fann_update_slopes_batch(ann);
+	}
+	fann_update_weights_batch(ann, data->num_data);
+
+	return fann_get_MSE(ann);
+}
+
+float fann_train_epoch_incremental(struct fann *ann, struct fann_train_data *data)
+{
+	unsigned int i;
+	fann_reset_MSE(ann);
+	
+	for(i = 0; i != data->num_data; i++){
+		fann_train(ann, data->input[i], data->output[i]);
+	}
+
+	return fann_get_MSE(ann);
+}
+
+/* Train for one epoch with the selected training algorithm
+ */
+float fann_train_epoch(struct fann *ann, struct fann_train_data *data)
+{
+	switch(ann->training_algorithm){
+		case FANN_QUICKPROP_TRAIN:
+			return fann_train_epoch_quickprop(ann, data);
+		case FANN_RPROP_TRAIN:
+			return fann_train_epoch_irpropm(ann, data);
+		case FANN_BATCH_TRAIN:
+			return fann_train_epoch_batch(ann, data);
+		case FANN_INCREMENTAL_TRAIN:
+			return fann_train_epoch_incremental(ann, data);
+		default:
+			return 0.0;
+	}
+}
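A minimal sketch of the intended usage: select an algorithm, then train epoch by epoch until the MSE drops below a target (ann and data are assumed to be created elsewhere; train_until is a hypothetical helper):

void train_until(struct fann *ann, struct fann_train_data *data, float desired_error)
{
	fann_set_training_algorithm(ann, FANN_RPROP_TRAIN);
	while(fann_train_epoch(ann, data) > desired_error)
		;	/* fann_train_epoch returns the MSE of the epoch just run */
}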
+
 /* Train directly on the training data.
  */
 void fann_train_on_data_callback(struct fann *ann, struct fann_train_data *data, unsigned int max_epochs, unsigned int epochs_between_reports, float desired_error, int (*callback)(unsigned int epochs, float error))
 {
 	float error;
-	unsigned int i, j;
+	unsigned int i;
+
+#ifdef DEBUG
+	printf("Training with ");
+	switch(ann->training_algorithm){
+		case FANN_QUICKPROP_TRAIN:
+			printf("FANN_QUICKPROP_TRAIN");
+			break;
+		case FANN_RPROP_TRAIN:
+			printf("FANN_RPROP_TRAIN");
+			break;
+		case FANN_BATCH_TRAIN:
+			printf("FANN_BATCH_TRAIN");
+			break;
+		case FANN_INCREMENTAL_TRAIN:
+			printf("FANN_INCREMENTAL_TRAIN");
+			break;
+	}
+	printf("\n");
+#endif	
 	
 	if(epochs_between_reports && callback == NULL){
 		printf("Max epochs %8d. Desired error: %.10f\n", max_epochs, desired_error);
 	}
-	
+
+	/* some training algorithms need their internal arrays cleared
+	   before training starts. */
+	if(ann->training_algorithm == FANN_RPROP_TRAIN ||
+		ann->training_algorithm == FANN_QUICKPROP_TRAIN){
+		fann_clear_train_arrays(ann);
+	}
+
 	for(i = 1; i <= max_epochs; i++){
 		/* train */
-		fann_reset_MSE(ann);
-		
-		for(j = 0; j != data->num_data; j++){
-			fann_train(ann, data->input[j], data->output[j]);
-		}
-		
-		error = fann_get_MSE(ann);
+		error = fann_train_epoch(ann, data);
 		
 		/* print current output */
 		if(epochs_between_reports &&
diff --git a/src/fixedfann.c b/src/fixedfann.c
index b0ee0f0..b4fecfd 100644
--- a/src/fixedfann.c
+++ b/src/fixedfann.c
@@ -21,9 +21,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
 #include "fixedfann.h"
 
-#include "fann.h"
-#include "fann_internal.h"
-
 #include "fann.c"
 #include "fann_io.c"
 #include "fann_train.c"
diff --git a/src/floatfann.c b/src/floatfann.c
index 1e47bc6..4f411ce 100644
--- a/src/floatfann.c
+++ b/src/floatfann.c
@@ -21,9 +21,6 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
 #include "floatfann.h"
 
-#include "fann.h"
-#include "fann_internal.h"
-
 #include "fann.c"
 #include "fann_io.c"
 #include "fann_train.c"
diff --git a/src/include/fann.h b/src/include/fann.h
index d340750..1ce1075 100644
--- a/src/include/fann.h
+++ b/src/include/fann.h
@@ -48,8 +48,6 @@ extern "C" {
 #ifndef NULL
 #define NULL 0
 #endif /* NULL */
-
-
 	
 /* ----- Implemented in fann.c Creation, running and destruction of ANNs ----- */
 	
@@ -77,7 +75,6 @@ struct fann * fann_create(float connection_rate, float learning_rate,
 */
 struct fann * fann_create_array(float connection_rate, float learning_rate,
 	unsigned int num_layers, unsigned int * layers);
-	
 
 /* create a neural network with forward connections.
  */
@@ -153,6 +150,11 @@ int fann_save_to_fixed(struct fann *ann, const char *configuration_file);
 /* Train one iteration with a set of inputs, and a set of desired outputs.
  */
 void fann_train(struct fann *ann, fann_type *input, fann_type *desired_output);
+
+/* Train one epoch with a set of training data.
+ */
+float fann_train_epoch(struct fann *ann, struct fann_train_data *data);
+	
 #endif /* NOT FIXEDFANN */
 
 /* Test with a set of inputs, and a set of desired outputs.
@@ -253,6 +255,14 @@ void fann_save_train_to_fixed(struct fann_train_data* data, char *filename, unsi
 
 /* ----- Implemented in fann_options.c Get and set options for the ANNs ----- */
 
+/* Get the training algorithm.
+ */
+unsigned int fann_get_training_algorithm(struct fann *ann);
+
+/* Set the training algorithm.
+ */
+void fann_set_training_algorithm(struct fann *ann, unsigned int training_algorithm);
+
 /* Get the learning rate.
  */
 float fann_get_learning_rate(struct fann *ann);
@@ -279,22 +289,93 @@ void fann_set_activation_function_output(struct fann *ann, unsigned int activati
 
 /* Get the steepness parameter for the sigmoid function used in the hidden layers.
  */
-fann_type fann_get_activation_hidden_steepness(struct fann *ann);
+fann_type fann_get_activation_steepness_hidden(struct fann *ann);
 	
 /* Set the steepness of the sigmoid function used in the hidden layers.
    Only useful if a sigmoid function is used in the hidden layers (default 0.5).
  */
-void fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness);
+void fann_set_activation_steepness_hidden(struct fann *ann, fann_type steepness);
 
 /* Get the steepness parameter for the sigmoid function used in the output layer.
  */
-fann_type fann_get_activation_output_steepness(struct fann *ann);
+fann_type fann_get_activation_steepness_output(struct fann *ann);
 	
 /* Set the steepness of the sigmoid function used in the output layer.
    Only useful if a sigmoid function is used in the output layer (default 0.5).
  */
+void fann_set_activation_steepness_output(struct fann *ann, fann_type steepness);
+
+/* OBSOLETE use fann_get_activation_steepness_hidden
+   Get the steepness parameter for the sigmoid function used in the hidden layers.
+ */
+fann_type fann_get_activation_hidden_steepness(struct fann *ann);
+	
+/* OBSOLETE use fann_set_activation_steepness_hidden
+   Set the steepness of the sigmoid function used in the hidden layers.
+   Only useful if a sigmoid function is used in the hidden layers (default 0.5).
+ */
+void fann_set_activation_hidden_steepness(struct fann *ann, fann_type steepness);
+
+/* OBSOLETE use fann_get_activation_steepness_output
+  Get the steepness parameter for the sigmoid function used in the output layer.
+ */
+fann_type fann_get_activation_output_steepness(struct fann *ann);
+	
+/* OBSOLETE use fann_set_activation_steepness_output
+  Set the steepness of the sigmoid function used in the output layer.
+   Only usefull if sigmoid function is used in the output layer (default 0.5).
+ */
 void fann_set_activation_output_steepness(struct fann *ann, fann_type steepness);
 
+/* When using this, training is usually faster (default ).
+   Makes the error used for calculating the slopes larger
+   when the difference is larger.
+ */
+void fann_set_use_tanh_error_function(struct fann *ann, unsigned int use_tanh_error_function);
+
+/* Decay is used to keep the weights from growing too high (default -0.0001). */
+void fann_set_quickprop_decay(struct fann *ann, float quickprop_decay);
+	
+/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
+void fann_set_quickprop_mu(struct fann *ann, float quickprop_mu);
+
+/* Tells how much the stepsize should increase during learning (default 1.2). */
+void fann_set_rprop_increase_factor(struct fann *ann, float rprop_increase_factor);
+
+/* Tells how much the stepsize should decrease during learning (default 0.5). */
+void fann_set_rprop_decrease_factor(struct fann *ann, float rprop_decrease_factor);
+
+/* The minimum stepsize (default 0.0). */
+void fann_set_rprop_delta_min(struct fann *ann, float rprop_delta_min);
+
+/* The maximum stepsize (default 50.0). */
+void fann_set_rprop_delta_max(struct fann *ann, float rprop_delta_max);
+
+/* When using this, training is usually faster (default ).
+   Makes the error used for calculating the slopes larger
+   when the difference is larger.
+ */
+unsigned int fann_get_use_tanh_error_function(struct fann *ann);
+
+/* Decay is used to keep the weights from growing too high (default -0.0001). */
+float fann_get_quickprop_decay(struct fann *ann);
+	
+/* Mu is a factor used to increase and decrease the stepsize (default 1.75). */
+float fann_get_quickprop_mu(struct fann *ann);
+
+/* Tells how much the stepsize should increase during learning (default 1.2). */
+float fann_get_rprop_increase_factor(struct fann *ann);
+
+/* Tells how much the stepsize should decrease during learning (default 0.5). */
+float fann_get_rprop_decrease_factor(struct fann *ann);
+
+/* The minimum stepsize (default 0.0). */
+float fann_get_rprop_delta_min(struct fann *ann);
+
+/* The maximum stepsize (default 50.0). */
+float fann_get_rprop_delta_max(struct fann *ann);
+	
+	
 /* Get the number of input neurons.
  */
 unsigned int fann_get_num_input(struct fann *ann);
@@ -322,8 +403,6 @@ unsigned int fann_get_decimal_point(struct fann *ann);
 unsigned int fann_get_multiplier(struct fann *ann);
 #endif /* FIXEDFANN */
 	
-
-	
 /* ----- Implemented in fann_error.c Access error information about the ANN ----- */
 	
 /* change where errors are logged to
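A sketch of setting the new per-algorithm parameters explicitly; the values shown are just the documented defaults from the comments above, given only for illustration (configure_training is a hypothetical helper):

void configure_training(struct fann *ann)
{
	fann_set_quickprop_decay(ann, -0.0001f);
	fann_set_quickprop_mu(ann, 1.75f);
	fann_set_rprop_increase_factor(ann, 1.2f);
	fann_set_rprop_decrease_factor(ann, 0.5f);
	fann_set_rprop_delta_min(ann, 0.0f);
	fann_set_rprop_delta_max(ann, 50.0f);
}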
diff --git a/src/include/fann_activation.h b/src/include/fann_activation.h
index b25656f..b8fed1f 100644
--- a/src/include/fann_activation.h
+++ b/src/include/fann_activation.h
@@ -44,6 +44,12 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
 #define FANN_THRESHOLD 2
 
+/* Symmetric threshold activation function.
+   x < 0 -> y = -1, x >= 0 -> y = 1
+   Can NOT be used during training.
+*/
+#define FANN_THRESHOLD_SYMMETRIC 11
+
 /* Sigmoid activation function.
    One of the most used activation functions.
    span: 0 < y < 1
diff --git a/src/include/fann_data.h b/src/include/fann_data.h
index 80bf85f..ebe8e05 100644
--- a/src/include/fann_data.h
+++ b/src/include/fann_data.h
@@ -103,11 +103,11 @@ struct fann
 	/* Number of output neurons (not calculating bias) */
 	unsigned int num_output;
 
-	/* Used to contain the error deltas used during training
+	/* Used to contain the errors used during training
 	 * Is allocated during first training session,
 	 * which means that if we do not train, it is never allocated.
 	 */
-	fann_type *train_deltas;
+	fann_type *train_errors;
 
 	/* Used to choose which activation function to use
 	   
@@ -119,8 +119,12 @@ struct fann
 	unsigned int activation_function_hidden, activation_function_output;
 
 	/* Parameters for the activation function */
-	fann_type activation_hidden_steepness;
-	fann_type activation_output_steepness;
+	fann_type activation_steepness_hidden;
+	fann_type activation_steepness_output;
+
+	/* Training algorithm used when calling fann_train_on_..
+	 */
+	unsigned int training_algorithm;
 
 #ifdef FIXEDFANN
 	/* the decimal_point, used for shifting the fix point
@@ -140,10 +144,10 @@ struct fann
 	   activation_results array, the result is saved, and in the
 	   two values arrays, the values that gives the results are saved.
 	 */
-	fann_type activation_hidden_results[6];
-	fann_type activation_hidden_values[6];
-	fann_type activation_output_results[6];
-	fann_type activation_output_values[6];
+	fann_type activation_results_hidden[6];
+	fann_type activation_values_hidden[6];
+	fann_type activation_results_output[6];
+	fann_type activation_values_output[6];
 
 	/* Total number of connections.
 	 * very useful, because the actual connections
@@ -162,6 +166,63 @@ struct fann
 	   the real mean square error is error_value/num_errors
 	 */
 	float error_value;
+
+	/* When using this, training is usually faster.
+	   Makes the error used for calculating the slopes larger
+	   when the difference is larger.
+	 */
+	unsigned int use_tanh_error_function;
+	
+	/* Variables for use with Cascade Correlation */
+
+	/* The error must change by at least this
+	   fraction of its old value to count as a
+	   significant change. NOT IMPLEMENTED YET
+	*/
+	/* float change_fraction; */
+
+	/* No change in this number of epochs will cause
+	   stagnation. NOT IMPLEMENTED YET
+	*/
+	/* unsigned int stagnation_epochs; */
+
+	/* Variables for use with Quickprop training */
+
+	/* Decay is used to keep the weights from growing too high */
+	float quickprop_decay;
+
+	/* Mu is a factor used to increase and decrease the stepsize */
+	float quickprop_mu;
+
+	/* Variables for use with RPROP training */
+
+	/* Tells how much the stepsize should increase during learning */
+	float rprop_increase_factor;
+
+	/* Tells how much the stepsize should decrease during learning */
+	float rprop_decrease_factor;
+
+	/* The minimum stepsize */
+	float rprop_delta_min;
+
+	/* The maximum stepsize */
+	float rprop_delta_max;
+	
+	/* Used to contain the slopes used during batch training
+	 * Is allocated during first training session,
+	 * which means that if we do not train, it is never allocated.
+	 */
+	fann_type *train_slopes;
+
+	/* The previous step taken by the quickprop/rprop procedures.
+	   Not allocated if not used.
+	 */
+	fann_type *prev_steps;
+
+	/* The slope values from the previous epoch, used by the quickprop/rprop procedures.
+	   Not allocated if not used.
+	 */
+	fann_type *prev_train_slopes;
 };
 
 /* Structure used to store data, for use with training. */
@@ -186,4 +247,15 @@ struct fann_error
 	char * errstr;
 };
 
+enum {
+	/* The quickprop training algorithm */
+	FANN_QUICKPROP_TRAIN = 0,
+	/* The iRprop- training algorithm */
+	FANN_RPROP_TRAIN,
+	/* Standard batch training */
+	FANN_BATCH_TRAIN,
+	/* Standard incremental or online training */
+	FANN_INCREMENTAL_TRAIN
+};
+
 #endif
diff --git a/src/include/fann_internal.h b/src/include/fann_internal.h
index 2342510..1b3035a 100644
--- a/src/include/fann_internal.h
+++ b/src/include/fann_internal.h
@@ -56,6 +56,21 @@ void fann_init_error_data(struct fann_error *errdat);
 struct fann * fann_create_from_fd(FILE *conf, const char *configuration_file);
 struct fann_train_data* fann_read_train_from_fd(FILE *file, char *filename);
 
+void fann_compute_MSE(struct fann *ann, fann_type *desired_output);
+void fann_update_output_weights(struct fann *ann);
+void fann_backpropagate_MSE(struct fann *ann);
+void fann_update_weights(struct fann *ann);
+void fann_update_slopes_batch(struct fann *ann);
+void fann_update_weights_quickprop(struct fann *ann, unsigned int num_data);
+void fann_update_weights_irpropm(struct fann *ann, unsigned int num_data);
+void fann_update_weights_batch(struct fann *ann, unsigned int num_data);
+
+
+/* get a pointer to the weights */
+fann_type* fann_get_weights(struct fann *ann);
+/* get a pointer to the connections */
+struct fann_neuron** fann_get_connections(struct fann *ann);
+	
+
 /* called fann_max, in order not to interfere with predefined versions of max */
 #define fann_max(x, y) (((x) > (y)) ? (x) : (y))
 #define fann_min(x, y) (((x) < (y)) ? (x) : (y))

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/libfann.git


