[r-cran-amore] 14/16: New upstream version 0.2-15

Andreas Tille tille at debian.org
Sat Oct 21 06:49:32 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository r-cran-amore.

commit 0084358d717ff865583b0e3cb16938139993dedb
Author: Andreas Tille <tille at debian.org>
Date:   Sat Oct 21 08:46:41 2017 +0200

    New upstream version 0.2-15
---
 ChangeLog                         |  30 ++++
 DESCRIPTION                       |  21 +++
 MD5                               |  32 ++++
 NAMESPACE                         |   5 +
 R/deltaE.R                        | 134 ++++++++++++++++
 R/graphviz.R                      |  52 +++++++
 R/newff.R                         | 224 +++++++++++++++++++++++++++
 R/sim.R                           | 212 ++++++++++++++++++++++++++
 R/trMethods.R                     |  59 +++++++
 debian/README.test                |   5 -
 debian/changelog                  |  44 ------
 debian/compat                     |   1 -
 debian/control                    |  22 ---
 debian/copyright                  |  33 ----
 debian/rules                      |   6 -
 debian/source/format              |   1 -
 debian/watch                      |   2 -
 man/ADAPTgd.MLPnet.Rd             |  37 +++++
 man/ADAPTgdwm.MLPnet.Rd           |  37 +++++
 man/BATCHgd.MLPnet.Rd             |  37 +++++
 man/BATCHgdwm.MLPnet.Rd           |  37 +++++
 man/deltaE.Rd                     |  56 +++++++
 man/graphviz.MLPnet.Rd            |  29 ++++
 man/init.MLPneuron.Rd             |  76 +++++++++
 man/newff.Rd                      |  76 +++++++++
 man/random.init.MLPnet.Rd         |  20 +++
 man/random.init.MLPneuron.Rd      |  27 ++++
 man/select.activation.function.Rd |  22 +++
 man/sim.MLPnet.Rd                 |  38 +++++
 man/taofun.Rd                     |  41 +++++
 man/train.Rd                      |  48 ++++++
 man/training.report.Rd            |  37 +++++
 src/ADAPTgd.c                     | 158 +++++++++++++++++++
 src/ADAPTgdwm.c                   | 160 +++++++++++++++++++
 src/AMORE.h                       | 193 +++++++++++++++++++++++
 src/BATCHgd.c                     | 311 +++++++++++++++++++++++++++++++++++++
 src/BATCHgdwm.c                   | 313 ++++++++++++++++++++++++++++++++++++++
 src/Makevars                      |   2 +
 src/Makevars.win                  |   2 +
 src/copynet.c                     | 208 +++++++++++++++++++++++++
 src/sim.c                         | 177 +++++++++++++++++++++
 41 files changed, 2911 insertions(+), 114 deletions(-)

diff --git a/ChangeLog b/ChangeLog
new file mode 100644
index 0000000..d855d71
--- /dev/null
+++ b/ChangeLog
@@ -0,0 +1,30 @@
+v0.2-15: 2014.04.10
+	sim.R
+		Added finite error check
+
+	src/BATCHgd.c and BATCHgdwm.c 
+		learning rate and momentum are divided by the number of samples in the training batch
+
+v0.2-14 
+	Minimal documentation changes
+
+v0.2-13: 2013.12.12
+	trMethods.R
+		Added thread number parameter (only used in BATCH methods)
+	sim.R
+		Added thread number parameter
+
+	src/BATCHgd.c and BATCHgdwm.c 
+		Moved var declaration to reduce dependencies
+		Parallelized with OpenMP
+	Makevars
+		Created
+		Included OpenMP
+
+	man/*.MLPnet.Rd train.Rd
+		Added n.threads argument
+
+	Enforced CRAN requirements:
+		man/ init.MLPneuron.Rd newff.Rd train.Rd :
+			Usage line reduced to less than 90 chars
+		
diff --git a/DESCRIPTION b/DESCRIPTION
new file mode 100755
index 0000000..807a961
--- /dev/null
+++ b/DESCRIPTION
@@ -0,0 +1,21 @@
+Encoding: UTF-8
+Package: AMORE
+Version: 0.2-15
+Date: 2014-04-10
+Title: A MORE flexible neural network package
+Author: Manuel Castejon Limas, Joaquin B. Ordieres Mere, Ana Gonzalez
+        Marcos, Francisco Javier Martinez de Pison Ascacibar, Alpha V.
+        Pernia Espinoza, Fernando Alba Elias, Jose Maria Perez Ramos
+Maintainer: Manuel Castejón Limas <manuel.castejon at gmail.com>
+Description: This package was born to release the TAO robust neural
+        network algorithm to the R users. It has grown and may be of
+        interest for users wanting to implement their own training
+        algorithms as well as for those whose needs lie only in the
+        "user space".
+License: GPL (>= 2)
+URL: http://rwiki.sciviews.org/doku.php?id=packages:cran:amore
+LazyLoad: yes
+Packaged: 2014-04-10 11:13:34 UTC; jmperez
+Repository: CRAN
+Date/Publication: 2014-04-14 14:53:30
+NeedsCompilation: yes
diff --git a/MD5 b/MD5
new file mode 100644
index 0000000..79bb1f1
--- /dev/null
+++ b/MD5
@@ -0,0 +1,32 @@
+56294ad08d1a7d6a1ddab97883005be6 *ChangeLog
+984037c6196bdca9b0ce3f9aa9a26dd1 *DESCRIPTION
+363a5a0606e225a6bb34735e29949770 *NAMESPACE
+e9770ed101ee0eda02af40128706e3a1 *R/deltaE.R
+775af82add070552cb450c5fdf2a334e *R/graphviz.R
+af5d56655c0d46ce17f3fe65f961a986 *R/newff.R
+f8463e4b56a9288e078dfdfe2cba3cb2 *R/sim.R
+628b497d23cdbf4cb1b81cc9eb9c4b35 *R/trMethods.R
+94bb63d02501e1ab9b694aa5453e9715 *man/ADAPTgd.MLPnet.Rd
+6520dcf8ceff027ead287ddf6899a878 *man/ADAPTgdwm.MLPnet.Rd
+46be61d708f27016a886c8d09d53f216 *man/BATCHgd.MLPnet.Rd
+3d2b9ba0bd20fb13e0017249afaa3184 *man/BATCHgdwm.MLPnet.Rd
+dcd2eb1ae8dd3a289aeb2fe81e1acdab *man/deltaE.Rd
+1649b80b9a28c22e43cc50fc39c4c52b *man/graphviz.MLPnet.Rd
+923098b796d577d634be8c131c8ffee9 *man/init.MLPneuron.Rd
+10811502f08a9e4c621618226e9619f4 *man/newff.Rd
+c6d2b5042614600b2fdf10efbd9aba89 *man/random.init.MLPnet.Rd
+954733a69d704255781959e9b137da6d *man/random.init.MLPneuron.Rd
+dc7abcd6bebafabcd079a01f5408c388 *man/select.activation.function.Rd
+5acb75de7643b306d4ec4a870e150208 *man/sim.MLPnet.Rd
+e7f3a8ee9c58fe8cb266631a91202345 *man/taofun.Rd
+e1f9a931021d212e8760226e88421b41 *man/train.Rd
+8340ec3ea75119aae4198f5b15fbc665 *man/training.report.Rd
+300f73bdcf2ca2d9efb8db64e5ed520e *src/ADAPTgd.c
+7f651c226da31e3cd6c2418a515fea74 *src/ADAPTgdwm.c
+c8663d0ae326781c59e099525568018e *src/AMORE.h
+e805420bb1e1c053346498bbdbc57ddf *src/BATCHgd.c
+f3fe19e4d455527f9778634f29726067 *src/BATCHgdwm.c
+95e3011e37d9dde0d75f3a3819b2acd3 *src/Makevars
+95e3011e37d9dde0d75f3a3819b2acd3 *src/Makevars.win
+504d18e452ea8897bbade8d739aed7a4 *src/copynet.c
+4d4c87bbc5f7ae640b3f0052fbde49c9 *src/sim.c
diff --git a/NAMESPACE b/NAMESPACE
new file mode 100755
index 0000000..2dd5b3f
--- /dev/null
+++ b/NAMESPACE
@@ -0,0 +1,5 @@
+useDynLib(AMORE)
+
+export(newff,train,sim,sim.MLPnet,ADAPTgd.MLPnet,ADAPTgdwm.MLPnet,BATCHgd.MLPnet,BATCHgdwm.MLPnet,error.LMS,error.LMLS,error.TAO,deltaE.TAO,hfun,phifun,dphifun,graphviz.MLPnet)
+
+S3method(sim, MLPnet)
diff --git a/R/deltaE.R b/R/deltaE.R
new file mode 100755
index 0000000..c73d863
--- /dev/null
+++ b/R/deltaE.R
@@ -0,0 +1,134 @@
+###############################################################################
+hfun <- function(v, k) {
+   
+   result <- array(NA, dim=dim(v))
+   smallers <- abs(v) < k
+   
+   result[smallers]  <- (v[smallers]^2)/2 * (1 - v[smallers]^2 / k^2 + v[smallers]^4 / (3*k^4) ) 
+   result[!smallers] <- k^2 / 6
+   
+   return (result)
+}
+ 
+###############################################################################
+phifun <- function (v, k) {
+
+   result <- array(NA, dim=dim(v))
+   smallers <- abs(v) < k
+   
+   result[smallers]  <- v[smallers] * ( 1-( v[smallers]^2 / k^2) )^2
+   result[!smallers] <- 0
+   
+   return (result)
+}
+###############################################################################
+dphifun <- function(v,k) {
+
+   result <- array(NA, dim=dim(v))
+   smallers <- abs(v) < k
+   
+   result[smallers]  <-  (sqrt(1-(v[smallers]^2/k^2))) - ( (v[smallers]^2/k^2) * (1-(v[smallers]^2/k^2)))
+   result[!smallers] <-  0
+   
+   return (result)
+}   
+###############################################################################
+#                         DELTA ERROR TAO
+###############################################################################
+deltaE.TAO <- function (arguments) {
+   prediction <- arguments[[1]]
+   target     <- arguments[[2]] 
+   Stao       <- arguments[[3]]$deltaE$Stao  # the third argument is the net.
+   
+   residual   <- prediction - target
+   scaled.residual <- residual / Stao
+   c1  <- 1.56
+   c2  <- 6.08
+   bf  <- c1^2 / 12 
+
+   h1  <- hfun(scaled.residual, c1)
+   h2  <- hfun(scaled.residual, c2)
+   phi1 <- phifun(scaled.residual,c1)
+   phi2 <- phifun(scaled.residual,c2)
+   
+   if (sum(phi1 * residual) == 0.0) {
+     dS2e <- 0.0
+   } else {
+     dS2e <- Stao * (sum(phi1) / (sum(phi1*residual)))
+   }
+   
+   result <-mean(2*Stao*dS2e*h2 + phi2*(Stao - dS2e * residual))
+   return(result)
+   
+}
+###############################################################################
+#                         DELTA ERROR LMS 
+###############################################################################
+deltaE.LMS <- function(arguments) {
+   prediction <- arguments[[1]]                      # arg1 is the prediction
+   target     <- arguments[[2]]                      # arg2 is the target
+   residual   <- prediction - target
+   return(residual)
+}
+###############################################################################
+#                         DELTA ERROR LMLS
+###############################################################################
+deltaE.LMLS <- function(arguments) {
+   prediction <- arguments[[1]]                      # arg1 is the prediction
+   target     <- arguments[[2]]                      # arg2 is the target
+   residual   <- prediction - target
+   result     <- residual / (1 + residual^2 / 2) 
+   return(result)
+}
+###############################################################################
+#                         ERROR LMS 
+###############################################################################
+error.LMS <- function(arguments) {
+   prediction <- arguments[[1]]                     # arg1 is the prediction
+   target     <- arguments[[2]]                     # arg2 is the target
+   residual   <- prediction - target
+   result     <- mean(residual^2)
+   return(result)
+}
+###############################################################################
+#                         ERROR LMLS
+###############################################################################
+error.LMLS <- function(arguments) {
+   prediction <- arguments[[1]]                     # arg1 is the prediction
+   target     <- arguments[[2]]                     # arg2 is the target
+   residual   <- prediction - target 
+   result     <- mean(log(1 + residual^2 / 2))
+   return(result)
+}
+###############################################################################
+#                          ERROR TAO
+###############################################################################
+error.TAO <- function(arguments) {
+   prediction <- arguments[[1]]                     # arg1 is the prediction
+   target     <- arguments[[2]]                     # arg2 is the target
+   Stao       <- arguments[[3]]$deltaE$Stao # arg3 is net
+   residual   <- prediction - target 
+   
+   n.residual <- nrow(residual)
+   perf <- NA
+   
+   scaled.residual <- residual / Stao
+   c1  <- 1.56
+   c2  <- 6.08
+   bf  <- c1^2 / 12 
+
+   h1  <- hfun(scaled.residual, c1)
+   h2  <- hfun(scaled.residual, c2)
+   
+   
+   new.Stao <- Stao*sqrt(sum(h1)/(n.residual * bf))  # n.residual or n.residual*n.output.MLPneurons ??
+   tao.error.squared <- new.Stao^2 * mean(h2)
+   return(list(perf=tao.error.squared, Stao=new.Stao))
+}
+###############################################################################
+
+    
+            
+	    
+
+
diff --git a/R/graphviz.R b/R/graphviz.R
new file mode 100644
index 0000000..ca354c4
--- /dev/null
+++ b/R/graphviz.R
@@ -0,0 +1,52 @@
+graphviz.MLPnet <- function(net,filename,digits=8) {
+   if (class(net)!="MLPnet") {
+      stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again");
+   }
+   cat(file=filename," digraph AMOREnet { \n",append=FALSE);
+   cat(file=filename,"rankdir=LR;         \n",append=TRUE);
+   cat(file=filename,"ordering=out;       \n",append=TRUE);
+   cat(file=filename,"ranksep=2;          \n",append=TRUE);
+   cat(file=filename,"nodesep=1;          \n",append=TRUE);
+   for (i in 1:length(net$layers[[1]])) {
+      cat(file=filename,"node [shape = hexagon, color=\"green\"] ", paste("\"Input ",i,"\"",sep=""),";\n",append=TRUE);
+   }
+   for (ind.neuron in 1:length(net$neurons)) {
+      neuron <- net$neurons[[ind.neuron]];
+      cat(file=filename,"node [shape = record, color=\"blue\"] ",append=TRUE); 
+      cat(file=filename,neuron$id,"[label = \"{<id> Id=\\N  | { ",append=TRUE);
+
+      for ( ind.weight in 1:length(neuron$weights) ) {
+         if (neuron$input.links[ind.weight] < 0 ) {
+           cat(file=filename,"wi",-neuron$input.links[ind.weight],": ",round(neuron$weights[ind.weight],digits),"|",sep="",append=TRUE);
+         } else {          
+           cat(file=filename,"w",neuron$input.links[ind.weight],": ",round(neuron$weights[ind.weight],digits),"|",sep="",append=TRUE);
+         }
+      }
+      cat(file=filename,"Bias:",round(neuron$bias,digits),"}|",neuron$activation.function,"|","<v0> v0:", round(neuron$v0,digits),"} \" ];\n",append=TRUE)
+   }
+ for (i in 1:length(net$layers[[length(net$layers)]])) {
+      cat(file=filename,"node [shape = hexagon, color=\"red\"] ", paste("\"Output ",i,"\"",sep=""),";\n",append=TRUE);
+   }
+
+   for (ind.neuron in 1:length(net$neurons)) {
+      neuron <- net$neurons[[ind.neuron]];
+      for ( ind.weight in 1:length(neuron$weights)) {
+         if (neuron$input.links[ind.weight] < 0 ) {
+            cat(file=filename,"\"Input ",-neuron$input.links[ind.weight],"\" -> ",neuron$id," ;\n", sep="",append=TRUE);
+         } else {
+            cat(file=filename,neuron$input.links[ind.weight]," -> ",neuron$id," ;\n", sep="",append=TRUE);
+         }
+      }
+      if (neuron$type=="output") {
+         cat(file=filename,neuron$id," -> \"Output ",neuron$output.aims,"\" ;\n", sep="",append=TRUE);
+      }
+
+
+
+
+
+   }
+cat(file=filename,"}\n",append=TRUE);
+
+
+}
diff --git a/R/newff.R b/R/newff.R
new file mode 100755
index 0000000..361b4ee
--- /dev/null
+++ b/R/newff.R
@@ -0,0 +1,224 @@
+#################################
+# Creates a new MLPnet object
+#############################
+# NOTE: TODO: complete the handling of a custom deltaE so that the custom function can be passed as a parameter
+
+newff <- function (n.neurons, learning.rate.global, momentum.global=NA, error.criterium="LMS", Stao=NA, hidden.layer="tansig", output.layer="purelin", method="ADAPTgdwm") {
+
+   net <- list( layers=list(), neurons=list(), input=as.double(numeric(n.neurons[1])), output=as.double(numeric(n.neurons[length(n.neurons)])), target=as.double(numeric(n.neurons[length(n.neurons)])), deltaE=list(fname=as.integer(0),f=function(){},Stao=as.double(NA)), other.elements=list() )
+
+   if (length(n.neurons)<3) {
+       stop("You should enter a vector containing the number of input neurons, the number of neurons of each hidden layer and the number of outputs.")
+   }
+
+   possible.activation.functions <- c("custom","tansig","sigmoid","purelin","hardlim")
+
+   if ( is.na(hidden.activation.function.choice <- pmatch(hidden.layer,possible.activation.functions)) ) {
+       stop("You should use a correct activation function for the hidden layers.")
+   } 
+   if ( is.na(output.activation.function.choice <- pmatch(output.layer,possible.activation.functions)) ) {
+       stop("You should use a correct activation function for the output layer.")
+   }
+
+   possible.methods <- c("ADAPTgdwm","ADAPTgd","BATCHgd","BATCHgdwm")
+
+   if ( is.na(method.choice <- pmatch(method,possible.methods)) ) {
+       stop("You should use a correct training method: ADAPTgdwm, ADAPTgd, BATCHgdwm, BATCHgd. Read the help files.")
+   } 
+
+   layers.last.neuron  <- cumsum(n.neurons)
+   layers.first.neuron <- c(1,1+layers.last.neuron[1:(length(layers.last.neuron)-1)])[-c(1)]-n.neurons[1]
+   layers.last.neuron  <- layers.last.neuron[-c(1)]-n.neurons[1]
+
+   net$layers[[1]] <- -c(1:n.neurons[1])
+   for ( ind.layer in 1:length(layers.last.neuron) ) {
+      net$layers[[ind.layer+1]] <- layers.first.neuron[ind.layer]:layers.last.neuron[ind.layer]
+   }
+
+   input.links  <- net$layers[1:(length(net$layers)-1)]
+   output.links <- list()
+   for ( ind.layer in 2:length(layers.last.neuron)) {
+      output.links[[ind.layer-1]] <- layers.first.neuron[ind.layer]:layers.last.neuron[ind.layer]
+   }
+   output.links[[length(layers.last.neuron)]] <- NA
+
+   for (ind.layer in 2:length(n.neurons)) {
+      if (ind.layer == length(n.neurons)) {
+         this.neuron.type="output"
+         this.neuron.activation.function.choice <- output.activation.function.choice
+      } else {
+         this.neuron.type="hidden"
+         this.neuron.activation.function.choice <- hidden.activation.function.choice
+      }
+      if (method == "ADAPTgd"  ) {
+         method.dep.variables                      <- list()
+         method.dep.variables$delta                <- as.double(0)
+         method.dep.variables$learning.rate        <- as.double(learning.rate.global)
+      } else if (method == "ADAPTgdwm") {
+         method.dep.variables                      <- list()
+         method.dep.variables$delta                <- as.double(0)
+         method.dep.variables$learning.rate        <- as.double(learning.rate.global)
+         method.dep.variables$momentum             <- as.double(momentum.global)
+         method.dep.variables$former.weight.change <- as.double(numeric(n.neurons[ind.layer-1]))
+         method.dep.variables$former.bias.change   <- as.double(0)
+      } else if (method == "BATCHgd"  ) {
+         method.dep.variables                      <- list()
+         method.dep.variables$delta                <- as.double(0)
+         method.dep.variables$learning.rate        <- as.double(learning.rate.global)
+         method.dep.variables$sum.delta.x          <- as.double(numeric(n.neurons[ind.layer-1]))
+         method.dep.variables$sum.delta.bias       <- as.double(0)
+      } else if (method == "BATCHgdwm") {
+         method.dep.variables                      <- list()
+         method.dep.variables$delta                <- as.double(0)
+         method.dep.variables$learning.rate        <- as.double(learning.rate.global)
+         method.dep.variables$sum.delta.x          <- as.double(numeric(n.neurons[ind.layer-1]))
+         method.dep.variables$sum.delta.bias       <- as.double(0)
+         method.dep.variables$momentum             <- as.double(momentum.global)
+         method.dep.variables$former.weight.change <- as.double(numeric(n.neurons[ind.layer-1]))
+         method.dep.variables$former.bias.change   <- as.double(0)
+      }
+
+      for ( ind.MLPneuron.relative in 1:length(net$layers[[ind.layer]]) ) {
+         ind.MLPneuron <- net$layers[[ind.layer]][[ind.MLPneuron.relative]]
+         net$neurons[[ind.MLPneuron]] <- init.MLPneuron(id=ind.MLPneuron,type=this.neuron.type, activation.function=as.integer(this.neuron.activation.function.choice-1),output.links=output.links[[ind.layer-1]], output.aims=rep(ind.MLPneuron.relative,length(output.links[[ind.layer-1]])), input.links=input.links[[ind.layer-1]],weights=numeric(n.neurons[ind.layer-1]), bias=0, method, method.dep.variables )
+      }
+   }
+
+
+   if (error.criterium == "LMS" ) {
+      net$deltaE$fname <- as.integer(0)      # LMS_NAME  0
+      net$deltaE$f <- deltaE.LMS
+   } else if (error.criterium == "LMLS") {
+      net$deltaE$fname <- as.integer(1)      # LMLS_NAME 1
+      net$deltaE$f <- deltaE.LMLS
+   } else if (error.criterium == "TAO") {   
+      net$deltaE$fname <- as.integer(2)      # TAO_NAME  2
+      net$deltaE$f <- deltaE.TAO 
+      if (missing(Stao)){ 
+         stop("You should enter the Stao value")
+      } else {
+         net$deltaE$Stao <-as.double(Stao)
+      }
+   } else {
+      stop("You should enter either: \"LMS\", \"LMLS\" or \"TAO\". ")
+   }
+
+   class(net)              <- "MLPnet"
+   net <- random.init.MLPnet(net)
+return(net)
+}
+#################################
+# Creates individual neurons
+#########################
+init.MLPneuron   <- function(id,type,activation.function,output.links, output.aims, input.links, weights, bias, method, method.dep.variables) {
+aux <- select.activation.function(activation.function)
+neuron                      <- list()
+neuron$id                   <- as.integer(id)
+neuron$type                 <- as.character(type)
+neuron$activation.function  <- activation.function
+neuron$output.links         <- as.integer(output.links)
+neuron$output.aims          <- as.integer(output.aims)
+neuron$input.links          <- as.integer(input.links)
+neuron$weights              <- as.double(weights)
+neuron$bias                 <- as.double(bias)
+neuron$v0                   <- as.double(0)
+neuron$v1                   <- as.double(0)
+neuron$f0                   <- aux$f0
+neuron$f1                   <- aux$f1
+neuron$method               <- as.character(method)
+neuron$method.dep.variables <- method.dep.variables
+
+class(neuron) <- "neuron"
+return(neuron)
+}
+#########################################
+# Initialize the neuron bias and weights with random values according to the book:
+# Neural Networks. A comprehensive foundation. 2nd Edition.
+# Author: Simon Haykin.
+# pages = 182, 183, 184.
+#################################
+random.init.MLPneuron <- function(net.number.weights, neuron) {
+   extreme        <- sqrt(3/net.number.weights)
+   n.weights      <- length(neuron$weights)
+   neuron$weights <- runif(n.weights,min=-extreme,max=extreme)
+   neuron$bias    <- runif(1,min=-extreme,max=extreme)
+   return(neuron)
+}
+#################################################
+# Runs random.init.MLPneuron upon each neuron.
+###########################################
+random.init.MLPnet <- function(net) {
+   net.number.weights <- length(net$neurons)          #number of bias terms
+   for (ind.MLPneuron in 1:length(net$neurons)) {
+          net.number.weights <- net.number.weights + length(net$neurons[[ind.MLPneuron]]$weights)
+       }
+
+   for ( i in 1:length(net$neurons)) { 
+      net$neurons[[i]] <- random.init.MLPneuron(net.number.weights,net$neurons[[i]] )
+   }
+return(net)
+}
+
+#########################################
+# A simple function to bestow the neuron with the appropriate activation function f0 and its derivative f1
+select.activation.function <- function(activation.function) {
+   f0 <- NA
+   f1 <- NA
+
+# a.tansig  : 1/tanh(2/3)
+# b.tansig  : 2/3
+# a.sigmoid : 1.0
+
+   if (activation.function == 1 ) { # TANSIG
+     f0 <- function (v) {
+                          a.tansig   <- 1.715904708575539
+                          b.tansig   <- 0.6666666666666667
+                          return ( a.tansig * tanh( v * b.tansig ) )
+                        }
+     f1 <- function (v) {         # in practice we use f1 = b.tansig/a.tansig*(a.tansig-f0)*(a.tansig+f0)
+                          a.tansig   <- 1.715904708575539
+                          b.tansig   <- 0.6666666666666667
+                          return( a.tansig * b.tansig * (1-tanh( v * b.tansig )^2)  )
+                        } 
+    } else if (activation.function == 2 ) { # SIGMOID
+     f0 <- function (v) {
+                          a.sigmoid  <- 1
+                          return( 1/(1+exp(- a.sigmoid * v)) )
+                        }
+     f1 <- function (v) {           # in practice we use f1 = a.sigmoid*f0*(1-f0)
+                          a.sigmoid  <- 1
+                          return ( a.sigmoid * exp(- a.sigmoid * v) / (1+exp(- a.sigmoid * v))^2 )
+                        } 
+   } else if (activation.function == 3 ) { # PURELIN
+     f0 <- function (v) {
+                          return( v )  
+                        }
+     f1 <- function (v) {
+                          return( 1 ) 
+                        }
+   } else if (activation.function == 4 ) { # HARDLIM
+     f0 <- function (v) {
+                          if (v>=0) { return(1) } else { return(0) }
+                        }
+     f1 <- function (v) {
+                          return ( NA )
+                        }
+   }
+
+   return(list(f0=f0,f1=f1))
+}
+
+##############################################################
+# Manually set the learning rate and momentum for each neuron
+##############################################################
+# deprecated. do not use. does not work
+#set.learning.rate.and.momentum <- function(net, learning.rate, momentum) {
+#   for (i in 1:length(net$neurons)) {
+#      net$neurons[[i]]$learning.rate <- learning.rate
+#      net$neurons[[i]]$momentum <- momentum
+#   }
+#   return(net)
+#}
+#
+#
+
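For orientation, a minimal sketch of how the newff() constructor above might be used; the layer sizes and training parameters below are illustrative assumptions, not taken from the upstream sources:

    library(AMORE)
    # 1-3-1 feed-forward net: one input, a hidden layer of three tansig
    # neurons, one purelin output, trained with adaptive gradient descent
    # with momentum and the LMS error criterium.
    net <- newff(n.neurons = c(1, 3, 1),
                 learning.rate.global = 1e-2,
                 momentum.global = 0.5,
                 error.criterium = "LMS",
                 hidden.layer = "tansig",
                 output.layer = "purelin",
                 method = "ADAPTgdwm")
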
diff --git a/R/sim.R b/R/sim.R
new file mode 100755
index 0000000..a0c3c9c
--- /dev/null
+++ b/R/sim.R
@@ -0,0 +1,212 @@
+##################################################
+sim <-function (net,P,...) {
+UseMethod("sim")
+}
+
+##################################################
+sim.MLPnet <- function(net,P,...) {
+   if (class(net)!="MLPnet") {
+      stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again");
+   }
+   P <- as.matrix(P)
+   ytrans <- matrix(0, nrow=length(net$layers[[length(net$layers)]]), ncol=nrow(P))
+   ytrans <- .Call("sim_Forward_MLPnet", net, t(P), ytrans, .GlobalEnv, PACKAGE="AMORE")
+   return(t(ytrans))
+}
+###############################################################################################
+
+train <- function(net, P, T, Pval=NULL, Tval=NULL, error.criterium="LMS", report=TRUE, n.shows, show.step, Stao=NA, prob=NULL, n.threads=0L) {
+   if (class(net)!="MLPnet") {
+      stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again");
+   }
+   P <- as.matrix(P)
+   T <- as.matrix(T)
+
+   epoch.show.step <- 0
+   n.muestras <- nrow(P)   # number of training samples
+
+   net$deltaE$fname <- as.integer(0)  # custom case
+   if(error.criterium=="LMS") { 
+     net$deltaE$fname <- as.integer(1)
+     net$deltaE$f <- deltaE.LMS
+   } else if(error.criterium=="LMLS") { 
+     net$deltaE$fname <- as.integer(2)
+     net$deltaE$f <- deltaE.LMLS
+   } else if(error.criterium=="TAO") { 
+     if (missing(Stao)) {
+        stop("You should enter the value of Stao")
+     } else {
+	net$deltaE$fname <- as.integer(3)
+	net$deltaE$f    <- deltaE.TAO
+        net$deltaE$Stao <- Stao
+     }
+   }
+
+   method <- net$neurons[[1]]$method
+
+   if (method =="ADAPTgd") {
+      train.method <- ADAPTgd.MLPnet
+   } else if (method =="ADAPTgdwm") {
+      train.method <- ADAPTgdwm.MLPnet
+   } else if (method =="BATCHgd") {
+      train.method <- BATCHgd.MLPnet
+   } else if (method =="BATCHgdwm") {
+      train.method <- BATCHgdwm.MLPnet
+   }
+
+   if (is.null(prob)) {
+      if (!is.null(Pval) & !is.null(Tval)) {
+	Merror <- matrix(NA, ncol=2, nrow=n.shows)
+         Pval <- as.matrix(Pval)
+         Tval <- as.matrix(Tval)
+         min.error.val <- Inf
+         bestnet <- net
+         for (idx.show in 1:n.shows) {
+            net <- train.method(net, P, T, show.step, n.threads=n.threads)
+            P.sim    <- sim.MLPnet(net,P)
+            Pval.sim <- sim.MLPnet(net,Pval) 
+            if(error.criterium=="LMS") { 
+               error     <- error.LMS(list(prediction=P.sim,    target=T    ))
+               error.val <- error.LMS(list(prediction=Pval.sim, target=Tval ))
+            } else if(error.criterium=="LMLS") { 
+               error     <- error.LMLS(list(prediction=P.sim,    target=T    ))
+               error.val <- error.LMLS(list(prediction=Pval.sim, target=Tval ))
+            } else if(error.criterium=="TAO") {                             
+               error.aux  <- error.TAO(list(prediction=P.sim, target=T, net=net))
+               error      <- error.aux$perf
+               new.tao    <- error.aux$Stao
+               error.val  <- error.TAO(list(prediction=Pval.sim, target=Tval, net=net))$perf
+               cat("Stao:", new.tao, " ")
+            }
+            Merror [idx.show,] <- c(error,error.val)
+
+            if((!is.finite(error)) | (!is.finite(error.val))){
+               stop("ERROR: Non-finite error found (Divergent). Try reducing the learning rate and/or the momentum");
+            }  
+
+            if (error.val <= min.error.val ) {
+               min.error.val <- error.val
+               bestnet <- net      
+               cat(paste("index.show:", idx.show, error.criterium,"\tTRAIN:",error,"\tVAL:",error.val,"\t BEST NET\n", sep=" "))
+            } else {
+               cat(paste("index.show:", idx.show, error.criterium,"\tTRAIN:",error,"\tVAL:",error.val,"\n", sep=" "))
+            }
+         }
+         net <- bestnet
+      } else {
+	Merror <- matrix(NA, ncol=1, nrow=n.shows)
+         for (idx.show in 1:n.shows) {
+            net <- train.method(net, P, T, show.step, n.threads=n.threads)
+            if (report) {
+		auxReport <-  training.report(net, P, T, idx.show, error.criterium)
+		net$other.elements$Stao <- auxReport$new.tao
+		Merror [idx.show,1] <- auxReport$error
+               if(!is.finite(auxReport$error)){
+                  stop("ERROR: Non-finite error found (Divergent). Try reducing the learning rate and/or the momentum");
+               } 
+            }else if(!all(is.finite(net$neurons[net$layers[[2]][1]][[1]]$weights))){
+               stop("ERROR: Non-finite weight found (Divergent). Try reducing the learning rate and/or the momentum");
+            }
+         }
+     }
+   } else {
+      if (!is.null(Pval) & !is.null(Tval)) {
+	Merror <- matrix(NA, ncol=2, nrow=n.shows)
+         Pval <- as.matrix(Pval)
+         Tval <- as.matrix(Tval)
+         min.error.val <- Inf
+         bestnet <- net
+         for (idx.show in 1:n.shows) {
+            orden <- sample(1:n.muestras, n.muestras, replace=TRUE , prob=prob)
+            net   <- train.method(net, P[orden, , drop=FALSE], T[orden, , drop=FALSE], show.step, n.threads=n.threads)
+            P.sim    <- sim.MLPnet(net,P)
+            Pval.sim <- sim.MLPnet(net,Pval) 
+            if(error.criterium=="LMS") { 
+               error     <- error.LMS(list(prediction=P.sim,    target=T    ))
+               error.val <- error.LMS(list(prediction=Pval.sim, target=Tval ))
+            } else if(error.criterium=="LMLS") { 
+               error     <- error.LMLS(list(prediction=P.sim,    target=T    ))
+               error.val <- error.LMLS(list(prediction=Pval.sim, target=Tval ))
+            } else if(error.criterium=="TAO") {                             
+               error.aux  <- error.TAO(list(prediction=P.sim, target=T, net=net))
+               error      <- error.aux$perf
+               new.tao    <- error.aux$Stao
+               error.val  <- error.TAO(list(prediction=Pval.sim, target=Tval, net=net))$perf
+               cat("Stao:", new.tao, " ")
+            }
+            Merror [idx.show,] <- c(error,error.val)
+            
+            if((!is.finite(error)) | (!is.finite(error.val))){
+               stop("ERROR: Non-finite error found (Divergent). Try reducing the learning rate and/or the momentum");
+            }  
+
+            if (error.val <= min.error.val ) {
+               min.error.val <- error.val
+               bestnet <- net      
+               cat(paste("index.show:", idx.show, error.criterium,"\tTRAIN:",error,"\tVAL:",error.val,"\t BEST NET\n", sep=" "))
+            } else {
+               cat(paste("index.show:", idx.show, error.criterium,"\tTRAIN:",error,"\tVAL:",error.val,"\n", sep=" "))
+            }
+         }
+         net <- bestnet
+      } else {
+   	Merror <- matrix(NA, ncol=1, nrow=n.shows)
+         for (idx.show in 1:n.shows) {
+            orden <- sample(1:n.muestras, n.muestras, replace=TRUE , prob=prob)
+            net <- train.method(net, P[orden, , drop=FALSE], T[orden, , drop=FALSE], show.step, n.threads=n.threads)
+            if (report) {
+		auxReport <-  training.report(net, P, T, idx.show, error.criterium)
+		net$other.elements$Stao <- auxReport$new.tao
+		Merror [idx.show,1] <- auxReport$error
+               if(!is.finite(auxReport$error)){
+                  stop("ERROR: Non-finite error found (Divergent). Try reducing the learning rate and/or the momentum");
+               } 
+            }else if(!all(is.finite(net$neurons[net$layers[[2]][1]][[1]]$weights))){
+               stop("ERROR: Non-finite weight found (Divergent). Try reducing the learning rate and/or the momentum");
+            }
+         }
+     }
+   }
+   return(list(net=net,Merror=Merror))
+
+}
+
+
+###############################################################################################
+training.report <- function(net,P,T, idx.show, error.criterium) {
+
+
+########### BEGIN do not delete ##########
+   if (class(net)!="MLPnet") {
+      stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again");
+   }
+   new.tao      <- NA
+
+########### END do not delete ############
+
+   P.sim <- sim.MLPnet(net,P)
+#          par(mfrow=c(1,2))
+#          plot(P,T, col="red", pch="*", ylim=range(rbind(T,P.sim)))
+#          points(P,P.sim, col="blue", pch="+")
+#          plot(P, ideal, col="red", pch=".", ylim=range(rbind(ideal,P.sim)))
+#          points(P,P.sim, col="blue", pch=".")
+   if(error.criterium=="LMS") { 
+           error <- error.LMS(list(prediction=P.sim, target=T))
+   } else if(error.criterium=="LMLS") { 
+           error <- error.LMLS(list(prediction=P.sim, target=T))
+
+########### BEGIN do not delete (only minor changes allowed) ##########
+   } else if(error.criterium=="TAO") {                             
+           error.aux <- error.TAO(list(prediction=P.sim, target=T, net=net))
+           error     <- error.aux$perf
+           new.tao   <- error.aux$Stao
+           cat("Stao:", new.tao, " ")
+   }
+########### END do not delete ############
+
+   cat(paste("index.show:", idx.show, error.criterium,error,"\n", sep=" "))
+
+########### BEGIN do not delete ##########
+return(list(error=error,new.tao=new.tao))
+########### END do not delete ############
+}
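A sketch of how train() above can be driven with a validation set; the toy data are illustrative assumptions, not part of the upstream sources. Since train() returns a list, the fitted net is taken from its $net element before calling sim():

    # assumes 'net' was created with newff() as sketched earlier
    P    <- matrix(runif(100, -1, 1), ncol = 1)   # training inputs
    T    <- P^2 + rnorm(100, sd = 0.05)           # training targets
    Pval <- matrix(runif(30, -1, 1), ncol = 1)    # validation inputs
    Tval <- Pval^2                                # validation targets
    result <- train(net, P, T, Pval = Pval, Tval = Tval,
                    error.criterium = "LMS", report = TRUE,
                    n.shows = 5, show.step = 100)
    fit <- sim(result$net, P)                     # predictions of the best net
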
diff --git a/R/trMethods.R b/R/trMethods.R
new file mode 100755
index 0000000..e3ceae9
--- /dev/null
+++ b/R/trMethods.R
@@ -0,0 +1,59 @@
+##########################################################
+#	Adaptive Gradient Descent (without momentum)
+##########################################################
+ADAPTgd.MLPnet <- function(net, P, T, n.epochs, n.threads=0L) {
+   if (class(net)!="MLPnet") {
+      stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again");
+   }
+   net <- .Call("ADAPTgd_loop_MLPnet", net, t(P), t(T),as.integer(n.epochs), new.env(), PACKAGE="AMORE" )
+   return(net)
+}
+##################################################
+
+
+##########################################################
+#	Adaptive Gradient Descent (with momentum)
+##########################################################
+ADAPTgdwm.MLPnet <- function(net,P,T, n.epochs, n.threads=0L) {
+   if (class(net)!="MLPnet") {
+      stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again");
+   }
+   net <- .Call("ADAPTgdwm_loop_MLPnet", net, t(P), t(T),  as.integer(n.epochs), new.env(), PACKAGE="AMORE" )
+   return(net)
+}
+##################################################
+
+
+##############################################################
+#	BATCHgd ( BATCH gradient descent without momentum )
+##############################################################
+BATCHgd.MLPnet <- function(net, P, T, n.epochs, n.threads=0L) { # Each pattern is a row of P, 
+   if (class(net)!="MLPnet") {
+      stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again");
+   }
+
+#####  First Step: BATCHgd.Forward.MLPnet
+   for (ind.MLPneuron in 1:length(net$neurons)) {
+      net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.bias <- as.double(0)
+      net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.x    <- as.double(numeric(length(net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.x)))
+   }
+   net <- .Call("BATCHgd_loop_MLPnet", net, t(P), t(T), as.integer(n.epochs), new.env(), as.integer(n.threads), PACKAGE="AMORE")
+   return(net)
+}
+##############################################################
+#	BATCHgdwm ( BATCH gradient descent with momentum )
+##############################################################
+BATCHgdwm.MLPnet <- function(net, P, T, n.epochs, n.threads=0L) { # Each pattern is a row of P, 
+   if (class(net)!="MLPnet") {
+      stop("Your net parameter does not belong to the MLPnet class. Are you aware that the result from the train function is now a list instead of a net? Check parameters and try again");
+   }
+
+##### First step: BATCHgdwm.Forward.MLPnet
+   for (ind.MLPneuron in 1:length(net$neurons)) {
+      net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.bias <- as.double(0)
+      net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.x    <- as.double(numeric(length(net$neurons[[ind.MLPneuron]]$method.dep.variables$sum.delta.x)))
+   }
+   net <- .Call("BATCHgdwm_loop_MLPnet", net, t(P), t(T), as.integer(n.epochs), new.env(), as.integer(n.threads), PACKAGE="AMORE")
+   return(net)
+}
+#######
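The BATCH* wrappers above are normally reached through train(), but they can also be called directly; per the ChangeLog, n.threads is only used by the BATCH methods and only when OpenMP is available. A small illustrative sketch (data and settings are assumptions, not from upstream):

    netB <- newff(n.neurons = c(1, 3, 1), learning.rate.global = 1e-2,
                  error.criterium = "LMS", hidden.layer = "tansig",
                  output.layer = "purelin", method = "BATCHgd")
    Pb <- matrix(seq(-1, 1, length.out = 50), ncol = 1)
    Tb <- Pb^3
    netB <- BATCHgd.MLPnet(netB, Pb, Tb, n.epochs = 200, n.threads = 2L)
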
diff --git a/debian/README.test b/debian/README.test
deleted file mode 100644
index 996df78..0000000
--- a/debian/README.test
+++ /dev/null
@@ -1,5 +0,0 @@
-Notes on how this package can be tested.
-────────────────────────────────────────
-
-This package can be tested by loading it into R with the command
-‘library(AMORE)’ in order to confirm its integrity.
diff --git a/debian/changelog b/debian/changelog
deleted file mode 100644
index 4d293d3..0000000
--- a/debian/changelog
+++ /dev/null
@@ -1,44 +0,0 @@
-r-cran-amore (0.2-15-1) unstable; urgency=medium
-
-  * New upstream version
-  * cme fix dpkg-control
-
- -- Andreas Tille <tille at debian.org>  Sun, 22 Jun 2014 22:15:20 +0200
-
-r-cran-amore (0.2-12-3) unstable; urgency=low
-
-  * debian/source/format: 3.0 (quilt)
-  * debian/control:
-     - Standards-Version: 3.9.4 (no changes needed)
-     - Deleted DM-Upload-Allowed
-     - Better wording of long description
-  * Debhelper 9 (control+compat)
-  * Renamed README.Debian to README.test
-  * debian/rules: Drop unneeded code to detect R:Depends
-  * debian/copyright: DEP5
-
- -- Andreas Tille <tille at debian.org>  Thu, 16 May 2013 17:13:31 +0200
-
-r-cran-amore (0.2-12-2) unstable; urgency=low
-
-  * debian/rules: Make sure whitespace is really a tab to get
-    R:Depends substvar working
-
- -- Andreas Tille <tille at debian.org>  Mon, 31 May 2010 10:22:02 +0200
-
-r-cran-amore (0.2-12-1) unstable; urgency=low
-
-  * New upstream version
-  * Depend on a version equal or superior than the R upstream release that
-    was used to build this package, using a R:Depends substvar
-    (debian/control, debian/rules).
-  * Standards-Version: 3.8.4 (no changes needed)
-  * debian/README.Debian: explain how to test this package
-
- -- Andreas Tille <tille at debian.org>  Thu, 20 May 2010 14:49:30 +0200
-
-r-cran-amore (0.2-11-1) unstable; urgency=low
-
-  * Initial release (closes: #557123).
-
- -- Andreas Tille <tille at debian.org>  Thu, 19 Nov 2009 15:38:39 +0100
diff --git a/debian/compat b/debian/compat
deleted file mode 100644
index ec63514..0000000
--- a/debian/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/debian/control b/debian/control
deleted file mode 100644
index dbc54e0..0000000
--- a/debian/control
+++ /dev/null
@@ -1,22 +0,0 @@
-Source: r-cran-amore
-Maintainer: Debian Science Team <debian-science-maintainers at lists.alioth.debian.org>
-Uploaders: Andreas Tille <tille at debian.org>
-Section: gnu-r
-Priority: optional
-Build-Depends: debhelper (>= 9),
-               cdbs,
-               r-base-dev
-Standards-Version: 3.9.5
-Vcs-Browser: http://anonscm.debian.org/viewvc/debian-science/packages/R/r-cran-amore/trunk/
-Vcs-Svn: svn://anonscm.debian.org/debian-science/packages/R/r-cran-amore/trunk/
-Homepage: http://cran.r-project.org/web/packages/AMORE/
-
-Package: r-cran-amore
-Architecture: any
-Depends: ${shlibs:Depends},
-         ${R:Depends}
-Description: GNU R: A MORE flexible neural network package
- This package was born to release the TAO robust neural network
- algorithm to the R users. It has grown and can be of interest for
- the users wanting to implement their own training algorithms as well
- as for those others whose needs lye only in the "user space".
diff --git a/debian/copyright b/debian/copyright
deleted file mode 100644
index cf75e9c..0000000
--- a/debian/copyright
+++ /dev/null
@@ -1,33 +0,0 @@
-Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: AMORE
-Upstream-Contact: Manuel Castejón Limas <manuel.castejon at unileon.es>
-Source: http://cran.r-project.org/web/packages/AMORE
-
-Files: *
-Copyright: 2009 Manuel Castejón Limas, Joaquín B. Ordieres Meré,
-                Eliseo P. Vergara González, Francisco Javier Martínez de Pisón Ascacibar,
-                Alpha V. Pernía Espinoza, Fernando Alba Elías
-License: GPL-2+
-
-Files: debian/*
-Copyright: 2008-2013 Andreas Tille   <tille at debian.org>
-           2009 Charles Plessy <plessy at debian.org>
-License: GPL-2+
-
-License: GPL-2+
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 2 of the License, or
- (at your option) any later version.
- .
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- GNU General Public License for more details.
- .
- You should have received a copy of the GNU General Public License
- along with this program.  If not, see <http://www.gnu.org/licenses/>.
- .
- On Debian systems, the complete text of the GNU Public
- License version 2 can be found in `/usr/share/common-licenses/GPL-2'.
-
diff --git a/debian/rules b/debian/rules
deleted file mode 100755
index 8c7ae9b..0000000
--- a/debian/rules
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/make -f
-# 							-*- makefile -*-
-# debian/rules file for the Debian/GNU Linux r-cran-amore package
-# Copyright 2008 by Andreas Tille <tille at debian.org>
-
-include /usr/share/R/debian/r-cran.mk
diff --git a/debian/source/format b/debian/source/format
deleted file mode 100644
index 163aaf8..0000000
--- a/debian/source/format
+++ /dev/null
@@ -1 +0,0 @@
-3.0 (quilt)
diff --git a/debian/watch b/debian/watch
deleted file mode 100644
index 46ab729..0000000
--- a/debian/watch
+++ /dev/null
@@ -1,2 +0,0 @@
-version=3
-http://cran.r-project.org/src/contrib/AMORE_([-\d.]+)\.tar\.gz
diff --git a/man/ADAPTgd.MLPnet.Rd b/man/ADAPTgd.MLPnet.Rd
new file mode 100644
index 0000000..7f95869
--- /dev/null
+++ b/man/ADAPTgd.MLPnet.Rd
@@ -0,0 +1,37 @@
+\name{ADAPTgd.MLPnet}
+\alias{ADAPTgd.MLPnet}
+\title{Adaptive gradient descent training}
+\description{Adaptive gradient descent training method.}
+\usage{
+ADAPTgd.MLPnet(net,P, T,n.epochs, n.threads=0L)
+}
+
+\arguments{
+\item{net}{Neural Network to train.}
+\item{P}{Input data set.}
+\item{T}{Target output data set.}
+\item{n.epochs}{Number of epochs to train}
+\item{n.threads}{Unused, but required to match the BATCH* function template.}
+}
+
+\value{This function returns a neural network object modified according to the input and target data set.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{newff},\link{train},\link{ADAPTgdwm.MLPnet}} 
+}
+
+
+
+\keyword{neural}
diff --git a/man/ADAPTgdwm.MLPnet.Rd b/man/ADAPTgdwm.MLPnet.Rd
new file mode 100644
index 0000000..9c1a681
--- /dev/null
+++ b/man/ADAPTgdwm.MLPnet.Rd
@@ -0,0 +1,37 @@
+\name{ADAPTgdwm.MLPnet}
+\alias{ADAPTgdwm.MLPnet}
+\title{Adaptive gradient descent with momentum training}
+\description{Adaptive gradient descent with momentum training method.}
+\usage{
+ADAPTgdwm.MLPnet(net,P, T,n.epochs, n.threads=0L)
+}
+
+\arguments{
+\item{net}{Neural Network to train.}
+\item{P}{Input data set.}
+\item{T}{Target output data set.}
+\item{n.epochs}{Number of epochs to train}
+\item{n.threads}{Unused, but required to match the BATCH* function template.}
+}
+
+\value{This function returns a neural network object modified according to the input and target data set.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{newff},\link{train},\link{ADAPTgd.MLPnet}} 
+}
+
+
+\keyword{neural}
+
diff --git a/man/BATCHgd.MLPnet.Rd b/man/BATCHgd.MLPnet.Rd
new file mode 100644
index 0000000..e72dd1f
--- /dev/null
+++ b/man/BATCHgd.MLPnet.Rd
@@ -0,0 +1,37 @@
+\name{BATCHgd.MLPnet}
+\alias{BATCHgd.MLPnet}
+\title{Batch gradient descent training}
+\description{Modifies the neural network weights and biases according to the training set.}
+\usage{
+BATCHgd.MLPnet(net,P,T,n.epochs, n.threads=0L)
+}
+
+\arguments{
+\item{net}{Neural Network to train.}
+\item{P}{Input data set.}
+\item{T}{Target output data set.}
+\item{n.epochs}{Number of epochs to train}
+\item{n.threads}{Number of threads to spawn. If <1, spawns one thread less than the number of available processors. If OpenMP is not available, this argument is ignored.}
+}
+
+\value{This function returns a neural network object modified according to the chosen data.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{newff},\link{train},\link{BATCHgdwm.MLPnet}} 
+}
+
+
+\keyword{neural}
+
diff --git a/man/BATCHgdwm.MLPnet.Rd b/man/BATCHgdwm.MLPnet.Rd
new file mode 100644
index 0000000..eb4c3cd
--- /dev/null
+++ b/man/BATCHgdwm.MLPnet.Rd
@@ -0,0 +1,37 @@
+\name{BATCHgdwm.MLPnet}
+\alias{BATCHgdwm.MLPnet}
+\title{Batch gradient descent with momentum training}
+\description{Modifies the neural network weights and biases according to the training set.}
+\usage{
+BATCHgdwm.MLPnet(net,P,T, n.epochs, n.threads=0L)
+}
+
+\arguments{
+\item{net}{Neural Network to train.}
+\item{P}{Input data set.}
+\item{T}{Target output data set.}
+\item{n.epochs}{Number of epochs to train}
+\item{n.threads}{Number of threads to spawn. If <1, spawns one thread less than the number of available processors. If OpenMP is not available, this argument is ignored.}
+}
+
+\value{This function returns a neural network object modified according to the chosen data.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{newff},\link{train},\link{BATCHgd.MLPnet}} 
+}
+
+
+\keyword{neural}
+
diff --git a/man/deltaE.Rd b/man/deltaE.Rd
new file mode 100644
index 0000000..c0e6591
--- /dev/null
+++ b/man/deltaE.Rd
@@ -0,0 +1,56 @@
+\name{error.LMS}
+\alias{error.LMS}
+\alias{error.LMLS}
+\alias{error.TAO}
+\alias{deltaE.LMS}
+\alias{deltaE.LMLS}
+\alias{deltaE.TAO}
+
+\title{Neural network training error criteria.}
+
+\description{The error functions calculate the goodness of fit of a neural network according to the chosen criterium:
+\itemize{
+\item LMS:  Least Mean Squares Error.
+\item LMLS: Least Mean Log Squares minimization. 
+\item TAO:  TAO error minimization.
+}
+The deltaE functions calculate the influence functions of their error criteria.
+}
+
+\usage{
+error.LMS(arguments)
+error.LMLS(arguments)
+error.TAO(arguments)
+deltaE.LMS(arguments)
+deltaE.LMLS(arguments)
+deltaE.TAO(arguments)
+}
+\arguments{
+\item{arguments}{List of arguments to pass to the functions.
+\itemize{   
+\item The first element is the prediction of the neuron.
+   \item The second element is the corresponding component of the target vector.
+   \item The third element is the whole net. This allows the TAO criterium to know the value of the S parameter and will eventually (in a future minor update) allow the user to apply regularization criteria.}
+}
+}
+\value{These functions return the error and influence function values.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Pernía Espinoza, A.V., Ordieres Meré, J.B., Martínez de Pisón, F.J., González Marcos, A. TAO-robust backpropagation learning algorithm. Neural Networks. Vol. 18, Issue 2, pp. 191--204, 2005.\cr \cr
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{train}}
+}
+
+\keyword{neural}
+
diff --git a/man/graphviz.MLPnet.Rd b/man/graphviz.MLPnet.Rd
new file mode 100644
index 0000000..6b29385
--- /dev/null
+++ b/man/graphviz.MLPnet.Rd
@@ -0,0 +1,29 @@
+\name{graphviz.MLPnet}
+\alias{graphviz.MLPnet}
+\title{Neural network graphic representation}
+\description{Creates a dot file, suitable to be processed with graphviz, containing a graphical representation of the network topology and some numerical information about the network parameters.}
+\usage{
+graphviz.MLPnet(net, filename, digits)
+}
+
+\arguments{
+\item{net}{Neural Network.}
+\item{filename}{Name of the dot file to be written.}
+\item{digits}{Number of digits used to round the parameters.}
+}
+
+\value{This function writes a file suitable to be postprocessed with the graphviz package. Thus, multiple formats can be obtained: ps, pdf, ...}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+http://www.graphviz.org \cr \cr
+}
+
+\keyword{neural}
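An illustrative usage sketch (the file name is an assumption); the resulting dot file is rendered with the external graphviz tools:

    # 'net' is assumed to be an MLPnet, e.g. the $net element returned by train()
    graphviz.MLPnet(net, filename = "amore-net.dot", digits = 4)
    # then, from a shell:  dot -Tpdf amore-net.dot -o amore-net.pdf
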
diff --git a/man/init.MLPneuron.Rd b/man/init.MLPneuron.Rd
new file mode 100644
index 0000000..621b338
--- /dev/null
+++ b/man/init.MLPneuron.Rd
@@ -0,0 +1,76 @@
+\name{init.MLPneuron}
+\alias{init.MLPneuron}
+\title{Neuron constructor.}
+\description{Creates a neuron according to the structure established by the AMORE package standard.}
+\usage{
+init.MLPneuron(id, type, activation.function, output.links, output.aims, input.links, 
+        weights, bias, method, method.dep.variables)
+}
+\arguments{
+\item{id}{Numerical index of the neuron (so that it can be referred to in a network operation).}
+\item{type}{Either hidden or output, according to the layer the neuron belongs to.}
+\item{activation.function}{The name of the characteristic function of the neuron. It can be "purelin", "tansig", "sigmoid", "hardlim" or even "custom" in case the user wants to configure their own activation function by defining f0 and f1 accordingly.}
+\item{output.links}{The id's of the neurons that accept the output value of this neuron as an input.}
+\item{output.aims}{The location of the output of the neuron in the input set of the addressed neuron. Gives answer to: Is this output the first, the second, the third, ..., input at the addressed neuron?. Similarly for an output neuron: Is this output the first, the second, the third, ..., element of the output vector?}
+\item{input.links}{The id's of the neurons whose outputs work as inputs for this neuron. Positive values represent that we take the outputs of other neurons as inputs. Negative values represent the coordinates of the input vector to be considered as inputs.}
+\item{weights}{The multiplying factors of the input values.}
+\item{bias}{The bias summed to the weighted sum of the inputs.}
+\item{method}{Preferred training method. Currently it can be:
+        \itemize{
+        \item "ADAPTgd": Adaptive gradient descent.
+        \item "ADAPTgdwm": Adaptive gradient descent with momentum.
+        \item "BATCHgd": Batch gradient descent.
+        \item "BATCHgdwm": Batch gradient descent with momentum.
+        }
+}
+
+\item{method.dep.variables}{Variables used by the training methods:
+        \itemize{
+        \item ADAPTgd method:
+                \itemize{
+                        \item delta: Used in the backpropagation method.
+                        \item learning.rate: Learning rate parameter. Notice that we can use a different rate for each neuron.
+                }
+        \item ADAPTgdwm method:
+                \itemize{
+                        \item delta: Used in the backpropagation method.
+                        \item learning.rate: Learning rate parameter. Notice that we can use a different rate for each neuron.
+                        \item momentum: Momentum constant used in the backpropagation with momentum learning criterium.
+                        \item former.weight.change: Last increment in the weight parameters. Used by the momentum training technique.
+                        \item former.bias.change: Last increment in the bias parameter. Used by the momentum training technique.
+                }
+        \item BATCHgd method: 
+                \itemize{
+                        \item delta: Used in the backpropagation method.
+                        \item learning.rate: Learning rate parameter. Notice that we can use a different rate for each neuron.
+                        \item sum.delta.x: Used as an accumulator of the changes to apply to the weight parameters in the batch training.
+                        \item sum.delta.bias: Used as an accumulator of the changes to apply to the bias parameters in the batch training.
+                }
+        \item BATCHgdwm method:
+                \itemize{
+                        \item delta: Used in the backpropagation method.
+                        \item learning.rate: Learning rate parameter. Notice that we can use a different rate for each neuron.
+                        \item sum.delta.x: Used as an accumulator of the changes to apply to the weight parameters in the batch training.
+                        \item sum.delta.bias: Used as an accumulator of the changes to apply to the bias parameters in the batch training.
+                        \item momentum: Momentum constant used by the backpropagation with momentum learning rule.
+                        \item former.weight.change: Last increment in the weight parameters. Used by the momentum training technique.
+                        \item former.bias.change: Last increment in the bias parameter. Used by the momentum training technique.
+                }
+}}
+}
+
+\value{\emph{init.MLPneuron} returns a single neuron. Mainly used to create a neural network object.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\seealso{
+\code{\link{newff}}, \code{\link{random.init.MLPnet}}, \code{\link{random.init.MLPneuron}}, \code{\link{select.activation.function}} , \code{\link{init.MLPneuron}}
+}
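+\examples{
+# A minimal sketch, assuming that the MLPnet object returned by newff stores
+# its neurons in a list element called "neurons"; init.MLPneuron itself is
+# normally only called indirectly, through newff.
+net <- newff(n.neurons=c(1,2,1), learning.rate.global=1e-2, momentum.global=0.5,
+        error.criterium="LMS", Stao=NA, hidden.layer="tansig",
+        output.layer="purelin", method="ADAPTgdwm")
+str(net$neurons[[1]])   # inspect the fields described in the Arguments section
+}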
+\keyword{neural}
+
diff --git a/man/newff.Rd b/man/newff.Rd
new file mode 100644
index 0000000..ff4a438
--- /dev/null
+++ b/man/newff.Rd
@@ -0,0 +1,76 @@
+\name{newff}
+\alias{newff}
+\title{Create a Multilayer Feedforward Neural Network}
+\description{Creates a feedforward artificial neural network according to the structure established by the AMORE package standard.}
+\usage{
+newff(n.neurons, learning.rate.global, momentum.global, error.criterium, Stao, 
+	hidden.layer, output.layer, method) 
+}
+\arguments{
+\item{n.neurons}{Numeric vector containing the number of neurons of each layer. The first element of the vector is the number of input neurons, the last is the number of output neurons and the rest are the numbers of neurons in the different hidden layers.}
+\item{learning.rate.global}{Learning rate at which every neuron is trained.}
+\item{momentum.global}{Momentum for every neuron. Needed by several training methods.}
+\item{error.criterium}{Criterion used to measure the proximity of the neural network prediction to its target. Currently the user can choose amongst: 
+\itemize{
+\item "LMS": Least Mean Squares.
+\item "LMLS": Least Mean Logarithm Squared (Liano 1996).
+\item "TAO": TAO Error (Pernia, 2004).
+}}
+\item{Stao}{Stao parameter for the TAO error criterion. Unused by the other criteria.}
+\item{hidden.layer}{Activation function of the hidden layer neurons. Available functions are:
+\itemize{
+\item "purelin".
+\item "tansig". 
+\item "sigmoid".
+\item "hardlim".
+\item "custom": The user must manually define the f0 and f1 elements of the neurons.
+}}
+\item{output.layer}{Activation function of the output layer neurons, chosen from the list shown above.}
+\item{method}{Preferred training method. Currently it can be:
+\itemize{
+\item "ADAPTgd": Adaptive gradient descent.
+\item "ADAPTgdwm": Adaptive gradient descent with momentum. 
+\item "BATCHgd": BATCH gradient descent.
+\item "BATCHgdwm": BATCH gradient descent with momentum.
+}}
+}
+\value{\emph{newff} returns a multilayer feedforward neural network object.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \cr
+Ana González Marcos.                \cr
+Alpha V. Pernía Espinoza.           \cr
+Eliseo P. Vergara Gonzalez.         \cr
+Francisco Javier Martinez de Pisón. \cr
+Fernando Alba Elías.                \cr
+}
+
+\references{
+Pernía Espinoza, A.V., Ordieres Meré, J.B., Martínez de Pisón, F.J., González Marcos, A. TAO-robust backpropagation learning algorithm. Neural Networks. Vol. 18, Issue 2, pp. 191--204, 2005.\cr \cr
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{init.MLPneuron}}, \code{\link{random.init.MLPnet}}, \code{\link{random.init.MLPneuron}}, \code{\link{select.activation.function}} 
+}
+
+\examples{
+#Example 1
+
+library(AMORE)
+# P is the input vector
+P <- matrix(sample(seq(-1,1,length=1000), 1000, replace=FALSE), ncol=1) 
+# The network will try to approximate the target P^2
+target <- P^2                                   
+# We create a feedforward network, with two hidden layers.
+# The first hidden layer has three neurons and the second has two neurons.
+# The hidden layers use the tansig activation function and the output layer uses purelin.
+net <- newff(n.neurons=c(1,3,2,1), learning.rate.global=1e-2, momentum.global=0.5,
+        error.criterium="LMS", Stao=NA, hidden.layer="tansig", 
+        output.layer="purelin", method="ADAPTgdwm")
+result <- train(net, P, target, error.criterium="LMS", report=TRUE, show.step=100, n.shows=5 )
+y <- sim(result$net, P)
+plot(P,y, col="blue", pch="+")
+points(P,target, col="red", pch="x")
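+
+# Example 2: a minimal sketch of the same toy problem trained in batch mode with
+# the LMLS error criterion; the argument values are illustrative, not prescriptions.
+net2 <- newff(n.neurons=c(1,3,2,1), learning.rate.global=1e-2, momentum.global=0.5,
+        error.criterium="LMLS", Stao=NA, hidden.layer="tansig",
+        output.layer="purelin", method="BATCHgdwm")
+result2 <- train(net2, P, target, error.criterium="LMLS", report=TRUE,
+        show.step=100, n.shows=5)
+y2 <- sim(result2$net, P)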
+}
+\keyword{neural}
diff --git a/man/random.init.MLPnet.Rd b/man/random.init.MLPnet.Rd
new file mode 100644
index 0000000..173835f
--- /dev/null
+++ b/man/random.init.MLPnet.Rd
@@ -0,0 +1,20 @@
+\name{random.init.MLPnet}
+\alias{random.init.MLPnet}
+\title{Initialize the network with random weights and biases.}
+\description{Provides initial random values for the network weights and biases. It simply applies the random.init.MLPneuron function to every neuron in the network.}
+\usage{random.init.MLPnet(net) }
+\arguments{ \item{net}{The neural network object} }
+\value{\emph{random.init.MLPnet} returns the input network with weights and biases changed randomly.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\seealso{
+\code{\link{random.init.MLPneuron}}, \code{\link{init.MLPneuron}}, \code{\link{newff}}
+}
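+\examples{
+# A minimal sketch: newff already initializes the weights and biases at random;
+# random.init.MLPnet can be used to re-randomize an existing network object.
+net <- newff(n.neurons=c(1,3,1), learning.rate.global=1e-2, momentum.global=0.5,
+        error.criterium="LMS", Stao=NA, hidden.layer="tansig",
+        output.layer="purelin", method="ADAPTgdwm")
+net <- random.init.MLPnet(net)
+}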
+\keyword{neural}
diff --git a/man/random.init.MLPneuron.Rd b/man/random.init.MLPneuron.Rd
new file mode 100644
index 0000000..5d25856
--- /dev/null
+++ b/man/random.init.MLPneuron.Rd
@@ -0,0 +1,27 @@
+\name{random.init.MLPneuron}
+\alias{random.init.MLPneuron}
+\title{Initialize the neuron with random weights and bias.}
+\description{Provides initial random values for the neuron weights and bias. It is usually called by the random.init.MLPnet function during the construction of the neural network object by the \emph{newff} function. 
+}
+\details{The values are assigned according to the suggestions of \cite{Haykin}.}
+\usage{random.init.MLPneuron(net.number.weights, neuron) }
+\arguments{
+\item{net.number.weights}{Number of bias and weight parameters of the neural network the neuron belongs to.}
+\item{neuron}{The neuron object.}
+ }
+\value{\emph{random.init.MLPneuron} returns the input neuron with bias and weights changed randomly.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+\seealso{
+\code{\link{random.init.MLPnet}}, \code{\link{init.MLPneuron}}, \code{\link{newff}}
+}
+\references{
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+\keyword{neural}
diff --git a/man/select.activation.function.Rd b/man/select.activation.function.Rd
new file mode 100644
index 0000000..28b0c43
--- /dev/null
+++ b/man/select.activation.function.Rd
@@ -0,0 +1,22 @@
+\name{select.activation.function}
+\alias{select.activation.function}
+\title{Provides R code of the selected activation function.}
+\description{Returns the R functions that implement the selected activation function and its derivative. It is usually called during the construction of the neural network object by the \emph{newff} function. 
+}
+\usage{ select.activation.function(activation.function)}
+\arguments{
+\item{activation.function}{ Activation function name. Currently the user may choose amongst \emph{purelin}, \emph{tansig}, \emph{sigmoid}, \emph{hardlim} and \emph{custom}. If \emph{custom} is chosen then the user must manually assign the neuron \emph{f0} and \emph{f1} functions.}
+ }
+\value{\emph{select.activation.function} returns a list with two elements. The first, \emph{f0} is the R code selected to serve as the neuron activation function. The second, \emph{f1} is the R code of the activation function derivative.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+\seealso{
+\code{\link{init.MLPneuron}}, \code{\link{newff}}
+}
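+\examples{
+# A minimal sketch: retrieve the R implementation of the tansig activation
+# function and its derivative.  The first list element is f0 and the second is
+# f1, as described in the Value section; they are accessed by position here and
+# are assumed to be functions of a single numeric argument.
+act <- select.activation.function("tansig")
+act[[1]](0.5)   # activation value at 0.5
+act[[2]](0.5)   # derivative at 0.5
+}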
+\keyword{neural}
diff --git a/man/sim.MLPnet.Rd b/man/sim.MLPnet.Rd
new file mode 100644
index 0000000..6c6ae4a
--- /dev/null
+++ b/man/sim.MLPnet.Rd
@@ -0,0 +1,38 @@
+\name{sim.MLPnet}
+\alias{sim}
+\alias{sim.MLPnet}
+
+\title{Performs the simulation of a neural network from an input data set.}
+\description{This function calculates the output values of the neural network for a given data set. Various versions are provided according to different degrees of C code conversion. The \emph{sim.MLPnet} function is the latest and quickest.}
+
+\usage{
+sim(net,P,...)
+#sim.MLPnet(net,P,...)
+}
+
+\arguments{
+\item{...}{Currently, the parameters below are accepted.}
+\item{net}{Neural Network to simulate.}
+\item{P}{Data Set input values.}
+}
+
+\value{This function returns a matrix containing the output values of the neural network for the given data set.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré               \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón  \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{newff},\link{train}}
+}
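+\examples{
+# A minimal sketch, reusing the toy quadratic problem of the newff example.
+P <- matrix(runif(100, -1, 1), ncol=1)
+target <- P^2
+net <- newff(n.neurons=c(1,3,1), learning.rate.global=1e-2, momentum.global=0.5,
+        error.criterium="LMS", Stao=NA, hidden.layer="tansig",
+        output.layer="purelin", method="ADAPTgdwm")
+result <- train(net, P, target, error.criterium="LMS", report=TRUE,
+        show.step=100, n.shows=5)
+y <- sim(result$net, P)
+}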
+
+\keyword{neural}
+
diff --git a/man/taofun.Rd b/man/taofun.Rd
new file mode 100644
index 0000000..7691bfb
--- /dev/null
+++ b/man/taofun.Rd
@@ -0,0 +1,41 @@
+\name{error.TAO}
+\alias{hfun}
+\alias{phifun}
+\alias{dphifun}
+
+\title{TAO robust error criterion auxiliary functions.}
+
+\description{Auxiliary functions. They are not meant to be called by the user but by the \code{\link{error.TAO}} and the \code{\link{deltaE.TAO}} functions.
+}
+
+\usage{
+hfun(v,k)
+phifun(v,k)
+dphifun(v,k)
+}
+\arguments{
+\item{v}{Input value.}
+\item{k}{Threshold limit.}
+}
+
+\value{These functions return a numeric array with dimension equal to the dimension of v.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Pernía Espinoza, A.V., Ordieres Meré, J.B., Martínez de Pisón, F.J., González Marcos, A. TAO-robust backpropagation learning algorithm. Neural Networks. Vol. 18, Issue 2, pp. 191--204, 2005.\cr \cr
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{train}}
+}
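+\examples{
+# A minimal sketch: evaluate the auxiliary functions on a grid of residuals with
+# an arbitrary threshold k.  These helpers are normally called internally by the
+# error.TAO and deltaE.TAO functions rather than by the user.
+v <- seq(-3, 3, by=0.5)
+hfun(v, k=1.5)
+phifun(v, k=1.5)
+dphifun(v, k=1.5)
+}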
+
+\keyword{neural}
+
diff --git a/man/train.Rd b/man/train.Rd
new file mode 100644
index 0000000..b1ea4bf
--- /dev/null
+++ b/man/train.Rd
@@ -0,0 +1,48 @@
+\name{train}
+\alias{train}
+
+\title{Neural network training function.}
+
+\description{For a given data set (training set), this function modifies the neural network weights and biases to approximate the relationships amongst variables present in the training set. This may serve several needs, e.g. fitting non-linear functions.}
+
+\usage{
+train(net, P, T, Pval=NULL, Tval=NULL, error.criterium="LMS", report=TRUE,
+ n.shows, show.step, Stao=NA,prob=NULL,n.threads=0L)
+}
+
+\arguments{
+\item{net}{Neural Network to train.}
+\item{P}{Training set input values.}
+\item{T}{Training set output values}
+\item{Pval}{Validation set input values for optional early stopping.}
+\item{Tval}{Validation set output values for optional early stopping.}
+\item{error.criterium}{Criterium used to measure the goodness of fit:"LMS", "LMLS", "TAO".}
+\item{Stao}{Initial value of the S parameter used by the TAO algorithm.}
+\item{report}{Logical value indicating whether the training function should keep quiet or should provide graphical/written information during the training process instead.}
+\item{n.shows}{Number of times to report (if report is TRUE). The total number of training epochs is n.shows times show.step.}
+\item{show.step}{Number of epochs to train non-stop until the training function is allowed to report.}
+\item{prob}{Vector with the sampling probability of each case, used to apply resampling during training.}
+\item{n.threads}{Number of threads to spawn for the BATCH* training methods. If less than 1, NumberProcessors-1 threads are spawned. If OpenMP is not available, this argument is ignored.}
+}
+
+\value{This function returns a list with two elements: the trained Neural Network object, with weights and biases adjusted by the chosen training method, and a matrix with the errors obtained during the training. If a validation set is provided, the early stopping technique is applied.}
+\author{
+Manuel Castejón Limas.              \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré               \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Pernía Espinoza, A.V., Ordieres Meré, J.B., Martínez de Pisón, F.J., González Marcos, A. TAO-robust backpropagation learning algorithm. Neural Networks. Vol. 18, Issue 2, pp. 191--204, 2005.\cr \cr
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{newff}} 
+}
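+\examples{
+# A minimal sketch of a training run with an optional validation set for early
+# stopping; the data and argument values are illustrative only.
+P <- matrix(runif(200, -1, 1), ncol=1)
+target <- P^2
+Pval <- matrix(runif(50, -1, 1), ncol=1)
+Tval <- Pval^2
+net <- newff(n.neurons=c(1,3,1), learning.rate.global=1e-2, momentum.global=0.5,
+        error.criterium="LMS", Stao=NA, hidden.layer="tansig",
+        output.layer="purelin", method="ADAPTgdwm")
+result <- train(net, P, target, Pval=Pval, Tval=Tval, error.criterium="LMS",
+        report=TRUE, show.step=100, n.shows=5)
+# result$net holds the trained network; sim() evaluates it on new data.
+y <- sim(result$net, P)
+}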
+
+\keyword{neural}
+
diff --git a/man/training.report.Rd b/man/training.report.Rd
new file mode 100644
index 0000000..08722af
--- /dev/null
+++ b/man/training.report.Rd
@@ -0,0 +1,37 @@
+\name{training.report}
+\alias{training.report}
+\title{Neural network training report generator function.}
+
+\description{Function in charge of reporting the behavior of the network training. The users should modify this function according to their needs.}
+
+\usage{
+training.report(net,P,T, idx.show, error.criterium)
+}
+\arguments{
+\item{net}{Neural Network to train.}
+\item{P}{Training set input values.}
+\item{T}{Training set output values}
+\item{idx.show}{Current show index.}
+\item{error.criterium}{Criterion used to measure the goodness of fit.}
+}
+
+\value{This function does not return any value. Just useful for printing and plotting.}
+\author{
+Manuel Castejón Limas. 	            \email{manuel.castejon at gmail.com}\cr
+Joaquin Ordieres Meré.	            \email{j.ordieres at upm.es}\cr
+Ana González Marcos.                \email{ana.gonzalez at unirioja.es} \cr
+Alpha V. Pernía Espinoza.           \email{alpha.pernia at unirioja.es}\cr
+Francisco Javier Martinez de Pisón. \email{fjmartin at unirioja.es}\cr
+Fernando Alba Elías.                \email{fernando.alba at unavarra.es}\cr
+}
+
+\references{
+Simon Haykin. Neural Networks -- a Comprehensive Foundation. Prentice Hall, New Jersey, 2nd edition, 1999. ISBN 0-13-273350-1. \cr \cr
+}
+
+\seealso{
+\code{\link{train}}
+}
+
+\keyword{neural}
+
diff --git a/src/ADAPTgd.c b/src/ADAPTgd.c
new file mode 100755
index 0000000..b02e464
--- /dev/null
+++ b/src/ADAPTgd.c
@@ -0,0 +1,158 @@
+
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+#include <R.h>
+#include <Rinternals.h>
+#include <Rdefines.h>
+#include "AMORE.h"
+
+
+
+/**
+##########################################################
+#	Adaptive Gradient Descent (without momentum)
+##########################################################
+**/
+
+
+
+
+SEXP ADAPTgd_loop_MLPnet (SEXP origNet, SEXP Ptrans, SEXP Ttrans, SEXP nepochs, SEXP rho) {
+   int * Ptransdim, *Ttransdim, fila, columna, Pcounter, Tcounter;
+   int considered_input, ind_neuron, ind_other_neuron, that_neuron, that_aim, ind_weight;
+   double aux_DELTA, x_input, a, bias_change, weight_change;
+   int epoch, n_epochs;
+
+   SEXP R_fcall, args, arg1, arg2, arg3;
+   SEXP aims, net;
+   struct AMOREneuron * ptneuron, * pt_that_neuron;
+   struct AMOREnet * ptnet;
+
+   double aux1, aux2;
+
+   PROTECT(net=duplicate(origNet));
+   Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP));
+   Ttransdim = INTEGER(coerceVector(getAttrib(Ttrans, R_DimSymbol), INTSXP));
+   n_epochs  = INTEGER(nepochs)[0];
+
+   ptnet = copynet_RC(net);
+   for (epoch=0; epoch < n_epochs; epoch++) {
+      for (fila=0, Pcounter=0, Tcounter=0; fila < Ptransdim[1]; fila++) {
+         for( columna =0; columna < Ptransdim[0] ; columna++, Pcounter++) {
+            ptnet->input[columna] =  REAL(Ptrans)[Pcounter];
+         }
+         for( columna =0; columna < Ttransdim[0] ; columna++, Tcounter++) {
+            ptnet->target[columna] =  REAL(Ttrans)[Tcounter];
+         }
+         /* BEGIN   void adaptgd_forward_mlpnet(AMOREnet * ptnet)   */
+         for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+            ptneuron = ptnet->neurons[ind_neuron];
+            /* BEGIN adaptgd_forward_MLPneuron */
+            for (a=0.0, ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+               considered_input = ptneuron->input_links[ind_weight];
+               if (considered_input < 0 ) {
+                  x_input = ptnet->input[-1-considered_input];
+               } else {
+                  x_input = ptnet->neurons[-1+considered_input]->v0;
+               }
+               a +=  ptneuron->weights[ind_weight] * x_input;
+            }
+            a += ptneuron->bias;
+            switch (ptneuron->actf) {
+               case TANSIG_ACTF:
+                  ptneuron->v0 =  a_tansig * tanh(a * b_tansig); 
+                  ptneuron->v1 =  b_tansig / a_tansig * (a_tansig - ptneuron->v0)*(a_tansig + ptneuron->v0);
+                  break;
+               case SIGMOID_ACTF:
+                  ptneuron->v0 =  1/(1+exp(- a_sigmoid * a)) ; 
+                  ptneuron->v1 =  a_sigmoid * ptneuron->v0 * ( 1 - ptneuron->v0 );
+                  break;
+               case PURELIN_ACTF:
+                  ptneuron->v0 = a; 
+                  ptneuron->v1 = 1;
+                  break;
+               case HARDLIM_ACTF:
+                  if (a>=0) {
+                     ptneuron->v0 = 1.0;
+                  } else {
+                     ptneuron->v0 = 0.0;
+                  }
+                  ptneuron->v1 = NA_REAL;
+                 break;
+               case CUSTOM_ACTF:
+                  PROTECT(args    = allocVector(REALSXP,1));
+                  REAL(args)[0]   = a;
+                  PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args));
+                  ptneuron->v0    = REAL(eval (R_fcall, rho))[0];
+                  PROTECT(args    = allocVector(REALSXP,1));   
+                  REAL(args)[0]   = a;
+                  PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F1), args));
+                  ptneuron->v1    = REAL(eval (R_fcall, rho))[0];
+                  UNPROTECT(4);
+             break; 
+            }
+         /* END adaptgd_forward_MLPneuron */
+         }
+         /* END     void adaptgd_forward_mlpnet(AMOREnet * ptnet)   */
+
+
+         /* BEGIN   void adaptgd_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */
+         for ( ind_neuron=ptnet->last_neuron; ind_neuron >=0;  ind_neuron-- ) {
+            ptneuron=ptnet->neurons[ind_neuron];
+         /**/
+            if (ptneuron->type==TYPE_OUTPUT) {
+               switch(ptnet->deltaE.name) {
+                  case LMS_NAME:
+                     aux_DELTA = ptneuron->v0 - ptnet->target[-1+ptneuron->output_aims[0]];
+                  break;
+                  case LMLS_NAME:
+                     aux_DELTA = ptneuron->v0- ptnet->target[-1+ptneuron->output_aims[0]];
+                     aux_DELTA = aux_DELTA / (1 + aux_DELTA*aux_DELTA / 2);
+                     break;
+                  default:   /* if (ptneuron->deltaE.name==TAO_NAME)   for now TAO is handled like custom */ 
+                    /* ####### NOTE: TAO handling still needs to be changed  */
+                    PROTECT(args  = allocVector(VECSXP,3)     );
+                    PROTECT(arg3  = net                       );
+                    PROTECT(arg2  = allocVector(REALSXP,1)    );
+                    PROTECT(arg1  = allocVector(REALSXP,1)    );
+                    REAL(arg1)[0] = ptneuron->v0;
+                    REAL(arg2)[0] =  ptnet->target[-1+ptneuron->output_aims[0]];
+                    SET_VECTOR_ELT(args, 0, arg1);
+                    SET_VECTOR_ELT(args, 1, arg2);
+                    SET_VECTOR_ELT(args, 2, arg3);
+                    PROTECT(R_fcall = lang2(DELTAE_F, args) );
+                    aux_DELTA = REAL(eval (R_fcall, rho))[0];
+                    UNPROTECT(5);
+                    break;
+               };
+            } else {
+               aux_DELTA = 0.0;
+               for ( ind_other_neuron=0; ind_other_neuron <= ptneuron->last_output_link ; ind_other_neuron++ ) {
+                  pt_that_neuron = ptneuron->output_links[ind_other_neuron];
+                  that_aim       = -1+ptneuron->output_aims[ind_other_neuron];
+                  aux_DELTA     += pt_that_neuron->method_dep_variables.adaptgd.delta * pt_that_neuron->weights[that_aim] ;
+               }
+            }
+            ptneuron->method_dep_variables.adaptgd.delta = aux_DELTA * ptneuron->v1;
+            bias_change = - ptneuron->method_dep_variables.adaptgd.learning_rate * ptneuron->method_dep_variables.adaptgd.delta;
+            ptneuron->bias += bias_change;
+            for (ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+               considered_input = ptneuron->input_links[ind_weight];
+               if (considered_input < 0 ) {
+                  x_input = ptnet->input[-1-considered_input];
+               } else {
+                  x_input = ptnet->neurons[-1+considered_input]->v0;
+               }
+               weight_change  =  - ptneuron->method_dep_variables.adaptgd.learning_rate * ptneuron->method_dep_variables.adaptgd.delta  * x_input ;
+               ptneuron->weights[ind_weight] += weight_change;
+            }
+            /**/
+         }
+         /* END    void adaptgd_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */
+      }
+   }
+   copynet_CR (net, ptnet);
+   UNPROTECT(1);
+   return (net);
+}
diff --git a/src/ADAPTgdwm.c b/src/ADAPTgdwm.c
new file mode 100755
index 0000000..a54e878
--- /dev/null
+++ b/src/ADAPTgdwm.c
@@ -0,0 +1,160 @@
+
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+#include <R.h>
+#include <Rinternals.h>
+#include <Rdefines.h>
+#include "AMORE.h"
+
+
+/**
+##########################################################
+#	Adaptive Gradient Descent (with momentum)
+##########################################################
+*/
+
+SEXP ADAPTgdwm_loop_MLPnet (SEXP origNet, SEXP Ptrans, SEXP Ttrans, SEXP nepochs, SEXP rho) {
+   int * Ptransdim, *Ttransdim, fila, columna, Pcounter, Tcounter;
+   int considered_input, ind_neuron, ind_other_neuron, that_neuron, that_aim, ind_weight;
+   double aux_DELTA, x_input, a, bias_change, weight_change;
+   int epoch, n_epochs;
+
+   SEXP R_fcall, args, arg1, arg2, arg3;
+   SEXP aims, net;
+   struct AMOREneuron * ptneuron, * pt_that_neuron;
+   struct AMOREnet * ptnet;
+
+   double aux1, aux2;
+
+   PROTECT(net=duplicate(origNet));
+   Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP));
+   Ttransdim = INTEGER(coerceVector(getAttrib(Ttrans, R_DimSymbol), INTSXP));
+   n_epochs  = INTEGER(nepochs)[0];
+
+   ptnet = copynet_RC(net);
+   for (epoch=0; epoch < n_epochs; epoch++) {
+      for (fila=0, Pcounter=0, Tcounter=0; fila < Ptransdim[1]; fila++) {
+         for( columna =0; columna < Ptransdim[0] ; columna++, Pcounter++) {
+            ptnet->input[columna] =  REAL(Ptrans)[Pcounter];
+         }
+         for( columna =0; columna < Ttransdim[0] ; columna++, Tcounter++) {
+            ptnet->target[columna] =  REAL(Ttrans)[Tcounter];
+         }
+         /** BEGIN   void adaptgdwm_forward_mlpnet(AMOREnet * ptnet)   */
+         for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+            ptneuron = ptnet->neurons[ind_neuron];
+            /* BEGIN adaptgdwm_forward_MLPneuron */
+            for (a=0.0, ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+               considered_input = ptneuron->input_links[ind_weight];
+               if (considered_input < 0 ) {
+                  x_input = ptnet->input[-1-considered_input];
+               } else {
+                  x_input = ptnet->neurons[-1+considered_input]->v0;
+               }
+               a +=  ptneuron->weights[ind_weight] * x_input;
+            }
+            a += ptneuron->bias;
+            switch (ptneuron->actf) {
+               case TANSIG_ACTF:
+                  ptneuron->v0 =  a_tansig * tanh(a * b_tansig); 
+                  ptneuron->v1 =  b_tansig / a_tansig * (a_tansig - ptneuron->v0)*(a_tansig + ptneuron->v0);
+                  break;
+               case SIGMOID_ACTF:
+                  ptneuron->v0 =  1/(1+exp(- a_sigmoid * a)) ; 
+                  ptneuron->v1 =  a_sigmoid * ptneuron->v0 * ( 1 - ptneuron->v0 );
+                  break;
+               case PURELIN_ACTF:
+                  ptneuron->v0 = a; 
+                  ptneuron->v1 = 1;
+                  break;
+               case HARDLIM_ACTF:
+                  if (a>=0) {
+                     ptneuron->v0 = 1.0;
+                  } else {
+                     ptneuron->v0 = 0.0;
+                  }
+                  ptneuron->v1 = NA_REAL;
+                 break;
+               case CUSTOM_ACTF:
+                  PROTECT(args    = allocVector(REALSXP,1));
+                  REAL(args)[0]   = a;
+                  PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args));
+                  ptneuron->v0    = REAL(eval (R_fcall, rho))[0];
+                  PROTECT(args    = allocVector(REALSXP,1));   
+                  REAL(args)[0]   = a;
+                  PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F1), args));
+                  ptneuron->v1    = REAL(eval (R_fcall, rho))[0];
+                  UNPROTECT(4);
+                  break; 
+            }
+         /* END adaptgdwm_forward_MLPneuron */
+         }
+         /** END     void adaptgdwm_forward_mlpnet(AMOREnet * ptnet)   */
+
+
+         /* BEGIN   void adaptgdwm_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */
+         for ( ind_neuron=ptnet->last_neuron; ind_neuron >=0;  ind_neuron-- ) {
+            ptneuron=ptnet->neurons[ind_neuron];
+         /**/
+            if (ptneuron->type==TYPE_OUTPUT) {
+               switch(ptnet->deltaE.name) {
+                  case LMS_NAME:
+                     aux_DELTA = ptneuron->v0 - ptnet->target[-1+ptneuron->output_aims[0]];
+                  break;
+                  case LMLS_NAME:
+                     aux_DELTA = ptneuron->v0- ptnet->target[-1+ptneuron->output_aims[0]];
+                     aux_DELTA = aux_DELTA / (1 + aux_DELTA*aux_DELTA / 2);
+                     break;
+                  default:   /** if (ptneuron->deltaE.name==TAO_NAME)   for now TAO is handled like custom*/ 
+                    /** ####### NOTE: TAO handling still needs to be changed  */
+                    PROTECT(args  = allocVector(VECSXP,3)     );
+                    PROTECT(arg3  = net                       );
+                    PROTECT(arg2  = allocVector(REALSXP,1)    );
+                    PROTECT(arg1  = allocVector(REALSXP,1)    );
+                    REAL(arg1)[0] = ptneuron->v0;
+                    REAL(arg2)[0] =  ptnet->target[-1+ptneuron->output_aims[0]];
+                    SET_VECTOR_ELT(args, 0, arg1);
+                    SET_VECTOR_ELT(args, 1, arg2);
+                    SET_VECTOR_ELT(args, 2, arg3);
+                    PROTECT(R_fcall = lang2(DELTAE_F, args) );
+                    aux_DELTA = REAL(eval (R_fcall, rho))[0];
+                    UNPROTECT(5);
+                    break;
+               };
+            } else {
+               aux_DELTA = 0.0;
+               for ( ind_other_neuron=0; ind_other_neuron <= ptneuron->last_output_link ; ind_other_neuron++ ) {
+                  pt_that_neuron = ptneuron->output_links[ind_other_neuron];
+                  that_aim       = -1+ptneuron->output_aims[ind_other_neuron];
+                  aux_DELTA     += pt_that_neuron->method_dep_variables.adaptgdwm.delta * pt_that_neuron->weights[that_aim] ;
+               }
+            }
+            ptneuron->method_dep_variables.adaptgdwm.delta = aux_DELTA * ptneuron->v1;
+            bias_change = ptneuron->method_dep_variables.adaptgdwm.momentum  * ptneuron->method_dep_variables.adaptgdwm.former_bias_change - ptneuron->method_dep_variables.adaptgdwm.learning_rate * ptneuron->method_dep_variables.adaptgdwm.delta;
+            ptneuron->bias += bias_change;
+            ptneuron->method_dep_variables.adaptgdwm.former_bias_change = bias_change;
+            for (ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+               considered_input = ptneuron->input_links[ind_weight];
+               if (considered_input < 0 ) {
+                  x_input = ptnet->input[-1-considered_input];
+               } else {
+                  x_input = ptnet->neurons[-1+considered_input]->v0;
+               }
+               weight_change  =  ptneuron->method_dep_variables.adaptgdwm.momentum  * ptneuron->method_dep_variables.adaptgdwm.former_weight_change[ind_weight] - ptneuron->method_dep_variables.adaptgdwm.learning_rate * ptneuron->method_dep_variables.adaptgdwm.delta  * x_input ;
+               ptneuron->weights[ind_weight] += weight_change;
+               ptneuron->method_dep_variables.adaptgdwm.former_weight_change[ind_weight] = weight_change;
+            }
+            /**/
+         }
+         /* END    void adaptgdwm_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */
+      }
+   }
+   copynet_CR (net, ptnet);
+   UNPROTECT(1);
+   return (net);
+}
+
+
+
+
diff --git a/src/AMORE.h b/src/AMORE.h
new file mode 100755
index 0000000..43b0e82
--- /dev/null
+++ b/src/AMORE.h
@@ -0,0 +1,193 @@
+#define a_tansig    1.715904708575539
+#define b_tansig    0.6666666666666667
+#define b_split_a   0.3885219635652736
+#define a_sigmoid   1.0
+
+/* ************************************************ */ 
+/* net elements */
+#define id_NET_LAYERS  0
+#define id_NET_NEURONS 1
+#define id_NET_INPUT   2
+#define id_NET_OUTPUT  3
+#define id_NET_TARGET  4
+#define id_NET_DELTAE  5
+#define id_NET_OTHER_ELEMENTS  4
+
+/**/
+#define NET_LAYERS          VECTOR_ELT(net,id_NET_LAYERS)
+#define NET_NEURONS         VECTOR_ELT(net,id_NET_NEURONS)
+#define NET_INPUT           VECTOR_ELT(net,id_NET_INPUT)
+#define NET_OUTPUT          VECTOR_ELT(net,id_NET_OUTPUT)
+#define NET_TARGET          VECTOR_ELT(net,id_NET_TARGET)
+#define NET_DELTAE          VECTOR_ELT(net,id_NET_DELTAE)
+#define NET_OTHER_ELEMENTS  VECTOR_ELT(net,id_NET_OTHER_ELEMENTS)
+/* neuron elements */
+#define id_ID                    0
+#define id_TYPE                  1
+#define id_ACTIVATION_FUNCTION   2
+#define id_OUTPUT_LINKS          3
+#define id_OUTPUT_AIMS           4
+#define id_INPUT_LINKS           5
+#define id_WEIGHTS               6
+#define id_BIAS                  7
+#define id_V0                    8
+#define id_V1                    9
+#define id_F0                    10
+#define id_F1                    11
+#define id_METHOD                12
+#define id_METHOD_DEP_VARIABLES  13
+/**/
+#define ID                       VECTOR_ELT(neuron,id_ID)
+#define TYPE                     VECTOR_ELT(neuron,id_TYPE)
+#define ACTIVATION_FUNCTION      VECTOR_ELT(neuron,id_ACTIVATION_FUNCTION)
+#define OUTPUT_LINKS             VECTOR_ELT(neuron,id_OUTPUT_LINKS)
+#define OUTPUT_AIMS              VECTOR_ELT(neuron,id_OUTPUT_AIMS)
+#define INPUT_LINKS              VECTOR_ELT(neuron,id_INPUT_LINKS)
+#define WEIGHTS                  VECTOR_ELT(neuron,id_WEIGHTS)
+#define BIAS                     VECTOR_ELT(neuron,id_BIAS)
+#define V0                       VECTOR_ELT(neuron,id_V0)
+#define V1                       VECTOR_ELT(neuron,id_V1)
+#define F0                       VECTOR_ELT(neuron,id_F0)
+#define F1                       VECTOR_ELT(neuron,id_F1)
+#define METHOD                   VECTOR_ELT(neuron,id_METHOD)
+#define METHOD_DEP_VARIABLES     VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES)
+/* METHOD DEPENDENT VARIABLES */
+/* ADAPTgd Adaptive Gradient Descent */
+#define id_ADAPTgd_DELTA                   0
+#define id_ADAPTgd_LEARNING_RATE           1
+/**/
+#define ADAPTgd_DELTA          VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgd_DELTA)
+#define ADAPTgd_LEARNING_RATE  VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgd_LEARNING_RATE)
+/* ADAPTgdwm Adaptive Gradient Descent with Momentum */
+#define id_ADAPTgdwm_DELTA                 0
+#define id_ADAPTgdwm_LEARNING_RATE         1
+#define id_ADAPTgdwm_MOMENTUM              2
+#define id_ADAPTgdwm_FORMER_WEIGHT_CHANGE  3
+#define id_ADAPTgdwm_FORMER_BIAS_CHANGE    4
+/**/
+#define ADAPTgdwm_DELTA                 VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_DELTA)
+#define ADAPTgdwm_LEARNING_RATE         VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_LEARNING_RATE)
+#define ADAPTgdwm_MOMENTUM              VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_MOMENTUM)
+#define ADAPTgdwm_FORMER_WEIGHT_CHANGE  VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_FORMER_WEIGHT_CHANGE)
+#define ADAPTgdwm_FORMER_BIAS_CHANGE    VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_ADAPTgdwm_FORMER_BIAS_CHANGE)
+/* BATCHgd BATCH Gradient Descent */
+#define id_BATCHgd_DELTA                   0
+#define id_BATCHgd_LEARNING_RATE           1
+#define id_BATCHgd_SUM_DELTA_X             2
+#define id_BATCHgd_SUM_DELTA_BIAS          3
+/**/
+#define BATCHgd_DELTA                 VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgd_DELTA)
+#define BATCHgd_LEARNING_RATE         VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgd_LEARNING_RATE)
+#define BATCHgd_SUM_DELTA_X           VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgd_SUM_DELTA_X)
+#define BATCHgd_SUM_DELTA_BIAS        VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgd_SUM_DELTA_BIAS)
+/* BATCHgdwm BATCH Gradient Descent with Momentum */
+#define id_BATCHgdwm_DELTA                 0
+#define id_BATCHgdwm_LEARNING_RATE         1
+#define id_BATCHgdwm_SUM_DELTA_X           2
+#define id_BATCHgdwm_SUM_DELTA_BIAS        3
+#define id_BATCHgdwm_MOMENTUM              4
+#define id_BATCHgdwm_FORMER_WEIGHT_CHANGE  5
+#define id_BATCHgdwm_FORMER_BIAS_CHANGE    6
+/**/
+#define BATCHgdwm_DELTA                 VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_DELTA)
+#define BATCHgdwm_LEARNING_RATE         VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_LEARNING_RATE)
+#define BATCHgdwm_SUM_DELTA_X           VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_SUM_DELTA_X)
+#define BATCHgdwm_SUM_DELTA_BIAS        VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_SUM_DELTA_BIAS)
+#define BATCHgdwm_MOMENTUM              VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_MOMENTUM)
+#define BATCHgdwm_FORMER_WEIGHT_CHANGE  VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_FORMER_WEIGHT_CHANGE)
+#define BATCHgdwm_FORMER_BIAS_CHANGE    VECTOR_ELT(VECTOR_ELT(neuron,id_METHOD_DEP_VARIABLES),id_BATCHgdwm_FORMER_BIAS_CHANGE)
+
+
+/* OTHER ELEMENTS */
+#define id_DELTAE_NAME     0
+#define id_DELTAE_F        1
+#define id_DELTAE_STAO     2
+
+#define DELTAE_NAME  VECTOR_ELT(VECTOR_ELT(net,id_NET_DELTAE), id_DELTAE_NAME ) 
+#define DELTAE_F     VECTOR_ELT(VECTOR_ELT(net,id_NET_DELTAE), id_DELTAE_F    )
+#define DELTAE_STAO  VECTOR_ELT(VECTOR_ELT(net,id_NET_DELTAE), id_DELTAE_STAO )
+
+/**/
+#define CUSTOM_NAME 0
+#define LMS_NAME    1
+#define LMLS_NAME   2
+#define TAO_NAME    3
+
+#define CUSTOM_ACTF  0
+#define TANSIG_ACTF  1
+#define SIGMOID_ACTF 2
+#define PURELIN_ACTF 3 
+#define HARDLIM_ACTF 4
+
+#define TYPE_HIDDEN 0
+#define TYPE_OUTPUT 1
+
+#define METHOD_ADAPTgd   0
+#define METHOD_ADAPTgdwm 1
+#define METHOD_BATCHgd   2
+#define METHOD_BATCHgdwm 3
+
+struct AMOREneuron {
+   int      id, type, actf;
+   int      last_input_link, last_output_link;
+   int    * input_links;
+   double * weights;
+   struct AMOREneuron ** output_links;
+   int    * output_aims;
+   double   bias;
+   double   v0;
+   double   v1;
+   int      method;
+   union {
+      struct {
+         double   delta;
+         double   learning_rate;
+      }  adaptgd;
+      struct {
+         double   delta;
+         double   learning_rate;
+         double   momentum;
+         double * former_weight_change;
+         double   former_bias_change;
+      } adaptgdwm;
+      struct {
+         double   delta;
+         double   learning_rate;
+         double * sum_delta_x;
+         double   sum_delta_bias;
+      } batchgd;
+      struct {
+         double   delta;
+         double   learning_rate;
+         double * sum_delta_x;
+         double   sum_delta_bias;
+         double   momentum;
+         double * former_weight_change;
+         double   former_bias_change;
+      } batchgdwm;
+   } method_dep_variables;
+};
+
+struct AMOREnet {
+   struct AMOREneuron *** layers;
+   int  last_layer; 
+   int  * layer_size;
+   struct AMOREneuron ** neurons;
+   int      last_neuron;
+   double * input;
+   int      last_input;
+   double * output;
+   int      last_output;
+   double * target;
+   struct {
+      char  name;
+      double stao;
+   } deltaE;
+};
+
+
+
+struct AMOREnet * copynet_RC (SEXP net);
+void              copynet_CR (SEXP net, struct AMOREnet * ptnet);
+
+
diff --git a/src/BATCHgd.c b/src/BATCHgd.c
new file mode 100755
index 0000000..81778c8
--- /dev/null
+++ b/src/BATCHgd.c
@@ -0,0 +1,311 @@
+/*
+##############################################################
+# batchgdgd ( batchgd gradient descent without momentum )
+##############################################################
+*/
+
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+#include <R.h>
+#include <Rinternals.h>
+#include <Rdefines.h>
+#include "AMORE.h"
+
+#ifdef _OPENMP
+  #include <omp.h>
+#endif
+/**
+##########################################################
+# BATCH Gradient Descent (without momentum)
+##########################################################
+**/
+
+SEXP BATCHgd_loop_MLPnet (SEXP origNet, SEXP Ptrans, SEXP Ttrans, SEXP nepochs, SEXP rho, SEXP thread_number ) {
+   //The only difference between wm and without it is the weight update (and the place the values are stored, one is batchgd the other is batchgdwm)
+   SEXP net;
+   SEXP R_fcall, args, arg1, arg2, arg3;
+
+   PROTECT(net=duplicate(origNet));
+   int* Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP));
+   int* Ttransdim = INTEGER(coerceVector(getAttrib(Ttrans, R_DimSymbol), INTSXP));
+   int n_epochs  = INTEGER(nepochs)[0];
+   struct AMOREnet* ptnet = copynet_RC(net);
+   struct AMOREneuron** neurons = ptnet->neurons;
+
+   /////////////////////////////////////////////////////////////////////////
+   //Convert input and target to double only once (and instead of copying it every time, just change the pointers)
+   //Different rows for easy switching pointers
+   double*  input_data  = REAL(Ptrans);
+   double*  target_data = REAL(Ttrans);
+   double** inputs  = (double**) R_alloc(Ptransdim[1],sizeof(double*)); //This is an 'Index'
+   double** targets = (double**) R_alloc(Ptransdim[1],sizeof(double*)); //This is an 'Index'
+
+   for (int fila=0; fila < Ptransdim[1]; fila++) {
+      inputs[fila]  = &input_data [fila*Ptransdim[0]];
+      targets[fila] = &target_data[fila*Ttransdim[0]];
+   }
+   /////////////////////////////////////////////////////////////////////////
+
+   /////////////////////////////////////////////////////////////////////////
+   // Thread number calculation
+   int n_threads = 1;
+#ifdef _OPENMP
+   {
+      int max_threads = omp_get_max_threads();
+      int given_threads = 0;
+
+      if (isInteger(thread_number))
+        given_threads = INTEGER(thread_number)[0];
+      else if (isNumeric(thread_number))
+        given_threads = floor(REAL(thread_number)[0]);
+
+      if (given_threads <1) //I HAVE THE POWER TO SCHEDULE!
+        if(max_threads  >1)
+          n_threads = max_threads-1; //Leave a CPU free
+        else
+          n_threads = 1;
+      else if (given_threads > max_threads)
+        n_threads = max_threads;
+      else
+        n_threads = given_threads;
+
+      if (neurons[0]->actf == CUSTOM_ACTF) //OMP + R custom functions = bad idea
+        n_threads = 1;
+      else if ((ptnet->deltaE.name != LMLS_NAME) && (ptnet->deltaE.name != LMS_NAME))
+        n_threads = 1;
+
+      //printf("Using %i threads from a max of %i.\n",n_threads ,max_threads);
+   }
+#endif
+   /////////////////////////////////////////////////////////////////////////
+
+   /////////////////////////////////////////////////////////////////////////
+   //Contribution (who is to blame) : Parallelization done by Jose Maria  
+   //Memory allocation for running different threads in parallel:
+   // Each thread will have his own pool of memory to handle the two kinds of temp vars:
+   //   Vars used only inside the forwards/backwards (v0, v1 and method_delta)
+   //     These vars will be initialized and read only by each thread
+   //   Vars that hold the information on how much the weights and the bias should change 
+   //     These vars will be initialized by each thread, then accumulated and read by the master thread when the batch is finished
+   int n_neurons = ptnet->last_neuron+1;
+   //Temp values, internal in each iteration
+   double **  v0s                 = (double** ) R_alloc(n_threads,sizeof(double* )); //This is an 'Index'
+   double **  v1s                 = (double** ) R_alloc(n_threads,sizeof(double* )); //This is an 'Index'
+   double **  method_deltas       = (double** ) R_alloc(n_threads,sizeof(double* )); //This is an 'Index'
+   //Accumulated values
+   double **  method_deltas_bias  = (double** ) R_alloc(n_threads,sizeof(double* )); //This is an 'Index'
+   double *** method_sums_delta_x = (double***) R_alloc(n_threads,sizeof(double**)); //This is an 'Index'
+
+   for(int id_thread=0; id_thread<n_threads;id_thread++){
+      double* chunk = (double*) R_alloc(4*n_neurons,sizeof(double)); //Actual chunk of memory for each thread, trying to avoid R_alloc calls
+      //Advantages: Good proximity reference in cache for values of the same thread, and since it has at least 2 neurons
+      // (Who would have a NNetwork with less than 2 neurons?), chunks are larger than 64 bytes (i7 L2 cache block size?)
+      v0s               [id_thread] =  chunk             ;  
+      v1s               [id_thread] = &chunk[  n_neurons];
+      method_deltas     [id_thread] = &chunk[2*n_neurons];
+      method_deltas_bias[id_thread] = &chunk[3*n_neurons];
+      
+      method_sums_delta_x[id_thread] = (double**) R_alloc(n_neurons,sizeof(double*)); //This is an 'Index'
+      for(int i=0; i<n_neurons; i++) //Different weight number for each layer, TODO: R_alloc each layer instead of each neuron
+         method_sums_delta_x[id_thread][i] = (double*) R_alloc(neurons[i]->last_input_link+1,sizeof(double));
+   }
+   /////////////////////////////////////////////////////////////////////////
+
+   /////////////////////////////////////////////////////////////////////////
+   //Consistency (leave pnet as if the function had worked with their values instead of external ones)
+   // R_alloc should handle freeing the memory, so it's not needed to free the previously allocated memory to avoid memory leaks
+   // Changing pointer instead of copying data
+   ptnet->input  = inputs[Ptransdim[1]-1];
+   ptnet->target = targets[Ptransdim[1]-1];
+   /////////////////////////////////////////////////////////////////////////
+   
+   /////////////////////////////////////////////////////////////////////////
+   // Dividing learning rate (and momentum) by the number of samples in the training batch
+   // Using local temp memory because of cache (proximity references) and direct access to memory and avoiding modification of header file
+   // Using R_alloc for R to manage the memory
+   double * neuron_learning_rate  = (double*) R_alloc(n_neurons,sizeof(double));
+   for(int i=0; i<n_neurons; i++)
+      neuron_learning_rate[i] = ptnet->neurons[i]->method_dep_variables.batchgd.learning_rate / Ptransdim[1];
+   /////////////////////////////////////////////////////////////////////////
+
+   for (int epoch=0; epoch < n_epochs; epoch++) {
+      //Run BATCH in parallel
+      #pragma omp parallel num_threads(n_threads)
+      {
+#ifdef _OPENMP
+        int id_thread = omp_get_thread_num();
+#else
+        int id_thread = 0;
+#endif
+        //////////////////////////////////////////////////////////////////////////////////////
+        //// Using 'private' memory for each thread temp values instead of ptnet's own memory
+        //// It's needed for multithreaded execution, in single thread model it's also used (is only modified if not compiled with OMP).
+        //////////////////////////////////////////////////////////////////////////////////////
+        //Select vars for this thread from the "memory pool":
+        //  Used only by each thread:
+        double* v0 = v0s[id_thread]; // double[n_neurons] //Using this instead of ptneuron->v0
+        double* v1 = v1s[id_thread]; // double[n_neurons] //Using this instead of ptneuron->v1
+        double* method_delta      = method_deltas[id_thread]; // double[n_neurons] //Using this instead of ptneuron->ptneuron->method_dep_variables.batchgdwm.delta
+#ifdef _OPENMP
+        //  Used to update weigths:
+        double* method_delta_bias = method_deltas_bias[id_thread]; // double[n_neurons] //Instead of ptneuron->method_dep_variables.batchgd.sum_delta_bias
+        double** method_sum_delta_x = method_sums_delta_x[id_thread]; // double*[n_neurons] //Instead of ptneuron->method_dep_variables.batchgd.sum_delta_x
+        
+        //Initialize vars that handle comm between batch execution and weight update
+        for (int ind_neuron=0; ind_neuron <= ptnet->last_neuron; ind_neuron++){
+            method_delta_bias[ind_neuron] = 0.0; //TODO: Should memset be used?
+            for (int ind_weight=0; ind_weight <= neurons[ind_neuron]->last_input_link; ind_weight++)
+              method_sum_delta_x[ind_neuron][ind_weight] = 0.0; //TODO: Should memset be used?
+        }
+#endif
+        //////////////////////////////////////////////////////////////////////////////////////
+
+        #pragma omp for 
+        for (int fila=0; fila < Ptransdim[1]; fila++) {
+           // R_alloc should handle freeing the memory, so it's not needed to free the previously allocated memory to avoid memory leaks
+           // Also, these are read-only from this point onwards, should not be a problem accessing them on parallel threads 
+           // ptnet->input  = inputs[fila];  //Moved into actual access
+           // ptnet->target = targets[fila]; //Moved into actual access
+           
+           /* BEGIN   void batchgd_forward_mlpnet(AMOREnet * ptnet)   */
+           for (int ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+              struct AMOREneuron * ptneuron = neurons[ind_neuron];
+              /* BEGIN batchgd_forward_MLPneuron */
+              double a=0.0;
+              for (int ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+                 int considered_input = ptneuron->input_links[ind_weight];
+                 double x_input = (considered_input < 0 )? inputs[fila][-1-considered_input] :  v0[-1+considered_input];
+                 a +=  ptneuron->weights[ind_weight] * x_input;
+              }
+              a += ptneuron->bias;
+              switch (ptneuron->actf) {
+                 case TANSIG_ACTF:
+                    v0[ind_neuron] =  a_tansig * tanh(a * b_tansig); 
+                    v1[ind_neuron] =  b_tansig / a_tansig * (a_tansig - v0[ind_neuron])*(a_tansig + v0[ind_neuron]);
+                    break;
+                 case SIGMOID_ACTF:
+                    v0[ind_neuron] =  1/(1+exp(- a_sigmoid * a)) ; 
+                    v1[ind_neuron] =  a_sigmoid * v0[ind_neuron] * ( 1 - v0[ind_neuron] );
+                    break;
+                 case PURELIN_ACTF:
+                    v0[ind_neuron] = a; 
+                    v1[ind_neuron] = 1;
+                    break;
+                 case HARDLIM_ACTF:
+                    if (a>=0) {
+                       v0[ind_neuron] = 1.0;
+                    } else {
+                       v0[ind_neuron] = 0.0;
+                    }
+                    v1[ind_neuron] = NA_REAL;
+                    break;
+                 case CUSTOM_ACTF:
+                    PROTECT(args    = allocVector(REALSXP,1));
+                    REAL(args)[0]   = a;
+                    PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args));
+                    v0[ind_neuron]  = REAL(eval (R_fcall, rho))[0];
+                    PROTECT(args    = allocVector(REALSXP,1));   
+                    REAL(args)[0]   = a;
+                    PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F1), args));
+                    v1[ind_neuron]  = REAL(eval (R_fcall, rho))[0];
+                    UNPROTECT(4);
+                    break; 
+              }
+           /* END batchgd_forward_MLPneuron */
+           }
+           /* END     void batchgd_forward_mlpnet(AMOREnet * ptnet)   */
+
+
+           /* BEGIN   void Parcial_batchgd_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */
+           for (int ind_neuron=ptnet->last_neuron; ind_neuron >=0;  ind_neuron-- ) {
+              struct AMOREneuron* ptneuron=ptnet->neurons[ind_neuron];
+           /**/
+              double aux_DELTA = 0.0;
+              if (ptneuron->type==TYPE_OUTPUT) {
+                 switch(ptnet->deltaE.name) {
+                    case LMS_NAME:
+                       aux_DELTA = v0[ind_neuron] - targets[fila][-1+ptneuron->output_aims[0]];
+                    break;
+                    case LMLS_NAME:
+                       aux_DELTA = v0[ind_neuron] - targets[fila][-1+ptneuron->output_aims[0]];
+                       aux_DELTA = aux_DELTA / (1 + aux_DELTA*aux_DELTA / 2);
+                       break;
+                    default:   /* if (ptneuron->deltaE.name==TAO_NAME)   for now TAO is handled like custom */ 
+                      /* ####### NOTE: TAO handling still needs to be changed  */
+                      PROTECT(args  = allocVector(VECSXP,3)     );
+                      PROTECT(arg3  = net                       );
+                      PROTECT(arg2  = allocVector(REALSXP,1)    );
+                      PROTECT(arg1  = allocVector(REALSXP,1)    );
+                      REAL(arg1)[0] = v0[ind_neuron];
+                      REAL(arg2)[0] =  targets[fila][-1+ptneuron->output_aims[0]];
+                      SET_VECTOR_ELT(args, 0, arg1);
+                      SET_VECTOR_ELT(args, 1, arg2);
+                      SET_VECTOR_ELT(args, 2, arg3);
+                      PROTECT(R_fcall = lang2(DELTAE_F, args) );
+                      aux_DELTA = REAL(eval (R_fcall, rho))[0];
+                      UNPROTECT(5);
+                      break;
+                 };
+              } else {
+                 for (int ind_other_neuron=0; ind_other_neuron <= ptneuron->last_output_link ; ind_other_neuron++ ) {
+                    struct AMOREneuron* pt_that_neuron = ptneuron->output_links[ind_other_neuron];
+                    int that_aim       = -1+ptneuron->output_aims[ind_other_neuron];
+                    aux_DELTA     += method_delta[pt_that_neuron->id-1] * pt_that_neuron->weights[that_aim] ;
+                 }
+              }
+
+              method_delta[ptneuron->id-1] = aux_DELTA * v1[ind_neuron]; //R ids start in 1
+
+              for (int ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+                 int considered_input = ptneuron->input_links[ind_weight];
+                 double x_input = considered_input < 0 ? inputs[fila][-1-considered_input] : v0[-1+considered_input];
+#ifdef _OPENMP
+                 method_sum_delta_x[ind_neuron][ind_weight] += method_delta[ptneuron->id-1] * x_input ;
+              }
+              method_delta_bias[ind_neuron] += method_delta[ptneuron->id-1];
+           } /* End partial backwards */
+        } /* end row loop */
+      } //End parallel region
+
+//Up to this point BATCHGD and BATCHGDWM are the same
+
+      //////////////////////////////////////////////////////////////////////////////////////
+      //Update ptnet with the values from batch calculations
+      for(int id_thread=0; id_thread<n_threads;id_thread++){ //Maybe reduction could be used
+        for (int ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+          struct AMOREneuron *  ptneuron = neurons[ind_neuron];
+          ptneuron->method_dep_variables.batchgd.sum_delta_bias +=  method_deltas_bias[id_thread][ind_neuron];
+          for (int ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+            ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_weight] += method_sums_delta_x[id_thread][ind_neuron][ind_weight];
+          }
+        }
+      }
+      //////////////////////////////////////////////////////////////////////////////////////
+#else
+                 ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_weight] += method_delta[ptneuron->id-1] * x_input ;
+              }
+              ptneuron->method_dep_variables.batchgd.sum_delta_bias += method_delta[ptneuron->id-1];
+
+           } /* End partial backwards */
+        } /* end row loop */
+      } //End parallel region (#pragma should have been ignored)
+#endif     
+
+      /** BEGIN UPDATEWEIGHTS */
+      for (int ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+         struct AMOREneuron* ptneuron = ptnet->neurons[ind_neuron];
+         double bias_change = - neuron_learning_rate[ind_neuron] * ptneuron->method_dep_variables.batchgd.sum_delta_bias;
+         ptneuron->bias += bias_change;
+         for (int ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+            double weight_change  =  - neuron_learning_rate[ind_neuron] * ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_weight] ;
+            ptneuron->weights[ind_weight] += weight_change;
+         }
+      }
+         /* END UPDATE WEIGHTS  */
+   } /* end epoch loop*/
+   copynet_CR (net, ptnet);
+   UNPROTECT(1);
+   return (net);
+}
+   
diff --git a/src/BATCHgdwm.c b/src/BATCHgdwm.c
new file mode 100755
index 0000000..ebbee31
--- /dev/null
+++ b/src/BATCHgdwm.c
@@ -0,0 +1,313 @@
+/**
+##############################################################
+# batchgdwm ( BATCH gradient descent WITH momentum )
+##############################################################
+*/
+
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+#include <R.h>
+#include <Rinternals.h>
+#include <Rdefines.h>
+#include "AMORE.h"
+
+#ifdef _OPENMP
+  #include <omp.h>
+#endif
+
+SEXP BATCHgdwm_loop_MLPnet (SEXP origNet, SEXP Ptrans, SEXP Ttrans, SEXP nepochs, SEXP rho, SEXP thread_number ) {
+   //The only differences from the version without momentum are the weight update and where the values are stored (batchgd vs. batchgdwm)
+   SEXP net;
+   SEXP R_fcall, args, arg1, arg2, arg3;
+
+   PROTECT(net=duplicate(origNet));
+   int* Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP));
+   int* Ttransdim = INTEGER(coerceVector(getAttrib(Ttrans, R_DimSymbol), INTSXP));
+   int n_epochs  = INTEGER(nepochs)[0];
+   struct AMOREnet* ptnet = copynet_RC(net);
+   struct AMOREneuron** neurons = ptnet->neurons;
+
+   /////////////////////////////////////////////////////////////////////////
+   //Convert input and target to double only once (and instead of copying it every time, just change the pointers)
+   //Different rows for easy switching pointers
+   double*  input_data  = REAL(Ptrans);
+   double*  target_data = REAL(Ttrans);
+   double** inputs  = (double**) R_alloc(Ptransdim[1],sizeof(double*)); //This is an 'Index'
+   double** targets = (double**) R_alloc(Ptransdim[1],sizeof(double*)); //This is an 'Index'
+
+   for (int fila=0; fila < Ptransdim[1]; fila++) {
+      inputs[fila]  = &input_data [fila*Ptransdim[0]];
+      targets[fila] = &target_data[fila*Ttransdim[0]];
+   }
+   /////////////////////////////////////////////////////////////////////////
+
+   /////////////////////////////////////////////////////////////////////////
+   // Thread number calculation
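+   // If the caller requests fewer than 1 thread, all but one of the available cores are used; requests above
+   // the maximum are clamped. Custom activation functions (and deltaE functions other than LMS/LMLS) force a
+   // single thread, because calling back into R from OpenMP worker threads is not safe.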
+   int n_threads = 1;
+#ifdef _OPENMP
+   {
+      int max_threads = omp_get_max_threads();
+      int given_threads = 0;
+
+      if (isInteger(thread_number))
+        given_threads = INTEGER(thread_number)[0];
+      else if (isNumeric(thread_number))
+        given_threads = floor(REAL(thread_number)[0]);
+
+      if (given_threads <1) //I HAVE THE POWER TO SCHEDULE!
+        if(max_threads  >1)
+          n_threads = max_threads-1; //Leave a CPU free
+        else
+          n_threads = 1;
+      else if (given_threads > max_threads)
+        n_threads = max_threads;
+      else
+        n_threads = given_threads;
+
+      if (neurons[0]->actf == CUSTOM_ACTF) //OMP + R custom functions = bad idea
+        n_threads = 1;
+      else if ((ptnet->deltaE.name != LMLS_NAME) && (ptnet->deltaE.name != LMS_NAME))
+        n_threads = 1;
+
+      //printf("Using %i threads from a max of %i.\n",n_threads ,max_threads);
+   }
+#endif
+   /////////////////////////////////////////////////////////////////////////
+
+   /////////////////////////////////////////////////////////////////////////
+   //Contribution (who is to blame) : Parallelization done by Jose Maria
+   //Memory allocation for running different threads in parallel:
+   // Each thread will have its own pool of memory to handle the two kinds of temp vars:
+   //   Vars used only inside the forwards/backwards (v0, v1 and method_delta)
+   //     These vars will be initialized and read only by each thread
+   //   Vars that hold the information on how much the weights and the bias should change 
+   //     These vars will be initialized by each thread, then accumulated and read by the master thread when the batch is finished
+   int n_neurons = ptnet->last_neuron+1;
+   //Temp values, internal in each iteration
+   double **  v0s                 = (double** ) R_alloc(n_threads,sizeof(double* )); //This is an 'Index'
+   double **  v1s                 = (double** ) R_alloc(n_threads,sizeof(double* )); //This is an 'Index'
+   double **  method_deltas       = (double** ) R_alloc(n_threads,sizeof(double* )); //This is an 'Index'
+   //Accumulated values
+   double **  method_deltas_bias  = (double** ) R_alloc(n_threads,sizeof(double* )); //This is an 'Index'
+   double *** method_sums_delta_x = (double***) R_alloc(n_threads,sizeof(double**)); //This is an 'Index'
+
+   for(int id_thread=0; id_thread<n_threads;id_thread++){
+      double* chunk = (double*) R_alloc(4*n_neurons,sizeof(double)); //Actual chunk of memory for each thread, trying to avoid R_alloc calls
+      //Advantages: good cache locality for the values of the same thread, and since the network has at least 2 neurons
+      // (who would build a neural network with fewer than 2 neurons?), each chunk is larger than 64 bytes (a typical cache line size)
+      v0s               [id_thread] =  chunk             ;  
+      v1s               [id_thread] = &chunk[  n_neurons];
+      method_deltas     [id_thread] = &chunk[2*n_neurons];
+      method_deltas_bias[id_thread] = &chunk[3*n_neurons];
+      
+      method_sums_delta_x[id_thread] = (double**) R_alloc(n_neurons,sizeof(double*)); //This is an 'Index'
+      for(int i=0; i<n_neurons; i++) //Different weight count for each layer, TODO: R_alloc per layer instead of per neuron
+         method_sums_delta_x[id_thread][i] = (double*) R_alloc(neurons[i]->last_input_link+1,sizeof(double));
+   }
+   /////////////////////////////////////////////////////////////////////////
+
+   /////////////////////////////////////////////////////////////////////////
+   //Consistency (leave ptnet as if the function had worked with its own values instead of the external ones)
+   // Memory obtained with R_alloc is freed by R, so the previously allocated buffers do not need to be freed here to avoid leaks
+   // Changing pointer instead of copying data
+   ptnet->input  = inputs[Ptransdim[1]-1];
+   ptnet->target = targets[Ptransdim[1]-1];
+   /////////////////////////////////////////////////////////////////////////
+   
+   /////////////////////////////////////////////////////////////////////////
+   // Dividing learning rate and momentum by the number of samples in the training batch
+   // Using local temporary arrays for cache locality and direct memory access, and to avoid modifying the header file
+   // Using R_alloc for R to manage the memory
+   double * neuron_learning_rate = (double*) R_alloc(n_neurons,sizeof(double));
+   double * neuron_momentum      = (double*) R_alloc(n_neurons,sizeof(double));
+   for(int i=0; i<n_neurons; i++){
+      neuron_learning_rate[i] = ptnet->neurons[i]->method_dep_variables.batchgdwm.learning_rate / Ptransdim[1];
+      neuron_momentum[i]      = ptnet->neurons[i]->method_dep_variables.batchgdwm.momentum      / Ptransdim[1];
+   }
+   /////////////////////////////////////////////////////////////////////////
+
+   for (int epoch=0; epoch < n_epochs; epoch++) {
+      //Run BATCH in parallel
+      #pragma omp parallel num_threads(n_threads)
+      {
+#ifdef _OPENMP
+        int id_thread = omp_get_thread_num();
+#else
+        int id_thread = 0;
+#endif
+        //////////////////////////////////////////////////////////////////////////////////////
+        //// Using 'private' memory for each thread temp values instead of ptnet's own memory
+        //// This is required for multithreaded execution; the single-threaded build uses it as well (ptnet's own memory is only modified when not compiled with OpenMP).
+        //////////////////////////////////////////////////////////////////////////////////////
+        //Select vars for this thread from the "memory pool":
+        //  Used only by each thread:
+        double* v0 = v0s[id_thread]; // double[n_neurons] //Using this instead of ptneuron->v0
+        double* v1 = v1s[id_thread]; // double[n_neurons] //Using this instead of ptneuron->v1
+        double* method_delta      = method_deltas[id_thread]; // double[n_neurons] //Using this instead of ptneuron->ptneuron->method_dep_variables.batchgdwm.delta
+#ifdef _OPENMP
+        //  Used to update weights:
+        double* method_delta_bias = method_deltas_bias[id_thread]; // double[n_neurons] //Instead of ptneuron->method_dep_variables.batchgdwm.sum_delta_bias
+        double** method_sum_delta_x = method_sums_delta_x[id_thread]; // double*[n_neurons] //Instead of ptneuron->method_dep_variables.batchgdwm.sum_delta_x
+        
+        //Initialize vars that handle comm between batch execution and weight update
+        for (int ind_neuron=0; ind_neuron <= ptnet->last_neuron; ind_neuron++){
+            method_delta_bias[ind_neuron] = 0.0; //TODO: Should memset be used?
+            for (int ind_weight=0; ind_weight <= neurons[ind_neuron]->last_input_link; ind_weight++)
+              method_sum_delta_x[ind_neuron][ind_weight] = 0.0; //TODO: Should memset be used?
+        }
+#endif
+        //////////////////////////////////////////////////////////////////////////////////////
+
+        #pragma omp for 
+        for (int fila=0; fila < Ptransdim[1]; fila++) {
+           // Memory obtained with R_alloc is freed by R, so nothing allocated earlier needs to be freed here to avoid leaks
+           // Also, these are read-only from this point onwards, so accessing them from parallel threads is safe
+           // ptnet->input  = inputs[fila];  //Moved into actual access
+           // ptnet->target = targets[fila]; //Moved into actual access
+           
+           /* BEGIN   void batchgd_forward_mlpnet(AMOREnet * ptnet)   */
+           for (int ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+              struct AMOREneuron * ptneuron = neurons[ind_neuron];
+              /* BEGIN batchgd_forward_MLPneuron */
+              double a=0.0;
+              for (int ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+                 int considered_input = ptneuron->input_links[ind_weight];
+                 double x_input = (considered_input < 0 )? inputs[fila][-1-considered_input] :  v0[-1+considered_input];
+                 a +=  ptneuron->weights[ind_weight] * x_input;
+              }
+              a += ptneuron->bias;
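+              /* v0 stores the neuron output f(a) and v1 its derivative f'(a), used in the backward pass
+                 (NA for hardlim, which has no usable derivative) */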
+              switch (ptneuron->actf) {
+                 case TANSIG_ACTF:
+                    v0[ind_neuron] =  a_tansig * tanh(a * b_tansig); 
+                    v1[ind_neuron] =  b_tansig / a_tansig * (a_tansig - v0[ind_neuron])*(a_tansig + v0[ind_neuron]);
+                    break;
+                 case SIGMOID_ACTF:
+                    v0[ind_neuron] =  1/(1+exp(- a_sigmoid * a)) ; 
+                    v1[ind_neuron] =  a_sigmoid * v0[ind_neuron] * ( 1 - v0[ind_neuron] );
+                    break;
+                 case PURELIN_ACTF:
+                    v0[ind_neuron] = a; 
+                    v1[ind_neuron] = 1;
+                    break;
+                 case HARDLIM_ACTF:
+                    if (a>=0) {
+                       v0[ind_neuron] = 1.0;
+                    } else {
+                       v0[ind_neuron] = 0.0;
+                    }
+                    v1[ind_neuron] = NA_REAL;
+                    break;
+                 case CUSTOM_ACTF:
+                    PROTECT(args    = allocVector(REALSXP,1));
+                    REAL(args)[0]   = a;
+                    PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args));
+                    v0[ind_neuron]  = REAL(eval (R_fcall, rho))[0];
+                    PROTECT(args    = allocVector(REALSXP,1));   
+                    REAL(args)[0]   = a;
+                    PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F1), args));
+                    v1[ind_neuron]  = REAL(eval (R_fcall, rho))[0];
+                    UNPROTECT(4);
+                    break; 
+              }
+           /* END batchgd_forward_MLPneuron */
+           }
+           /* END     void batchgd_forward_mlpnet(AMOREnet * ptnet)   */
+
+
+           /* BEGIN   void Parcial_batchgd_backwards_MLPnet (AMOREnet * ptnet, SEXP rho) */
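+           /* Backward pass: for output neurons the delta comes from the derivative of the error
+              (LMS, LMLS or a custom deltaE function evaluated in R); for hidden neurons it is the
+              weighted sum of the deltas of the neurons they feed into. */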
+           for (int ind_neuron=ptnet->last_neuron; ind_neuron >=0;  ind_neuron-- ) {
+              struct AMOREneuron* ptneuron=ptnet->neurons[ind_neuron];
+           /**/
+              double aux_DELTA = 0.0;
+              if (ptneuron->type==TYPE_OUTPUT) {
+                 switch(ptnet->deltaE.name) {
+                    case LMS_NAME:
+                       aux_DELTA = v0[ind_neuron] - targets[fila][-1+ptneuron->output_aims[0]];
+                    break;
+                    case LMLS_NAME:
+                       aux_DELTA = v0[ind_neuron] - targets[fila][-1+ptneuron->output_aims[0]];
+                       aux_DELTA = aux_DELTA / (1 + aux_DELTA*aux_DELTA / 2);
+                       break;
+                    default:   /* if (ptneuron->deltaE.name==TAO_NAME)   for now TAO is handled like custom */
+                      /* ####### NOTE: the TAO case still needs to be handled properly */
+                      PROTECT(args  = allocVector(VECSXP,3)     );
+                      PROTECT(arg3  = net                       );
+                      PROTECT(arg2  = allocVector(REALSXP,1)    );
+                      PROTECT(arg1  = allocVector(REALSXP,1)    );
+                      REAL(arg1)[0] = v0[ind_neuron];
+                      REAL(arg2)[0] =  targets[fila][-1+ptneuron->output_aims[0]];
+                      SET_VECTOR_ELT(args, 0, arg1);
+                      SET_VECTOR_ELT(args, 1, arg2);
+                      SET_VECTOR_ELT(args, 2, arg3);
+                      PROTECT(R_fcall = lang2(DELTAE_F, args) );
+                      aux_DELTA = REAL(eval (R_fcall, rho))[0];
+                      UNPROTECT(5);
+                      break;
+                 };
+              } else {
+                 for (int ind_other_neuron=0; ind_other_neuron <= ptneuron->last_output_link ; ind_other_neuron++ ) {
+                    struct AMOREneuron* pt_that_neuron = ptneuron->output_links[ind_other_neuron];
+                    int that_aim       = -1+ptneuron->output_aims[ind_other_neuron];
+                    aux_DELTA     += method_delta[pt_that_neuron->id-1] * pt_that_neuron->weights[that_aim] ;
+                 }
+              }
+
+              method_delta[ptneuron->id-1] = aux_DELTA * v1[ind_neuron]; // R ids start at 1
+
+              for (int ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+                 int considered_input = ptneuron->input_links[ind_weight];
+                 double x_input = considered_input < 0 ? inputs[fila][-1-considered_input] : v0[-1+considered_input];
+#ifdef _OPENMP
+                 method_sum_delta_x[ind_neuron][ind_weight] += method_delta[ptneuron->id-1] * x_input ;
+              }
+              method_delta_bias[ind_neuron] += method_delta[ptneuron->id-1];
+           } /* End partial backward pass */
+        } /* end row loop */
+      } //End parallel region
+
+//Up to this point BATCHGD and BATCHGDWM are the same
+
+      //////////////////////////////////////////////////////////////////////////////////////
+      //Update ptnet with the values from batch calculations
+      for(int id_thread=0; id_thread<n_threads;id_thread++){ //Maybe reduction could be used
+        for (int ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+          struct AMOREneuron *  ptneuron = neurons[ind_neuron];
+          ptneuron->method_dep_variables.batchgdwm.sum_delta_bias +=  method_deltas_bias[id_thread][ind_neuron];
+          for (int ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+            ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_weight] += method_sums_delta_x[id_thread][ind_neuron][ind_weight];
+          }
+        }
+      }
+      //////////////////////////////////////////////////////////////////////////////////////
+#else
+                 ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_weight] += method_delta[ptneuron->id-1] * x_input ;
+              }
+              ptneuron->method_dep_variables.batchgdwm.sum_delta_bias += method_delta[ptneuron->id-1];
+
+           } /* End partial backward pass */
+        } /* end row loop */
+      } //End parallel region (without OpenMP the #pragma is simply ignored)
+#endif     
+
+      /** BEGIN UPDATEWEIGHTS */
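+      /* Gradient descent with momentum: change = momentum * former_change - learning_rate * accumulated_gradient;
+         the change is applied to the bias/weight and stored as former_change for the next epoch. */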
+      for (int ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+         struct AMOREneuron * ptneuron = ptnet->neurons[ind_neuron];
+         double bias_change = neuron_momentum[ind_neuron] * ptneuron->method_dep_variables.batchgdwm.former_bias_change - neuron_learning_rate[ind_neuron] * ptneuron->method_dep_variables.batchgdwm.sum_delta_bias;
+         ptneuron->method_dep_variables.batchgdwm.former_bias_change = bias_change ;
+         ptneuron->bias += bias_change;
+         for (int ind_weight = 0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+            double weight_change  =  neuron_momentum[ind_neuron] * ptneuron->method_dep_variables.batchgdwm.former_weight_change[ind_weight] - neuron_learning_rate[ind_neuron] * ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_weight] ;
+            ptneuron->method_dep_variables.batchgdwm.former_weight_change[ind_weight] = weight_change ;
+            ptneuron->weights[ind_weight] += weight_change;
+         }
+            /**/
+      }
+         /* END UPDATE WEIGHTS  */
+   } /* end epoch loop*/
+   copynet_CR (net, ptnet);
+   UNPROTECT(1);
+   return (net);
+}
+   
+
diff --git a/src/Makevars b/src/Makevars
new file mode 100644
index 0000000..0ee3615
--- /dev/null
+++ b/src/Makevars
@@ -0,0 +1,2 @@
+PKG_CFLAGS = $(SHLIB_OPENMP_CFLAGS)
+PKG_LIBS = $(SHLIB_OPENMP_CFLAGS)
diff --git a/src/Makevars.win b/src/Makevars.win
new file mode 100644
index 0000000..0ee3615
--- /dev/null
+++ b/src/Makevars.win
@@ -0,0 +1,2 @@
+PKG_CFLAGS = $(SHLIB_OPENMP_CFLAGS)
+PKG_LIBS = $(SHLIB_OPENMP_CFLAGS)
diff --git a/src/copynet.c b/src/copynet.c
new file mode 100644
index 0000000..a0a324f
--- /dev/null
+++ b/src/copynet.c
@@ -0,0 +1,208 @@
+
+#include <string.h>
+#include <stdlib.h>
+#include <math.h>
+#include <R.h>
+#include <Rinternals.h>
+#include <Rdefines.h>
+#include "AMORE.h"
+
+
+
+struct AMOREnet * copynet_RC (SEXP net);
+void              copynet_CR (SEXP net, struct AMOREnet * ptnet);
+/**
+##########################################################
+# copynet_RC
+#	Copies the SEXP net to the *ptnet
+##########################################################
+**/
+
+struct AMOREnet * copynet_RC (SEXP net) {
+   struct AMOREnet * ptnet;
+   struct AMOREneuron * ptneuron;
+   int i, ind_neuron, ind_input_neuron, ind_output_neuron, ind_layer;
+   SEXP neuron;
+   int aux_neuron;
+
+   ptnet = (struct AMOREnet *) R_alloc(1, sizeof(struct AMOREnet));
+   ptnet->last_neuron  = -1+LENGTH(NET_NEURONS);
+   ptnet->last_input   = -1+LENGTH(NET_INPUT);
+   ptnet->last_output  = -1+LENGTH(NET_OUTPUT);
+   ptnet->input  = (double *) R_alloc(ptnet->last_input  + 1, sizeof(double));
+   ptnet->output = (double *) R_alloc(ptnet->last_output + 1, sizeof(double));
+   ptnet->target = (double *) R_alloc(ptnet->last_output + 1, sizeof(double));
+   for (i=0; i <= ptnet->last_input; i++) {
+      ptnet->input[i] =  REAL(NET_INPUT)[i];
+   }
+   for (i=0; i <= ptnet->last_output; i++) {
+      ptnet->output[i] =  REAL(NET_OUTPUT)[i];
+      ptnet->target[i] =  REAL(NET_OUTPUT)[i];
+   }
+   ptnet->deltaE.name = INTEGER(DELTAE_NAME)[0];
+   ptnet->deltaE.stao = REAL(DELTAE_STAO)[0];
+
+   ptnet->neurons = (struct AMOREneuron **) R_alloc(ptnet->last_neuron + 1, sizeof(struct AMOREneuron *));
+   for (ind_neuron=0; ind_neuron <= ptnet->last_neuron; ind_neuron ++ ) {
+      ptnet->neurons[ind_neuron] = (struct AMOREneuron *) R_alloc(1, sizeof(struct AMOREneuron));
+   /* do not join with the following block*/
+   }
+   for (ind_neuron=0; ind_neuron <= ptnet->last_neuron; ind_neuron ++ ) {
+      PROTECT(neuron=VECTOR_ELT(NET_NEURONS, ind_neuron ) );
+      ptneuron = ptnet->neurons[ind_neuron];
+      ptneuron->id               =  INTEGER(ID)[0];
+      if (strcmp(CHAR(STRING_ELT(TYPE,0)),"output")==0) {
+         ptneuron->type = TYPE_OUTPUT;
+      } else {
+         ptneuron->type = TYPE_HIDDEN;
+      }
+
+      ptneuron->actf             =  INTEGER(ACTIVATION_FUNCTION)[0] ;
+      ptneuron->last_output_link =  -1 + LENGTH(OUTPUT_LINKS) ;
+      ptneuron->last_input_link  =  -1 + LENGTH(INPUT_LINKS)  ;
+      ptneuron->output_aims      = (int *) R_alloc(ptneuron->last_output_link+1, sizeof(int));
+      ptneuron->input_links      = (int *) R_alloc(ptneuron->last_input_link+1,  sizeof(int));
+      ptneuron->output_links     = (struct AMOREneuron **) R_alloc(ptneuron->last_output_link+1, sizeof(struct AMOREneuron *));
+      ptneuron->weights          = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double));
+
+      for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) {
+         ptneuron->input_links[ind_input_neuron] = INTEGER(INPUT_LINKS)[ind_input_neuron];
+         ptneuron->weights[ind_input_neuron] = REAL(WEIGHTS)[ind_input_neuron];
+      }
+      for (ind_output_neuron=0; ind_output_neuron <= ptneuron->last_output_link; ind_output_neuron++) {
+         ptneuron->output_aims[ind_output_neuron]  = INTEGER(OUTPUT_AIMS)[0];
+         if(INTEGER(OUTPUT_LINKS)[ind_output_neuron]==NA_INTEGER){
+            ptneuron->output_links[ind_output_neuron] = NULL;
+         } else {
+            ptneuron->output_links[ind_output_neuron] = ptnet->neurons[-1+INTEGER(OUTPUT_LINKS)[ind_output_neuron]];
+         }
+      }
+      ptneuron->bias    = REAL(BIAS)[0];
+      ptneuron->v0      = REAL(V0)[0];
+      ptneuron->v1      = REAL(V1)[0];
+      if (strcmp(CHAR(STRING_ELT(METHOD,0)),"ADAPTgd")==0) {
+         ptneuron->method  = METHOD_ADAPTgd;
+         ptneuron->method_dep_variables.adaptgd.delta              = REAL(ADAPTgd_DELTA)[0] ;
+         ptneuron->method_dep_variables.adaptgd.learning_rate      = REAL(ADAPTgd_LEARNING_RATE)[0] ;
+      } else if (strcmp(CHAR(STRING_ELT(METHOD,0)),"ADAPTgdwm")==0) {
+         ptneuron->method  = METHOD_ADAPTgdwm;
+         ptneuron->method_dep_variables.adaptgdwm.delta              = REAL(ADAPTgdwm_DELTA)[0] ;
+         ptneuron->method_dep_variables.adaptgdwm.learning_rate      = REAL(ADAPTgdwm_LEARNING_RATE)[0] ;
+         ptneuron->method_dep_variables.adaptgdwm.momentum           = REAL(ADAPTgdwm_MOMENTUM)[0] ;
+         ptneuron->method_dep_variables.adaptgdwm.former_bias_change = REAL(ADAPTgdwm_FORMER_BIAS_CHANGE)[0];
+         ptneuron->method_dep_variables.adaptgdwm.former_weight_change = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double));
+         for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) {
+            ptneuron->method_dep_variables.adaptgdwm.former_weight_change[ind_input_neuron] = REAL(ADAPTgdwm_FORMER_WEIGHT_CHANGE)[ind_input_neuron] ;
+         }
+      } else if (strcmp(CHAR(STRING_ELT(METHOD,0)),"BATCHgd")==0) {
+         ptneuron->method  = METHOD_BATCHgd;
+         ptneuron->method_dep_variables.batchgd.delta              = REAL(BATCHgd_DELTA)[0] ;
+         ptneuron->method_dep_variables.batchgd.learning_rate      = REAL(BATCHgd_LEARNING_RATE)[0] ;
+         ptneuron->method_dep_variables.batchgd.sum_delta_x        = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double));
+         for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) {
+            ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_input_neuron] = REAL(BATCHgd_SUM_DELTA_X)[ind_input_neuron] ;
+         }      
+         ptneuron->method_dep_variables.batchgd.sum_delta_bias     = REAL(BATCHgd_SUM_DELTA_BIAS)[0] ;
+      } else if (strcmp(CHAR(STRING_ELT(METHOD,0)),"BATCHgdwm")==0) {
+         ptneuron->method  = METHOD_BATCHgdwm;
+         ptneuron->method_dep_variables.batchgdwm.delta            = REAL(BATCHgdwm_DELTA)[0] ;
+         ptneuron->method_dep_variables.batchgdwm.learning_rate    = REAL(BATCHgdwm_LEARNING_RATE)[0] ;
+         ptneuron->method_dep_variables.batchgdwm.sum_delta_x      = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double));
+         for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) {
+            ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_input_neuron] = REAL(BATCHgdwm_SUM_DELTA_X)[ind_input_neuron] ;
+         }      
+         ptneuron->method_dep_variables.batchgdwm.sum_delta_bias     = REAL(BATCHgdwm_SUM_DELTA_BIAS)[0] ;
+         ptneuron->method_dep_variables.batchgdwm.momentum           = REAL(BATCHgdwm_MOMENTUM)[0] ;
+         ptneuron->method_dep_variables.batchgdwm.former_bias_change = REAL(BATCHgdwm_FORMER_BIAS_CHANGE)[0] ;
+         ptneuron->method_dep_variables.batchgdwm.former_weight_change = (double *) R_alloc(ptneuron->last_input_link+1, sizeof(double));
+         for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) {
+            ptneuron->method_dep_variables.batchgdwm.former_weight_change[ind_input_neuron] = REAL(BATCHgdwm_FORMER_WEIGHT_CHANGE)[ind_input_neuron] ;
+         }
+      }
+      UNPROTECT(1);
+   }
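+   /* Rebuild the layer structure: the first element of NET_LAYERS is skipped ("the first one doesn't count"),
+      and each remaining layer gets an array of pointers to its neurons. */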
+   ptnet->last_layer   = -2+LENGTH(NET_LAYERS); /* the first one doesn't count */
+   ptnet->layer_size   = (int *) R_alloc(ptnet->last_layer  + 1, sizeof(int));
+   ptnet->layers = (struct AMOREneuron ***) R_alloc(1+ptnet->last_layer, sizeof(struct AMOREneuron **));   
+   for (ind_layer=0; ind_layer <= ptnet->last_layer ; ind_layer++) {
+      ptnet->layer_size[ind_layer] = LENGTH(VECTOR_ELT(NET_LAYERS, 1+ind_layer));
+      ptnet->layers[ind_layer] = (struct AMOREneuron **) R_alloc(ptnet->layer_size[ind_layer], sizeof(struct AMOREneuron *));
+      for (ind_neuron=0; ind_neuron < ptnet->layer_size[ind_layer]; ind_neuron++) {
+         aux_neuron = -1+INTEGER(VECTOR_ELT(NET_LAYERS, 1+ind_layer))[ind_neuron];
+         ptnet->layers[ind_layer][ind_neuron] = ptnet->neurons[ aux_neuron ];
+      }
+   }
+   return (ptnet);
+}
+
+/** 
+################################
+# copynet_CR
+# Copies *ptnet to SEXP net
+################################
+**/
+void copynet_CR (SEXP net, struct AMOREnet * ptnet){
+   struct AMOREneuron * ptneuron;
+   int ind_neuron, ind_input_neuron, ind_output_neuron, ind_weight;
+   SEXP neuron;
+
+   REAL(DELTAE_STAO)[0] = ptnet->deltaE.stao ;
+
+   for (ind_neuron=0; ind_neuron <= ptnet->last_neuron; ind_neuron ++ ) {
+      PROTECT(neuron=VECTOR_ELT(NET_NEURONS, ind_neuron ) );
+      ptneuron = ptnet->neurons[ind_neuron];
+      for (ind_input_neuron=0; ind_input_neuron <= ptneuron->last_input_link; ind_input_neuron++) {
+         REAL(WEIGHTS)[ind_input_neuron] = ptneuron->weights[ind_input_neuron] ;
+      }
+      REAL(BIAS)[0] = ptneuron->bias ;
+      REAL(V0)[0]   = ptneuron->v0 ;
+      REAL(V1)[0]   = ptneuron->v1 ;
+
+      switch(ptneuron->method) {
+         case METHOD_ADAPTgd :
+            REAL(ADAPTgd_DELTA)[0]         = ptneuron->method_dep_variables.adaptgd.delta ;
+            REAL(ADAPTgd_LEARNING_RATE)[0] = ptneuron->method_dep_variables.adaptgd.learning_rate;
+            break;
+         case METHOD_ADAPTgdwm:
+            REAL(ADAPTgdwm_DELTA)[0]              = ptneuron->method_dep_variables.adaptgdwm.delta;
+            REAL(ADAPTgdwm_LEARNING_RATE)[0]      = ptneuron->method_dep_variables.adaptgdwm.learning_rate ;
+            REAL(ADAPTgdwm_MOMENTUM)[0]           = ptneuron->method_dep_variables.adaptgdwm.momentum  ;
+            REAL(ADAPTgdwm_FORMER_BIAS_CHANGE)[0] = ptneuron->method_dep_variables.adaptgdwm.former_bias_change ;
+            for  (ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+               REAL(ADAPTgdwm_FORMER_WEIGHT_CHANGE)[ind_weight] = ptneuron->method_dep_variables.adaptgdwm.former_weight_change[ind_weight];
+            }
+            break;
+         case METHOD_BATCHgd:
+            REAL(BATCHgd_DELTA)[0]                = ptneuron->method_dep_variables.batchgd.delta ;
+            REAL(BATCHgd_LEARNING_RATE)[0]        = ptneuron->method_dep_variables.batchgd.learning_rate ;
+            for  (ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+               REAL(BATCHgd_SUM_DELTA_X)[ind_weight] = ptneuron->method_dep_variables.batchgd.sum_delta_x[ind_weight];
+            }
+            REAL(BATCHgd_SUM_DELTA_BIAS)[0]       = ptneuron->method_dep_variables.batchgd.sum_delta_bias ;
+            break;
+        default:
+            REAL(BATCHgdwm_DELTA)[0]              = ptneuron->method_dep_variables.batchgdwm.delta ;
+            REAL(BATCHgdwm_LEARNING_RATE)[0]      = ptneuron->method_dep_variables.batchgdwm.learning_rate ;
+            for  (ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+               REAL(BATCHgdwm_SUM_DELTA_X)[ind_weight] = ptneuron->method_dep_variables.batchgdwm.sum_delta_x[ind_weight];
+            }            
+            REAL(BATCHgdwm_SUM_DELTA_BIAS)[0]     = ptneuron->method_dep_variables.batchgdwm.sum_delta_bias;
+            REAL(BATCHgdwm_MOMENTUM)[0]           = ptneuron->method_dep_variables.batchgdwm.momentum ;
+            REAL(BATCHgdwm_FORMER_BIAS_CHANGE)[0] = ptneuron->method_dep_variables.batchgdwm.former_bias_change ;
+            for (ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+                REAL(BATCHgdwm_FORMER_WEIGHT_CHANGE)[ind_weight] = ptneuron->method_dep_variables.batchgdwm.former_weight_change[ind_weight];
+            }
+            break;
+        }
+      UNPROTECT(1);
+   }
+   return ;
+}
+
+
+
+
+
+
+
+
diff --git a/src/sim.c b/src/sim.c
new file mode 100755
index 0000000..e08f48f
--- /dev/null
+++ b/src/sim.c
@@ -0,0 +1,177 @@
+#include <string.h>
+#include <math.h>
+#include <stdlib.h>
+#include <R.h>
+#include <Rinternals.h>
+#include <Rdefines.h>
+#include "AMORE.h"
+
+/******************************************************************************************************************/
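+/* sim_Forward_MLPnet: propagates every input pattern (each column of Ptrans) forward through the
+   network and writes the activations of the output layer into ytrans. */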
+SEXP sim_Forward_MLPnet (SEXP net, SEXP Ptrans, SEXP ytrans, SEXP rho) {
+   int * Ptransdim, *ytransdim, fila, columna, Pcounter, ycounter;
+   int considered_input, ind_neuron, ind_other_neuron, that_neuron, that_aim, ind_weight;
+   double  x_input, a;
+   int epoch, n_epochs;
+
+   SEXP R_fcall, args, arg1, arg2, arg3;
+   SEXP aims;
+   struct AMOREneuron * ptneuron, * pt_that_neuron;
+   struct AMOREnet * ptnet;
+
+   double aux1, aux2;
+
+   Ptransdim = INTEGER(coerceVector(getAttrib(Ptrans, R_DimSymbol), INTSXP));
+   ytransdim = INTEGER(coerceVector(getAttrib(ytrans, R_DimSymbol), INTSXP));
+
+
+   ptnet = copynet_RC(net);
+
+   for (fila=0, Pcounter=0, ycounter=0; fila < Ptransdim[1]; fila++) {
+      for( columna =0; columna < Ptransdim[0] ; columna++, Pcounter++) {
+         ptnet->input[columna] =  REAL(Ptrans)[Pcounter];
+      }
+      for (ind_neuron=0; ind_neuron <= ptnet->last_neuron ; ind_neuron++ ) {
+         ptneuron = ptnet->neurons[ind_neuron];
+         for (a=0.0, ind_weight=0; ind_weight <= ptneuron->last_input_link; ind_weight++) {
+            considered_input = ptneuron->input_links[ind_weight];
+            if (considered_input < 0 ) {
+               x_input = ptnet->input[-1-considered_input];
+            } else {
+               x_input = ptnet->neurons[-1+considered_input]->v0;
+            }
+            a +=  ptneuron->weights[ind_weight] * x_input;
+         }
+         a += ptneuron->bias;
+         switch (ptneuron->actf) {
+            case TANSIG_ACTF:
+               ptneuron->v0 =  a_tansig * tanh(a * b_tansig); 
+               break;
+            case SIGMOID_ACTF:
+               ptneuron->v0 =  1/(1+exp(- a_sigmoid * a)) ; 
+               break;
+            case PURELIN_ACTF:
+               ptneuron->v0 = a; 
+               break;
+            case HARDLIM_ACTF:
+               if (a>=0) {
+                  ptneuron->v0 = 1.0;
+               } else {
+                  ptneuron->v0 = 0.0;
+               }
+               break;
+            case CUSTOM_ACTF:
+               PROTECT(args    = allocVector(REALSXP,1));
+               REAL(args)[0]   = a;
+               PROTECT(R_fcall = lang2(VECTOR_ELT(VECTOR_ELT(NET_NEURONS, ind_neuron), id_F0), args));
+               ptneuron->v0    = REAL(eval (R_fcall, rho))[0];
+               UNPROTECT(2);
+             break; 
+         }
+      }
+      for (ind_neuron=0; ind_neuron < ytransdim[0] ; ind_neuron++ ) {
+         REAL(ytrans)[ycounter++] = ptnet->layers[ptnet->last_layer][ind_neuron]->v0;
+      }
+    } 
+
+    return (ytrans);
+}
+
+
+
+/******************************************************************************************************************/
+
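+/* print_MLPneuron: dumps a neuron's fields (id, type, links, weights, bias, v0/v1 and the
+   method-dependent training variables) via Rprintf, for inspection. */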
+void print_MLPneuron (SEXP neuron) {
+int i;
+   Rprintf("***********************************************************\n");
+/* ID */
+   Rprintf("ID:\t\t\t%d \n",             INTEGER(ID)[0]            );
+/* TYPE */
+   Rprintf("TYPE:\t\t\t%s \n",           CHAR(STRING_ELT(TYPE,0))  );
+/* ACTIVATION FUNCTION */
+   Rprintf("ACT. FUNCTION:\t\t%s\n",     CHAR(STRING_ELT(ACTIVATION_FUNCTION,0)) );
+/* OUTPUT LINKS */
+   if (INTEGER(OUTPUT_LINKS)[0] != NA_INTEGER ) {
+      for (i=0; i<LENGTH(OUTPUT_LINKS); i++) {
+         Rprintf("OUTPUT_LINKS %d:\t\t%d \n", i+1, INTEGER(OUTPUT_LINKS)[i]  );
+     }
+   } else {
+      Rprintf("OUTPUT_LINKS:\t\tNA\n");
+   }
+/* OUTPUT AIMS */
+   for (i=0; i<LENGTH(OUTPUT_AIMS); i++) {
+      Rprintf("OUTPUT_AIMS.%d:\t\t%d \n", i+1, INTEGER(OUTPUT_AIMS)[i]   );
+   }
+/* INPUT LINKS */
+   for (i=0; i<LENGTH(INPUT_LINKS); i++) {
+      Rprintf("INPUT_LINKS.%d:\t\t%d \n", i+1, INTEGER(INPUT_LINKS)[i]  );
+   }
+/* WEIGHTS */
+   for (i=0; i<LENGTH(WEIGHTS); i++) {
+      Rprintf("WEIGHTS.%d:\t\t%f \n", i+1, REAL(WEIGHTS)[i]  );
+   }
+/* BIAS */
+   Rprintf("BIAS:\t\t\t%f \n", REAL(BIAS)[0]  );
+/* V0 */
+   Rprintf("V0:\t\t\t%f \n", REAL(V0)[0]  );
+/* V1 */
+   Rprintf("V1:\t\t\t%f \n", REAL(V1)[0]  );
+/* METHOD */
+   Rprintf("METHOD:\t\t\t%s\n", CHAR(STRING_ELT(METHOD,0))  );
+   Rprintf("METHOD DEP VARIABLES:\n");
+   if (           strcmp(CHAR(STRING_ELT(METHOD,0)),"ADAPTgd"  )==0) {
+      /* DELTA */
+           Rprintf("DELTA:\t\t\t%f \n",       REAL(ADAPTgd_DELTA)[0]  );
+      /* LEARNING RATE */
+           Rprintf("LEARNING RATE:\t\t%f \n", REAL(ADAPTgd_LEARNING_RATE)[0]  );
+           Rprintf("***********************************************************\n");
+   } else    if ( strcmp(CHAR(STRING_ELT(METHOD,0)),"ADAPTgdwm")==0) {
+      /* DELTA */
+           Rprintf("DELTA:\t\t\t%f \n",       REAL(ADAPTgdwm_DELTA)[0]  );
+      /* LEARNING RATE */
+           Rprintf("LEARNING RATE:\t\t%f \n", REAL(ADAPTgdwm_LEARNING_RATE)[0]  );
+      /* MOMENTUM */
+           Rprintf("MOMENTUM:\t\t%f \n",      REAL(ADAPTgdwm_MOMENTUM)[0]  );
+      /* FORMER WEIGHT CHANGE */
+           for (i=0; i<LENGTH(ADAPTgdwm_FORMER_WEIGHT_CHANGE); i++) {
+              Rprintf("FORMER_WEIGHT_CHANGE.%d:\t%f \n", i+1,  REAL(ADAPTgdwm_FORMER_WEIGHT_CHANGE)[i]  );
+           }
+      /* FORMER BIAS CHANGE */
+           Rprintf("FORMER_BIAS_CHANGE:\t%f \n", REAL(ADAPTgdwm_FORMER_BIAS_CHANGE)[0]  );
+           Rprintf("***********************************************************\n");
+   } else    if ( strcmp(CHAR(STRING_ELT(METHOD,0)),"BATCHgd"  )==0) {
+      /* DELTA */
+           Rprintf("DELTA:\t\t\t%f \n",       REAL(BATCHgd_DELTA)[0]  );
+      /* LEARNING RATE */
+           Rprintf("LEARNING RATE:\t\t%f \n", REAL(BATCHgd_LEARNING_RATE)[0]  );
+      /* SUM DELTA X */
+           for (i=0; i<LENGTH(BATCHgd_SUM_DELTA_X); i++) {
+              Rprintf("SUM DELTA X %d:\t\t%f \n", i+1,  REAL(BATCHgd_SUM_DELTA_X)[i]  );
+           }
+      /* SUM DELTA BIAS */
+           Rprintf("SUM DELTA BIAS:\t\t%f \n",REAL(BATCHgd_SUM_DELTA_BIAS)[0]  );
+           Rprintf("***********************************************************\n");
+   } else    if ( strcmp(CHAR(STRING_ELT(METHOD,0)),"BATCHgdwm")==0) {
+      /* DELTA */
+           Rprintf("DELTA:\t\t\t%f \n",       REAL(BATCHgdwm_DELTA)[0]  );
+      /* LEARNING RATE */
+           Rprintf("LEARNING RATE:\t\t%f \n", REAL(BATCHgdwm_LEARNING_RATE)[0]  );
+      /* MOMENTUM */
+           Rprintf("MOMENTUM:\t\t%f \n",      REAL(BATCHgdwm_MOMENTUM)[0]  );
+      /* FORMER WEIGHT CHANGE */
+           for (i=0; i<LENGTH(BATCHgdwm_FORMER_WEIGHT_CHANGE); i++) {
+              Rprintf("FORMER_WEIGHT_CHANGE.%d:\t%f \n", i+1,  REAL(BATCHgdwm_FORMER_WEIGHT_CHANGE)[i]  );
+           }
+      /* FORMER BIAS CHANGE */
+           Rprintf("FORMER_BIAS_CHANGE:\t%f \n", REAL(BATCHgdwm_FORMER_BIAS_CHANGE)[0]  );
+      /* SUM DELTA X */
+           for (i=0; i<LENGTH(BATCHgdwm_SUM_DELTA_X); i++) {
+              Rprintf("SUM DELTA X %d:\t\t%f \n", i+1,  REAL(BATCHgdwm_SUM_DELTA_X)[i]  );
+           }
+      /* SUM DELTA BIAS */
+           Rprintf("SUM DELTA BIAS:\t\t%f \n",REAL(BATCHgdwm_SUM_DELTA_BIAS)[0]  );
+           Rprintf("***********************************************************\n");
+   }
+
+}
+/******************************************************************************************************************/
+

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/r-cran-amore.git



More information about the debian-science-commits mailing list