[mlpack] 195/207: Remove unreleased features.

Barak A. Pearlmutter barak+git at pearlmutter.net
Thu Mar 23 17:53:53 UTC 2017


This is an automated email from the git hooks/post-receive script.

bap pushed a commit to branch master
in repository mlpack.

commit 4d6c2a4775b4a9518bf6d26eae06da2f4bd8e0ee
Author: Ryan Curtin <ryan at ratml.org>
Date:   Tue Mar 21 14:45:43 2017 -0400

    Remove unreleased features.
---
 src/mlpack/methods/ann/CMakeLists.txt              |  14 -
 .../ann/activation_functions/identity_function.hpp |  96 --
 .../ann/activation_functions/logistic_function.hpp | 114 ---
 .../activation_functions/rectifier_function.hpp    | 115 ---
 .../ann/activation_functions/softsign_function.hpp | 134 ---
 .../ann/activation_functions/tanh_function.hpp     | 105 ---
 src/mlpack/methods/ann/cnn.hpp                     | 448 ----------
 .../ann/convolution_rules/fft_convolution.hpp      | 221 -----
 .../ann/convolution_rules/naive_convolution.hpp    | 190 ----
 .../ann/convolution_rules/svd_convolution.hpp      | 199 -----
 src/mlpack/methods/ann/ffn.hpp                     | 447 ----------
 .../kathirvalavakumar_subavathi_init.hpp           | 121 ---
 .../methods/ann/init_rules/nguyen_widrow_init.hpp  | 117 ---
 src/mlpack/methods/ann/init_rules/oivs_init.hpp    | 130 ---
 .../methods/ann/init_rules/orthogonal_init.hpp     |  82 --
 src/mlpack/methods/ann/init_rules/random_init.hpp  |  92 --
 src/mlpack/methods/ann/init_rules/zero_init.hpp    |  65 --
 src/mlpack/methods/ann/layer/base_layer.hpp        | 223 -----
 src/mlpack/methods/ann/layer/bias_layer.hpp        | 208 -----
 .../ann/layer/binary_classification_layer.hpp      | 106 ---
 src/mlpack/methods/ann/layer/constant_layer.hpp    | 121 ---
 src/mlpack/methods/ann/layer/conv_layer.hpp        | 324 -------
 src/mlpack/methods/ann/layer/dropconnect_layer.hpp | 361 --------
 src/mlpack/methods/ann/layer/dropout_layer.hpp     | 252 ------
 src/mlpack/methods/ann/layer/glimpse_layer.hpp     | 484 -----------
 src/mlpack/methods/ann/layer/hard_tanh_layer.hpp   | 259 ------
 src/mlpack/methods/ann/layer/leaky_relu_layer.hpp  | 240 -----
 src/mlpack/methods/ann/layer/linear_layer.hpp      | 289 -------
 src/mlpack/methods/ann/layer/log_softmax_layer.hpp | 131 ---
 src/mlpack/methods/ann/layer/lstm_layer.hpp        | 418 ---------
 .../ann/layer/multiclass_classification_layer.hpp  |  98 ---
 .../methods/ann/layer/multiply_constant_layer.hpp  | 113 ---
 .../ann/layer/negative_log_likelihood_layer.hpp    | 127 ---
 src/mlpack/methods/ann/layer/one_hot_layer.hpp     |  96 --
 src/mlpack/methods/ann/layer/pooling_layer.hpp     | 267 ------
 src/mlpack/methods/ann/layer/recurrent_layer.hpp   | 192 ----
 .../methods/ann/layer/reinforce_normal_layer.hpp   | 139 ---
 src/mlpack/methods/ann/layer/softmax_layer.hpp     | 114 ---
 src/mlpack/methods/ann/layer/sparse_bias_layer.hpp | 177 ----
 .../methods/ann/layer/sparse_input_layer.hpp       | 180 ----
 .../methods/ann/layer/sparse_output_layer.hpp      | 227 -----
 .../methods/ann/layer/vr_class_reward_layer.hpp    | 171 ----
 src/mlpack/methods/ann/network_util.hpp            | 247 ------
 .../ann/performance_functions/cee_function.hpp     |  74 --
 .../ann/performance_functions/mse_function.hpp     |  61 --
 .../ann/performance_functions/sparse_function.hpp  | 141 ---
 .../ann/performance_functions/sse_function.hpp     |  64 --
 .../methods/ann/pooling_rules/max_pooling.hpp      |  56 --
 .../methods/ann/pooling_rules/mean_pooling.hpp     |  56 --
 src/mlpack/methods/ann/rnn.hpp                     | 799 -----------------
 src/mlpack/methods/mvu/mvu.hpp                     |  48 -
 src/mlpack/methods/mvu/mvu_main.cpp                |  79 --
 src/mlpack/methods/rmva/rmva.hpp                   | 963 ---------------------
 src/mlpack/methods/rmva/rmva_main.cpp              | 286 ------
 54 files changed, 10881 deletions(-)

diff --git a/src/mlpack/methods/ann/CMakeLists.txt b/src/mlpack/methods/ann/CMakeLists.txt
deleted file mode 100644
index 44572c4..0000000
--- a/src/mlpack/methods/ann/CMakeLists.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-# Define the files we need to compile
-# Anything not in this list will not be compiled into mlpack.
-set(SOURCES
-  init_rules/random_init.hpp
-)
-
-# Add directory name to sources.
-set(DIR_SRCS)
-foreach(file ${SOURCES})
-  set(DIR_SRCS ${DIR_SRCS} ${CMAKE_CURRENT_SOURCE_DIR}/${file})
-endforeach()
-# Append sources (with directory name) to list of all mlpack sources (used at
-# the parent scope).
-set(MLPACK_SRCS ${MLPACK_SRCS} ${DIR_SRCS} PARENT_SCOPE)
diff --git a/src/mlpack/methods/ann/activation_functions/identity_function.hpp b/src/mlpack/methods/ann/activation_functions/identity_function.hpp
deleted file mode 100644
index b1a1990..0000000
--- a/src/mlpack/methods/ann/activation_functions/identity_function.hpp
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * @file identity_function.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the identity function.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_IDENTITY_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_IDENTITY_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The identity function, defined by
- *
- * @f{eqnarray*}{
- * f(x) &=& x \\
- * f'(x) &=& 1
- * @f}
- */
-class IdentityFunction
-{
- public:
-  /**
-   * Computes the identity function.
-   *
-   * @param x Input data.
-   * @return f(x).
-   */
-  static double fn(const double x)
-  {
-    return x;
-  }
-
-  /**
-   * Computes the identity function.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void fn(const InputVecType& x, OutputVecType& y)
-  {
-    y = x;
-  }
-
-  /**
-   * Computes the first derivative of the identity function.
-   *
-   * @param x Input data.
-   * @return f'(x)
-   */
-  static double deriv(const double /* unused */)
-  {
-    return 1.0;
-  }
-
-  /**
-   * Computes the first derivatives of the identity function.
-   *
-   * @param y Input activations.
-   * @param x The resulting derivatives.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void deriv(const InputVecType& y, OutputVecType& x)
-  {
-    x.ones(y.n_elem);
-  }
-
-  /**
-   * Computes the first derivatives of the identity function using a 3rd order
-   * tensor as input.
-   *
-   * @param y Input activations.
-   * @param x The resulting derivatives.
-   */
-  template<typename eT>
-  static void deriv(const arma::Cube<eT>& y, arma::Cube<eT>& x)
-  {
-    x.ones(y.n_rows, y.n_cols, y.n_slices);
-  }
-
-
-}; // class IdentityFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
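
The removed IdentityFunction is header-only and self-contained; a minimal
standalone sketch of the same pattern, assuming only Armadillo and none of
the mlpack API, looks like this:

    #include <armadillo>

    // Minimal standalone sketch of the removed IdentityFunction pattern;
    // depends only on Armadillo, not on the mlpack API.
    struct Identity
    {
      static double fn(const double x) { return x; }

      template<typename VecType>
      static void fn(const VecType& x, VecType& y) { y = x; }

      // The derivative is 1 everywhere, independent of the activation.
      template<typename VecType>
      static void deriv(const VecType& y, VecType& x) { x.ones(y.n_elem); }
    };

    int main()
    {
      arma::vec x = {-2.0, 0.0, 3.5}, y, d;
      Identity::fn(x, y);     // y == x.
      Identity::deriv(y, d);  // d == (1, 1, 1).
      y.print("f(x):");
      d.print("f'(x):");
      return 0;
    }
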
diff --git a/src/mlpack/methods/ann/activation_functions/logistic_function.hpp b/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
deleted file mode 100644
index f818cd4..0000000
--- a/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * @file logistic_function.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the logistic function.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_LOGISTIC_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_LOGISTIC_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The logistic function, defined by
- *
- * @f{eqnarray*}{
- * f(x) &=& \frac{1}{1 + e^{-x}} \\
- * f'(x) &=& f(x) * (1 - f(x)) \\
- * f^{-1}(y) &=& ln(\frac{y}{1-y})
- * @f}
- */
-class LogisticFunction
-{
-  public:
-  /**
-   * Computes the logistic function.
-   *
-   * @param x Input data.
-   * @return f(x).
-   */
-  template<typename eT>
-  static double fn(const eT x)
-  {
-    if (x < arma::Datum<eT>::log_max)
-    {
-      if (x > -arma::Datum<eT>::log_max)
-        return 1.0 /  (1.0 + std::exp(-x));
-
-      return 0.0;
-    }
-
-    return 1.0;
-  }
-
-  /**
-   * Computes the logistic function.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void fn(const InputVecType& x, OutputVecType& y)
-  {
-    y = (1.0 / (1 + arma::exp(-x)));
-  }
-
-  /**
-   * Computes the first derivative of the logistic function.
-   *
-   * @param y Input activation, i.e. f(x).
-   * @return f'(x)
-   */
-  static double deriv(const double y)
-  {
-    return y * (1.0 - y);
-  }
-
-  /**
-   * Computes the first derivatives of the logistic function.
-   *
-   * @param y Input activations.
-   * @param x The resulting derivatives.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void deriv(const InputVecType& y, OutputVecType& x)
-  {
-    x = y % (1.0 - y);
-  }
-
-  /**
-   * Computes the inverse of the logistic function.
-   *
-   * @param y Input data.
-   * @return f^{-1}(y)
-   */
-  static double inv(const double y)
-  {
-    return arma::trunc_log(y / (1 - y));
-  }
-
-  /**
-   * Computes the inverse of the logistic function.
-   *
-   * @param y Input data.
-   * @param x The resulting inverse of the input data.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void inv(const InputVecType& y, OutputVecType& x)
-  {
-    x = arma::trunc_log(y / (1 - y));
-  }
-}; // class LogisticFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
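
The log_max guards in fn() above exist because std::exp(-x) overflows for
large |x|, and the deriv() overload takes the stored activation y and uses
f'(x) = y (1 - y). A standalone sketch of the same numerically safe
sigmoid, assuming only the C++ standard library:

    #include <cmath>
    #include <cstdio>
    #include <initializer_list>
    #include <limits>

    // Numerically safe logistic sigmoid, mirroring the guards the removed
    // LogisticFunction::fn() placed around exp() overflow/underflow.
    double sigmoid(const double x)
    {
      // Largest x for which std::exp(x) is still finite.
      static const double logMax =
          std::log(std::numeric_limits<double>::max());

      if (x >= logMax)  return 1.0;  // exp(-x) underflows to 0.
      if (x <= -logMax) return 0.0;  // exp(-x) would overflow.
      return 1.0 / (1.0 + std::exp(-x));
    }

    int main()
    {
      for (const double x : {-1000.0, -5.0, 0.0, 5.0, 1000.0})
        std::printf("sigmoid(%8.1f) = %.8f\n", x, sigmoid(x));
      return 0;
    }
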
diff --git a/src/mlpack/methods/ann/activation_functions/rectifier_function.hpp b/src/mlpack/methods/ann/activation_functions/rectifier_function.hpp
deleted file mode 100644
index 1241de1..0000000
--- a/src/mlpack/methods/ann/activation_functions/rectifier_function.hpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/**
- * @file rectifier_function.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the rectifier function as described by
- * V. Nair and G. E. Hinton.
- *
- * For more information, see the following paper.
- *
- * @code
- * @misc{NairHinton2010,
-   *   author = {Vinod Nair and Geoffrey E. Hinton},
- *   title = {Rectified Linear Units Improve Restricted Boltzmann Machines},
- *   year = {2010}
- * }
- * @endcode
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_RECTIFIER_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_RECTIFIER_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <algorithm>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The rectifier function, defined by
- *
- * @f{eqnarray*}{
- * f(x) &=& \max(0, x) \\
- * f'(x) &=& \left\{
- *   \begin{array}{lr}
- *     1 & : x > 0 \\
- *     0 & : x \le 0
- *   \end{array}
- * \right.
- * @f}
- */
-class RectifierFunction
-{
- public:
-  /**
-   * Computes the rectifier function.
-   *
-   * @param x Input data.
-   * @return f(x).
-   */
-  static double fn(const double x)
-  {
-    return std::max(0.0, x);
-  }
-
-  /**
-   * Computes the rectifier function using a dense matrix as input.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename eT>
-  static void fn(const arma::Mat<eT>& x, arma::Mat<eT>& y)
-  {
-    y = arma::max(arma::zeros<arma::Mat<eT> >(x.n_rows, x.n_cols), x);
-  }
-
-  /**
-   * Computes the rectifier function using a 3rd-order tensor as input.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename eT>
-  static void fn(const arma::Cube<eT>& x, arma::Cube<eT>& y)
-  {
-    y = x;
-    for (size_t s = 0; s < x.n_slices; s++)
-      fn(x.slice(s), y.slice(s));
-  }
-
-  /**
-   * Computes the first derivative of the rectifier function.
-   *
-   * @param y Input activation, i.e. f(x).
-   * @return f'(x)
-   */
-  static double deriv(const double y)
-  {
-    return y > 0;
-  }
-
-  /**
-   * Computes the first derivatives of the rectifier function.
-   *
-   * @param y Input activations.
-   * @param x The resulting derivatives.
-   */
-  template<typename InputType, typename OutputType>
-  static void deriv(const InputType& y, OutputType& x)
-  {
-    x = y;
-
-    for (size_t i = 0; i < y.n_elem; i++)
-      x(i) = deriv(y(i));
-  }
-}; // class RectifierFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
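
As with the logistic function, deriv() here takes the stored activation y;
since y > 0 exactly when the pre-activation was positive, the activation
alone suffices. A standalone Armadillo sketch (the relu/reluDeriv names are
illustrative, not mlpack API):

    #include <armadillo>

    // Element-wise ReLU over a matrix, as in the removed RectifierFunction.
    arma::mat relu(const arma::mat& x)
    {
      return arma::max(x, arma::zeros<arma::mat>(x.n_rows, x.n_cols));
    }

    // Derivative taken with respect to the stored activation y.
    arma::mat reluDeriv(const arma::mat& y)
    {
      return arma::conv_to<arma::mat>::from(y > 0);
    }

    int main()
    {
      const arma::mat x = {{-1.5, 0.0, 2.0}};
      relu(x).print("relu(x):");
      reluDeriv(relu(x)).print("relu'(x):");
      return 0;
    }
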
diff --git a/src/mlpack/methods/ann/activation_functions/softsign_function.hpp b/src/mlpack/methods/ann/activation_functions/softsign_function.hpp
deleted file mode 100644
index 45ef71b..0000000
--- a/src/mlpack/methods/ann/activation_functions/softsign_function.hpp
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * @file softsign_function.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the softsign function as described by
- * X. Glorot and Y. Bengio.
- *
- * For more information, see the following paper.
- *
- * @code
- * @inproceedings{GlorotAISTATS2010,
-   *   title={Understanding the difficulty of training deep feedforward
-   *   neural networks},
- *   author={Glorot, Xavier and Bengio, Yoshua},
- *   booktitle={Proceedings of AISTATS 2010},
- *   year={2010}
- * }
- * @endcode
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_SOFTSIGN_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_SOFTSIGN_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The softsign function, defined by
- *
- * @f{eqnarray*}{
- * f(x) &=& \frac{x}{1 + |x|} \\
- * f'(x) &=& (1 - |f(x)|)^2 \\
- * f^{-1}(y) &=& \left\{
- *   \begin{array}{lr}
- *     \frac{y}{1 - y} & : y > 0 \\
- *     \frac{y}{1 + y} & : y \le 0
- *   \end{array}
- * \right.
- * @f}
- */
-class SoftsignFunction
-{
-  public:
-  /**
-   * Computes the softsign function.
-   *
-   * @param x Input data.
-   * @return f(x).
-   */
-  static double fn(const double x)
-  {
-    if (x < DBL_MAX)
-      return x > -DBL_MAX ? x / (1.0 + std::abs(x)) : -1.0;
-    return 1.0;
-  }
-
-  /**
-   * Computes the softsign function.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void fn(const InputVecType& x, OutputVecType& y)
-  {
-    y = x;
-
-    for (size_t i = 0; i < x.n_elem; i++)
-      y(i) = fn(x(i));
-  }
-
-  /**
-   * Computes the first derivative of the softsign function.
-   *
-   * @param y Input data.
-   * @return f'(x)
-   */
-  static double deriv(const double y)
-  {
-    return std::pow(1.0 - std::abs(y), 2);
-  }
-
-  /**
-   * Computes the first derivatives of the softsign function.
-   *
-   * @param y Input activations.
-   * @param x The resulting derivatives.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void deriv(const InputVecType& y, OutputVecType& x)
-  {
-    x = arma::pow(1.0 - arma::abs(y), 2);
-  }
-
-  /**
-   * Computes the inverse of the softsign function.
-   *
-   * @param y Input data.
-   * @return f^{-1}(y)
-   */
-  static double inv(const double y)
-  {
-    if (y > 0)
-      return y < 1 ? -y / (y - 1) : DBL_MAX;
-    else
-      return y > -1 ? y / (1 + y) : -DBL_MAX;
-  }
-
-  /**
-   * Computes the inverse of the softsign function.
-   *
-   * @param y Input data.
-   * @param x The resulting inverse of the input data.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void inv(const InputVecType& y, OutputVecType& x)
-  {
-    x = y;
-
-    for (size_t i = 0; i < y.n_elem; i++)
-      x(i) = inv(y(i));
-  }
-}; // class SoftsignFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
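
A quick standalone round-trip check that inv(fn(x)) recovers x for the
softsign pair above (plain C++; softsign/softsignInv are illustrative
names, not mlpack API):

    #include <cmath>
    #include <cstdio>
    #include <initializer_list>

    // Softsign and its inverse, matching the removed fn()/inv() pair.
    double softsign(const double x) { return x / (1.0 + std::abs(x)); }

    double softsignInv(const double y)
    {
      // For y in (-1, 1): positive branch y / (1 - y), negative y / (1 + y).
      return (y > 0) ? y / (1.0 - y) : y / (1.0 + y);
    }

    int main()
    {
      for (const double x : {-10.0, -0.5, 0.0, 0.5, 10.0})
        std::printf("x = %6.2f  f(x) = %9.6f  inv(f(x)) = %6.2f\n",
                    x, softsign(x), softsignInv(softsign(x)));
      return 0;
    }
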
diff --git a/src/mlpack/methods/ann/activation_functions/tanh_function.hpp b/src/mlpack/methods/ann/activation_functions/tanh_function.hpp
deleted file mode 100644
index 4cd81f2..0000000
--- a/src/mlpack/methods/ann/activation_functions/tanh_function.hpp
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * @file tanh_function.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the hyperbolic tangent (tanh) function.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_TANH_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_ACTIVATION_FUNCTIONS_TANH_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The tanh function, defined by
- *
- * @f{eqnarray*}{
- * f(x) &=& \frac{e^x - e^{-x}}{e^x + e^{-x}} \\
- * f'(x) &=& 1 - \tanh^2(x) \\
- * f^{-1}(y) &=& \tanh^{-1}(y)
- * @f}
- */
-class TanhFunction
-{
-  public:
-  /**
-   * Computes the tanh function.
-   *
-   * @param x Input data.
-   * @return f(x).
-   */
-  static double fn(const double x)
-  {
-    return std::tanh(x);
-  }
-
-  /**
-   * Computes the tanh function.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void fn(const InputVecType& x, OutputVecType& y)
-  {
-    y = arma::tanh(x);
-  }
-
-  /**
-   * Computes the first derivative of the tanh function.
-   *
-   * @param y Input data.
-   * @return f'(x)
-   */
-  static double deriv(const double y)
-  {
-    return 1 - std::pow(y, 2);
-  }
-
-  /**
-   * Computes the first derivatives of the tanh function.
-   *
-   * @param y Input data.
-   * @param x The resulting derivatives.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void deriv(const InputVecType& y, OutputVecType& x)
-  {
-    x = 1 - arma::pow(y, 2);
-  }
-
-  /**
-   * Computes the inverse of the tanh function.
-   *
-   * @param y Input data.
-   * @return f^{-1}(y)
-   */
-  static double inv(const double y)
-  {
-    return std::atanh(y);
-  }
-
-  /**
-   * Computes the inverse of the tanh function.
-   *
-   * @param y Input data.
-   * @param x The resulting inverse of the input data.
-   */
-  template<typename InputVecType, typename OutputVecType>
-  static void inv(const InputVecType& y, OutputVecType& x)
-  {
-    x = arma::atanh(y);
-  }
-}; // class TanhFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
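
Here too, deriv() consumes the stored activation y = tanh(x) and evaluates
1 - y^2; a minimal standalone check using only the standard library:

    #include <cmath>
    #include <cstdio>

    int main()
    {
      const double x = 0.75;
      const double y = std::tanh(x);      // f(x).
      const double dy = 1.0 - y * y;      // f'(x), from the activation alone.
      const double back = std::atanh(y);  // f^{-1}(y) recovers x.

      std::printf("tanh(%.2f) = %.6f  deriv = %.6f  atanh = %.6f\n",
                  x, y, dy, back);
      return 0;
    }
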
diff --git a/src/mlpack/methods/ann/cnn.hpp b/src/mlpack/methods/ann/cnn.hpp
deleted file mode 100644
index 0f01ed5..0000000
--- a/src/mlpack/methods/ann/cnn.hpp
+++ /dev/null
@@ -1,448 +0,0 @@
-/**
- * @file cnn.hpp
- * @author Shangtong Zhang
- * @author Marcus Edel
- *
- * Definition of the CNN class, which implements convolutional neural networks.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_CNN_HPP
-#define MLPACK_METHODS_ANN_CNN_HPP
-
-#include <mlpack/prereqs.hpp>
-
-#include <mlpack/methods/ann/network_util.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-#include <mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp>
-#include <mlpack/methods/ann/performance_functions/cee_function.hpp>
-#include <mlpack/core/optimizers/rmsprop/rmsprop.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of a standard convolutional network.
- *
- * @tparam LayerTypes Contains all layer modules used to construct the network.
- * @tparam OutputLayerType The output layer type used to evaluate the network.
- * @tparam PerformanceFunction Performance strategy used to calculate the error.
- */
-template <
-  typename LayerTypes,
-  typename OutputLayerType,
-  typename InitializationRuleType = NguyenWidrowInitialization,
-  class PerformanceFunction = CrossEntropyErrorFunction<>
->
-class CNN
-{
- public:
-  //! Convenience typedef for the internal model construction.
-  using NetworkType = CNN<LayerTypes,
-                          OutputLayerType,
-                          InitializationRuleType,
-                          PerformanceFunction>;
-
-  /**
-   * Create the CNN object with the given predictors and responses set (this is
-   * the set that is used to train the network) and the given optimizer.
-   * Optionally, specify which initialization rule and performance function
-   * should be used.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param optimizer Instantiated optimizer used to train the model.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameters.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType,
-           typename OutputType,
-           template<typename> class OptimizerType>
-  CNN(LayerType &&network,
-      OutputType &&outputLayer,
-      const arma::cube& predictors,
-      const arma::mat& responses,
-      OptimizerType<NetworkType>& optimizer,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-
-  /**
-   * Create the CNN object with the given predictors and responses set (this is
-   * the set that is used to train the network). Optionally, specify which
-   * initialization rule and performance function should be used.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameters.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType, typename OutputType>
-  CNN(LayerType &&network,
-      OutputType &&outputLayer,
-      const arma::cube& predictors,
-      const arma::mat& responses,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-
-  /**
-   * Create the CNN object with an empty predictors and responses set and
-   * default optimizer. Make sure to call Train(predictors, responses) when
-   * training.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameters.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType, typename OutputType>
-  CNN(LayerType &&network,
-      OutputType &&outputLayer,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-  /**
-   * Train the convolutional neural network on the given input data. By default, the
-   * RMSprop optimization algorithm is used, but others can be specified
-   * (such as mlpack::optimization::SGD).
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @tparam OptimizerType Type of optimizer to use to train the model.
-   * @param predictors Input training variables.
-   * @param responses Outputs results from input training variables.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::RMSprop
-  >
-  void Train(const arma::cube& predictors, const arma::mat& responses);
-
-  /**
-   * Train the convolutional neural network with the given instantiated optimizer.
-   * Using this overload allows configuring the instantiated optimizer before
-   * training is performed.
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @param optimizer Instantiated optimizer used to train the model.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::RMSprop
-  >
-  void Train(OptimizerType<NetworkType>& optimizer);
-
-  /**
-   * Train the convolutional neural network on the given input data using the
-   * given optimizer.
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @tparam OptimizerType Type of optimizer to use to train the model.
-   * @param predictors Input training variables.
-   * @param responses Outputs results from input training variables.
-   * @param optimizer Instantiated optimizer used to train the model.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::RMSprop
-  >
-  void Train(const arma::cube& predictors,
-             const arma::mat& responses,
-             OptimizerType<NetworkType>& optimizer);
-
-  /**
-   * Predict the responses to a given set of predictors. The responses will
-   * reflect the output of the given output layer as returned by the
-   * OutputClass() function.
-   *
-   * @param predictors Input predictors.
-   * @param responses Matrix to put output predictions of responses into.
-   */
-  void Predict(arma::cube& predictors, arma::mat& responses);
-
-  /**
-   * Evaluate the convolutional neural network with the given parameters. This
-   * function is usually called by the optimizer to train the model.
-   *
-   * @param parameters Matrix model parameters.
-   * @param i Index of point to use for objective function evaluation.
-   * @param deterministic Whether or not to train or test the model. Note that
-   * some layers act differently in training or testing mode.
-   */
-  double Evaluate(const arma::mat& parameters,
-                  const size_t i,
-                  const bool deterministic = true);
-
-  /**
-   * Evaluate the gradient of the convolutional neural network with the given
-   * parameters, and with respect to only one point in the dataset. This is
-   * useful for optimizers such as SGD, which require a separable objective
-   * function.
-   *
-   * @param parameters Matrix of the model parameters to be optimized.
-   * @param i Index of point to use for objective function gradient evaluation.
-   * @param gradient Matrix to output gradient into.
-   */
-  void Gradient(const arma::mat& parameters,
-                const size_t i,
-                arma::mat& gradient);
-
-  //! Return the number of separable functions (the number of predictor points).
-  size_t NumFunctions() const { return numFunctions; }
-
-  //! Return the initial point for the optimization.
-  const arma::mat& Parameters() const { return parameter; }
-  //! Modify the initial point for the optimization.
-  arma::mat& Parameters() { return parameter; }
-
-  /**
-   * Serialize the convolutional neural network.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */);
-
- private:
-  /**
-   * Reset the network by setting the layer status.
-   */
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ResetParameter(std::tuple<Tp...>& /* unused */) { /* Nothing to do here */ }
-
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ResetParameter(std::tuple<Tp...>& network)
-  {
-    ResetDeterministic(std::get<I>(network));
-    ResetParameter<I + 1, Tp...>(network);
-  }
-
-  /**
-   * Reset the layer status by setting the current deterministic parameter
-   * through all layers that implement the Deterministic function.
-   */
-  template<typename T>
-  typename std::enable_if<
-      HasDeterministicCheck<T, bool&(T::*)(void)>::value, void>::type
-  ResetDeterministic(T& layer)
-  {
-    layer.Deterministic() = deterministic;
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      !HasDeterministicCheck<T, bool&(T::*)(void)>::value, void>::type
-  ResetDeterministic(T& /* unused */) { /* Nothing to do here */
-  }
-
-  /**
-   * Run a single iteration of the feed forward algorithm, using the given
-   * input and target vector, and store the calculated error in the error
-   * vector.
-   */
-  template<size_t I = 0, typename DataType, typename... Tp>
-  void Forward(const DataType& input, std::tuple<Tp...>& network)
-  {
-    std::get<I>(network).InputParameter() = input;
-
-    std::get<I>(network).Forward(std::get<I>(network).InputParameter(),
-                           std::get<I>(network).OutputParameter());
-
-    ForwardTail<I + 1, Tp...>(network);
-  }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ForwardTail(std::tuple<Tp...>& network)
-  {
-    LinkParameter(network);
-  }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ForwardTail(std::tuple<Tp...>& network)
-  {
-    std::get<I>(network).Forward(std::get<I - 1>(network).OutputParameter(),
-        std::get<I>(network).OutputParameter());
-
-    ForwardTail<I + 1, Tp...>(network);
-  }
-
-  /**
-   * Link the calculated activation with the connection layer.
-   */
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  LinkParameter(std::tuple<Tp...>& /* unused */) { /* Nothing to do here */ }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  LinkParameter(std::tuple<Tp...>& network)
-  {
-    if (!LayerTraits<typename std::remove_reference<
-        decltype(std::get<I>(network))>::type>::IsBiasLayer)
-    {
-      std::get<I>(network).InputParameter() = std::get<I - 1>(
-          network).OutputParameter();
-    }
-
-    LinkParameter<I + 1, Tp...>(network);
-  }
-
-  /*
-   * Calculate the output error and update the overall error.
-   */
-  template<typename DataType, typename ErrorType, typename... Tp>
-  double OutputError(const DataType& target,
-                     ErrorType& error,
-                     const std::tuple<Tp...>& network)
-  {
-    // Calculate and store the output error.
-    outputLayer.CalculateError(
-        std::get<sizeof...(Tp) - 1>(network).OutputParameter(), target, error);
-
-    // Measures the network's performance with the specified performance
-    // function.
-    return performanceFunc.Error(network, target, error);
-  }
-
-  /**
-   * Run a single iteration of the feed backward algorithm, using the given
-   * error of the output layer. Note that we iterate backward through the
-   * layer modules.
-   */
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I < (sizeof...(Tp) - 1), void>::type
-  Backward(const DataType& error, std::tuple<Tp...>& network)
-  {
-    std::get<sizeof...(Tp) - I>(network).Backward(
-        std::get<sizeof...(Tp) - I>(network).OutputParameter(), error,
-        std::get<sizeof...(Tp) - I>(network).Delta());
-
-    BackwardTail<I + 1, DataType, Tp...>(error, network);
-  }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I == (sizeof...(Tp)), void>::type
-  BackwardTail(const DataType& /* unused */,
-               std::tuple<Tp...>& /* unused */) { /* Nothing to do here */ }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I < (sizeof...(Tp)), void>::type
-  BackwardTail(const DataType& error, std::tuple<Tp...>& network)
-  {
-    std::get<sizeof...(Tp) - I>(network).Backward(
-        std::get<sizeof...(Tp) - I>(network).OutputParameter(),
-        std::get<sizeof...(Tp) - I + 1>(network).Delta(),
-        std::get<sizeof...(Tp) - I>(network).Delta());
-
-    BackwardTail<I + 1, DataType, Tp...>(error, network);
-  }
-
-  /**
-   * Iterate through all layer modules and update the gradient using the
-   * layer defined optimizer.
-   */
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I == Max, void>::type
-  UpdateGradients(std::tuple<Tp...>& /* unused */) { /* Nothing to do here */ }
-
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I < Max, void>::type
-  UpdateGradients(std::tuple<Tp...>& network)
-  {
-    Update(std::get<I>(network), std::get<I>(network).OutputParameter(),
-           std::get<I + 1>(network).Delta());
-
-    UpdateGradients<I + 1, Max, Tp...>(network);
-  }
-
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      HasGradientCheck<T, P&(T::*)()>::value, void>::type
-  Update(T& layer, P& /* unused */, D& delta)
-  {
-    layer.Gradient(layer.InputParameter(), delta, layer.Gradient());
-  }
-
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      !HasGradientCheck<T, P&(T::*)()>::value, void>::type
-  Update(T& /* unused */, P& /* unused */, D& /* unused */)
-  {
-    /* Nothing to do here */
-  }
-
-  /*
-   * Calculate and store the output activation.
-   */
-  template<typename DataType, typename... Tp>
-  void OutputPrediction(DataType& output, std::tuple<Tp...>& network)
-  {
-    // Calculate and store the output prediction.
-    outputLayer.OutputClass(std::get<sizeof...(Tp) - 1>(
-        network).OutputParameter(), output);
-  }
-
-  //! Instantiated convolutional neural network.
-  LayerTypes network;
-
-  //! The output layer used to evaluate the network.
-  OutputLayerType& outputLayer;
-
-  //! Performance strategy used to calculate the error.
-  PerformanceFunction performanceFunc;
-
-  //! The current evaluation mode (training or testing).
-  bool deterministic;
-
-  //! Matrix of (trained) parameters.
-  arma::mat parameter;
-
-  //! The matrix of data points (predictors).
-  arma::cube predictors;
-
-  //! The matrix of responses to the input data points.
-  arma::mat responses;
-
-  //! The number of separable functions (the number of predictor points).
-  size_t numFunctions;
-
-  //! Locally stored backward error.
-  arma::mat error;
-
-  //! Locally stored sample size.
-  size_t sampleSize;
-}; // class CNN
-
-} // namespace ann
-} // namespace mlpack
-
-// Include implementation.
-#include "cnn_impl.hpp"
-
-#endif
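
The Forward(), Backward(), ResetParameter(), and UpdateGradients() members
above all use the same idiom: compile-time recursion over a std::tuple of
heterogeneous layer types, with std::enable_if selecting the base case. A
distilled standalone sketch of just that idiom (the A/B types and the
Process() hook are hypothetical stand-ins, not mlpack API):

    #include <cstddef>
    #include <iostream>
    #include <tuple>
    #include <type_traits>

    struct A { void Process() { std::cout << "A" << std::endl; } };
    struct B { void Process() { std::cout << "B" << std::endl; } };

    // Base case: index I is one past the last tuple element; stop.
    template<std::size_t I = 0, typename... Tp>
    typename std::enable_if<I == sizeof...(Tp), void>::type
    ForEach(std::tuple<Tp...>& /* network */) { }

    // Recursive case: handle element I, then recurse on I + 1. The
    // recursion unrolls at compile time, so heterogeneous layer types
    // need no common base class and no virtual dispatch.
    template<std::size_t I = 0, typename... Tp>
    typename std::enable_if<I < sizeof...(Tp), void>::type
    ForEach(std::tuple<Tp...>& network)
    {
      std::get<I>(network).Process();
      ForEach<I + 1, Tp...>(network);
    }

    int main()
    {
      std::tuple<A, B, A> network;
      ForEach(network);  // Prints A, B, A in order.
      return 0;
    }
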
diff --git a/src/mlpack/methods/ann/convolution_rules/fft_convolution.hpp b/src/mlpack/methods/ann/convolution_rules/fft_convolution.hpp
deleted file mode 100644
index 4eaa038..0000000
--- a/src/mlpack/methods/ann/convolution_rules/fft_convolution.hpp
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * @file fft_convolution.hpp
- * @author Shangtong Zhang
- * @author Marcus Edel
- *
- * Implementation of the convolution through fft.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_CONVOLUTION_RULES_FFT_CONVOLUTION_HPP
-#define MLPACK_METHODS_ANN_CONVOLUTION_RULES_FFT_CONVOLUTION_HPP
-
-#include <mlpack/prereqs.hpp>
-#include "border_modes.hpp"
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Computes the two-dimensional convolution through the FFT. This class allows
- * specification of the border type. The convolution can be computed
- * with the valid border type or the full border type (default).
- *
- * FullConvolution: returns the full two-dimensional convolution.
- * ValidConvolution: returns only those parts of the convolution that are
- * computed without the zero-padded edges.
- *
- * @tparam BorderMode Type of the border mode (FullConvolution or
- * ValidConvolution).
- * @tparam padLastDim Pad the last dimension of the input to turn it from
- * odd to even.
- */
-template<typename BorderMode = FullConvolution, const bool padLastDim = false>
-class FFTConvolution
-{
- public:
-  /*
-   * Perform a convolution through the FFT (valid mode). This method only
-   * supports input which is even on the last dimension. In case of an odd
-   * input width, a user can manually pad the input or specify the padLastDim
-   * parameter, which takes care of the padding. The filter can have any size,
-   * but in valid mode it has to be smaller than the input.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT, typename Border = BorderMode>
-  static typename std::enable_if<
-      std::is_same<Border, ValidConvolution>::value, void>::type
-  Convolution(const arma::Mat<eT>& input,
-              const arma::Mat<eT>& filter,
-              arma::Mat<eT>& output)
-  {
-    arma::Mat<eT> inputPadded = input;
-    arma::Mat<eT> filterPadded = filter;
-
-    if (padLastDim)
-      inputPadded.resize(inputPadded.n_rows, inputPadded.n_cols + 1);
-
-    // Pad filter and input to the output shape.
-    filterPadded.resize(inputPadded.n_rows, inputPadded.n_cols);
-
-    output = arma::real(ifft2(arma::fft2(inputPadded) % arma::fft2(
-        filterPadded)));
-
-    // Extract the region of interest. We don't need to handle padLastDim in a
-    // special way; we just cut it out of the output matrix.
-    output = output.submat(filter.n_rows - 1, filter.n_cols - 1,
-        input.n_rows - 1, input.n_cols - 1);
-  }
-
-  /*
-   * Perform a convolution through the FFT (full mode). This method only
-   * supports input which is even on the last dimension. In case of an odd
-   * input width, a user can manually pad the input or specify the padLastDim
-   * parameter, which takes care of the padding. The filter can have any size.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT, typename Border = BorderMode>
-  static typename std::enable_if<
-      std::is_same<Border, FullConvolution>::value, void>::type
-  Convolution(const arma::Mat<eT>& input,
-              const arma::Mat<eT>& filter,
-              arma::Mat<eT>& output)
-  {
-    // In the case of the full convolution, outputRows and outputCols don't
-    // represent the true output size when the padLastDim parameter is set;
-    // instead, they give the working size.
-    const size_t outputRows = input.n_rows + 2 * (filter.n_rows - 1);
-    size_t outputCols = input.n_cols + 2 * (filter.n_cols - 1);
-
-    if (padLastDim)
-        outputCols++;
-
-    // Pad filter and input to the working output shape.
-    arma::Mat<eT> inputPadded = arma::zeros<arma::Mat<eT> >(outputRows,
-        outputCols);
-    inputPadded.submat(filter.n_rows - 1, filter.n_cols - 1,
-          filter.n_rows - 1 + input.n_rows - 1,
-          filter.n_cols - 1 + input.n_cols - 1) = input;
-
-    arma::Mat<eT> filterPadded = filter;
-    filterPadded.resize(outputRows, outputCols);
-
-    // Perform FFT and IFFT
-    output = arma::real(ifft2(arma::fft2(inputPadded) % arma::fft2(
-        filterPadded)));
-
-    // Extract the region of interest. We don't need to handle the padLastDim
-    // parameter in a special way; we just cut it out of the output matrix.
-    output = output.submat(filter.n_rows - 1, filter.n_cols - 1,
-        2 * (filter.n_rows - 1) + input.n_rows - 1,
-        2 * (filter.n_cols - 1) + input.n_cols - 1);
-  }
-
-  /*
-   * Perform a convolution through the FFT using 3rd order tensors. This method
-   * only supports input which is even on the last dimension. In case of an odd
-   * input width, a user can manually pad the input or specify the padLastDim
-   * parameter, which takes care of the padding. The filter can have any
-   * size.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Cube<eT>& input,
-                          const arma::Cube<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    FFTConvolution<BorderMode>::Convolution(input.slice(0), filter.slice(0),
-        convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        input.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < input.n_slices; i++)
-    {
-      FFTConvolution<BorderMode>::Convolution(input.slice(i), filter.slice(i),
-          convOutput);
-      output.slice(i) = convOutput;
-    }
-  }
-
-  /*
-   * Perform a convolution through the FFT using a dense matrix as input and
-   * 3rd order tensors as filter and output. This method only supports input
-   * which is even on the last dimension. In case of an odd input width, a user
-   * can manually pad the input or specify the padLastDim parameter, which
-   * takes care of the padding. The filter can have any size.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Mat<eT>& input,
-                          const arma::Cube<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    FFTConvolution<BorderMode>::Convolution(input, filter.slice(0),
-        convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        filter.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < filter.n_slices; i++)
-    {
-      FFTConvolution<BorderMode>::Convolution(input, filter.slice(i),
-          convOutput);
-      output.slice(i) = convOutput;
-    }
-  }
-
-  /*
-   * Perform a convolution using 3rd order tensors as input and output and a
-   * dense matrix as filter.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Cube<eT>& input,
-                          const arma::Mat<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    FFTConvolution<BorderMode>::Convolution(input.slice(0), filter,
-        convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        input.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < input.n_slices; i++)
-    {
-      FFTConvolution<BorderMode>::Convolution(input.slice(i), filter,
-          convOutput);
-      output.slice(i) = convOutput;
-    }
-  }
-
-};  // class FFTConvolution
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
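
The essential trick in the class above is that fft2/ifft2 compute a
circular convolution, so both operands are zero-padded to the output size
before the element-wise product. A standalone Armadillo sketch of that
step, using the standard full-convolution size m + k - 1 rather than the
larger working sizes used above:

    #include <armadillo>

    int main()
    {
      const arma::mat input(5, 6, arma::fill::randu);
      const arma::mat filter(3, 3, arma::fill::randu);

      // Full linear convolution has size (m + k - 1) per dimension.
      const arma::uword rows = input.n_rows + filter.n_rows - 1;
      const arma::uword cols = input.n_cols + filter.n_cols - 1;

      // Zero-pad both operands so the circular convolution computed in
      // the frequency domain equals the linear convolution.
      arma::mat inputPadded(rows, cols, arma::fill::zeros);
      inputPadded.submat(0, 0, input.n_rows - 1, input.n_cols - 1) = input;

      arma::mat filterPadded(rows, cols, arma::fill::zeros);
      filterPadded.submat(0, 0, filter.n_rows - 1,
                          filter.n_cols - 1) = filter;

      // Convolution theorem: conv(a, b) = ifft2(fft2(a) % fft2(b)).
      const arma::mat full = arma::real(arma::ifft2(
          arma::fft2(inputPadded) % arma::fft2(filterPadded)));

      full.print("full convolution:");
      return 0;
    }
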
diff --git a/src/mlpack/methods/ann/convolution_rules/naive_convolution.hpp b/src/mlpack/methods/ann/convolution_rules/naive_convolution.hpp
deleted file mode 100644
index 0e4e05f..0000000
--- a/src/mlpack/methods/ann/convolution_rules/naive_convolution.hpp
+++ /dev/null
@@ -1,190 +0,0 @@
-/**
- * @file naive_convolution.hpp
- * @author Shangtong Zhang
- * @author Marcus Edel
- *
- * Implementation of the convolution.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_CONVOLUTION_RULES_NAIVE_CONVOLUTION_HPP
-#define MLPACK_METHODS_ANN_CONVOLUTION_RULES_NAIVE_CONVOLUTION_HPP
-
-#include <mlpack/prereqs.hpp>
-#include "border_modes.hpp"
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Computes the two-dimensional convolution. This class allows specification of
- * the border type. The convolution can be computed with the valid
- * border type or the full border type (default).
- *
- * FullConvolution: returns the full two-dimensional convolution.
- * ValidConvolution: returns only those parts of the convolution that are
- * computed without the zero-padded edges.
- *
- * @tparam BorderMode Type of the border mode (FullConvolution or
- * ValidConvolution).
- */
-template<typename BorderMode = FullConvolution>
-class NaiveConvolution
-{
- public:
-  /*
-   * Perform a convolution (valid mode).
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT, typename Border = BorderMode>
-  static typename std::enable_if<
-      std::is_same<Border, ValidConvolution>::value, void>::type
-  Convolution(const arma::Mat<eT>& input,
-              const arma::Mat<eT>& filter,
-              arma::Mat<eT>& output)
-  {
-    output = arma::zeros<arma::Mat<eT> >(input.n_rows - filter.n_rows + 1,
-        input.n_cols - filter.n_cols + 1);
-
-    // It seems to be about 3.5 times faster to use pointers instead of
-    // filter(ki, kj) * input(leftInput + ki, topInput + kj) and output(i, j).
-    eT* outputPtr = output.memptr();
-
-    for (size_t j = 0; j < output.n_cols; ++j)
-    {
-      for (size_t i = 0; i < output.n_rows; ++i, outputPtr++)
-      {
-        const eT* kernelPtr = filter.memptr();
-        for (size_t kj = 0; kj < filter.n_cols; ++kj)
-        {
-          const eT* inputPtr = input.colptr(kj + j) + i;
-          for (size_t ki = 0; ki < filter.n_rows; ++ki, ++kernelPtr, ++inputPtr)
-            *outputPtr += *kernelPtr * (*inputPtr);
-        }
-      }
-    }
-  }
-
-  /*
-   * Perform a convolution (full mode).
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT, typename Border = BorderMode>
-  static typename std::enable_if<
-      std::is_same<Border, FullConvolution>::value, void>::type
-  Convolution(const arma::Mat<eT>& input,
-              const arma::Mat<eT>& filter,
-              arma::Mat<eT>& output)
-  {
-    const size_t outputRows = input.n_rows + 2 * (filter.n_rows - 1);
-    const size_t outputCols = input.n_cols + 2 * (filter.n_cols - 1);
-
-    // Pad filter and input to the working output shape.
-    arma::Mat<eT> inputPadded = arma::zeros<arma::Mat<eT> >(outputRows,
-        outputCols);
-    inputPadded.submat(filter.n_rows - 1, filter.n_cols - 1,
-          filter.n_rows - 1 + input.n_rows - 1,
-          filter.n_cols - 1 + input.n_cols - 1) = input;
-
-    NaiveConvolution<ValidConvolution>::Convolution(inputPadded, filter,
-        output);
-  }
-
-  /*
-   * Perform a convolution using 3rd order tensors.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Cube<eT>& input,
-                          const arma::Cube<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    NaiveConvolution<BorderMode>::Convolution(input.slice(0), filter.slice(0),
-        convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        input.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < input.n_slices; i++)
-    {
-      NaiveConvolution<BorderMode>::Convolution(input.slice(i), filter.slice(i),
-          output.slice(i));
-    }
-  }
-
-  /*
-   * Perform a convolution using a dense matrix as input and 3rd order tensors
-   * as filter and output.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Mat<eT>& input,
-                          const arma::Cube<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    NaiveConvolution<BorderMode>::Convolution(input, filter.slice(0),
-        convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        filter.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < filter.n_slices; i++)
-    {
-      NaiveConvolution<BorderMode>::Convolution(input, filter.slice(i),
-          output.slice(i));
-    }
-  }
-
-  /*
-   * Perform a convolution using 3rd order tensors as input and output and a
-   * dense matrix as filter.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Cube<eT>& input,
-                          const arma::Mat<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    NaiveConvolution<BorderMode>::Convolution(input.slice(0), filter,
-        convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        input.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < input.n_slices; i++)
-    {
-      NaiveConvolution<BorderMode>::Convolution(input.slice(i), filter,
-          output.slice(i));
-    }
-  }
-
-};  // class NaiveConvolution
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
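
For comparison with the pointer-based inner loop above, the textbook
formulation of the same valid-mode computation takes one window dot product
per output cell; note that, like the original, it does not flip the kernel,
so strictly it computes a cross-correlation. A standalone Armadillo sketch
(validConv is an illustrative name):

    #include <armadillo>

    // Valid-mode 2-D "convolution" with the same indexing as the removed
    // NaiveConvolution inner loop (no kernel flip).
    arma::mat validConv(const arma::mat& input, const arma::mat& filter)
    {
      arma::mat output(input.n_rows - filter.n_rows + 1,
                       input.n_cols - filter.n_cols + 1,
                       arma::fill::zeros);

      for (arma::uword j = 0; j < output.n_cols; ++j)
      {
        for (arma::uword i = 0; i < output.n_rows; ++i)
        {
          // Dot product of the filter with the window anchored at (i, j).
          output(i, j) = arma::accu(filter % input.submat(i, j,
              i + filter.n_rows - 1, j + filter.n_cols - 1));
        }
      }

      return output;
    }

    int main()
    {
      const arma::mat input(6, 6, arma::fill::randu);
      const arma::mat filter(3, 3, arma::fill::randu);
      validConv(input, filter).print("valid convolution:");
      return 0;
    }
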
diff --git a/src/mlpack/methods/ann/convolution_rules/svd_convolution.hpp b/src/mlpack/methods/ann/convolution_rules/svd_convolution.hpp
deleted file mode 100644
index e34d8bd..0000000
--- a/src/mlpack/methods/ann/convolution_rules/svd_convolution.hpp
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * @file svd_convolution.hpp
- * @author Marcus Edel
- *
- * Implementation of the convolution using the singular value decomposition to
- * speed up the computation.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_CONVOLUTION_RULES_SVD_CONVOLUTION_HPP
-#define MLPACK_METHODS_ANN_CONVOLUTION_RULES_SVD_CONVOLUTION_HPP
-
-#include <mlpack/prereqs.hpp>
-#include "border_modes.hpp"
-#include "fft_convolution.hpp"
-#include "naive_convolution.hpp"
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Computes the two-dimensional convolution using singular value decomposition.
- * This class allows specification of the border type. The
- * convolution can be computed with the valid border type or the full border
- * type (default).
- *
- * FullConvolution: returns the full two-dimensional convolution.
- * ValidConvolution: returns only those parts of the convolution that are
- * computed without the zero-padded edges.
- *
- * @tparam BorderMode Type of the border mode (FullConvolution or
- * ValidConvolution).
- */
-template<typename BorderMode = FullConvolution>
-class SVDConvolution
-{
- public:
-  /*
-   * Perform a convolution (valid or full mode) using singular value
-   * decomposition. By using singular value decomposition of the filter matrix
-   * the convolution can be expressed as a sum of outer products. Each product
-   * can be computed efficiently as convolution with a row and a column vector.
-   * The individual convolutions are computed with the naive implementation
-   * which is fast if the filter is low-dimensional.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Mat<eT>& input,
-                          const arma::Mat<eT>& filter,
-                          arma::Mat<eT>& output)
-  {
-    // Use the naive convolution in case the filter isn't two dimensional or the
-    // filter is bigger than the input.
-    if (filter.n_rows > input.n_rows || filter.n_cols > input.n_cols ||
-        filter.n_rows == 1 || filter.n_cols == 1)
-    {
-      NaiveConvolution<BorderMode>::Convolution(input, filter, output);
-    }
-    else
-    {
-      arma::Mat<eT> U, V, subOutput;
-      arma::Col<eT> s;
-
-      arma::svd_econ(U, s, V, filter);
-
-      // Rank approximation using the singular values calculated with singular
-      // value decomposition of dense filter matrix.
-      const size_t rank = arma::sum(s > (s.n_elem * arma::max(s) *
-          arma::datum::eps));
-
-      // Test for separability based on the rank of the kernel and take
-      // advantage of the low rank.
-      if (rank * (filter.n_rows + filter.n_cols) < filter.n_elem)
-      {
-        arma::Mat<eT> subFilter = V.unsafe_col(0) * s(0);
-        NaiveConvolution<BorderMode>::Convolution(input, subFilter, subOutput);
-
-        subOutput = subOutput.t();
-        NaiveConvolution<BorderMode>::Convolution(subOutput, U.unsafe_col(0),
-            output);
-
-        for (size_t r = 1; r < rank; r++)
-        {
-          subFilter = V.unsafe_col(r) * s(r);
-          NaiveConvolution<BorderMode>::Convolution(input, subFilter,
-              subOutput);
-
-          arma::Mat<eT> temp;
-          subOutput = subOutput.t();
-          NaiveConvolution<BorderMode>::Convolution(subOutput, U.unsafe_col(r),
-              temp);
-          output += temp;
-        }
-
-        output = output.t();
-      }
-      else
-      {
-        FFTConvolution<BorderMode>::Convolution(input, filter, output);
-      }
-    }
-  }
-
-  /*
-   * Perform a convolution using 3rd order tensors.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Cube<eT>& input,
-                          const arma::Cube<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    SVDConvolution<BorderMode>::Convolution(input.slice(0), filter.slice(0),
-        convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        input.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < input.n_slices; i++)
-    {
-      SVDConvolution<BorderMode>::Convolution(input.slice(i), filter.slice(i),
-          convOutput);
-      output.slice(i) = convOutput;
-    }
-  }
-
-  /*
-   * Perform a convolution using a dense matrix as input and 3rd order tensors
-   * as filter and output.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Mat<eT>& input,
-                          const arma::Cube<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    SVDConvolution<BorderMode>::Convolution(input, filter.slice(0), convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        filter.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < filter.n_slices; i++)
-    {
-      SVDConvolution<BorderMode>::Convolution(input, filter.slice(i),
-          convOutput);
-      output.slice(i) = convOutput;
-    }
-  }
-
-  /*
-   * Perform a convolution using 3rd order tensors as input and output and a
-   * dense matrix as filter.
-   *
-   * @param input Input used to perform the convolution.
-   * @param filter Filter used to perform the convolution.
-   * @param output Output data that contains the results of the convolution.
-   */
-  template<typename eT>
-  static void Convolution(const arma::Cube<eT>& input,
-                          const arma::Mat<eT>& filter,
-                          arma::Cube<eT>& output)
-  {
-    arma::Mat<eT> convOutput;
-    SVDConvolution<BorderMode>::Convolution(input.slice(0), filter, convOutput);
-
-    output = arma::Cube<eT>(convOutput.n_rows, convOutput.n_cols,
-        input.n_slices);
-    output.slice(0) = convOutput;
-
-    for (size_t i = 1; i < input.n_slices; i++)
-    {
-      SVDConvolution<BorderMode>::Convolution(input.slice(i), filter,
-          convOutput);
-      output.slice(i) = convOutput;
-    }
-  }
-
-};  // class SVDConvolution
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
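
The separability trick used by SVDConvolution above can be demonstrated directly with Armadillo. The sketch below is illustrative rather than part of mlpack: it substitutes arma::conv2 for the NaiveConvolution rule, and checks that convolving with a rank-1 kernel equals convolving with its scaled column factor and then its row factor.

    #include <armadillo>
    #include <iostream>

    int main()
    {
      arma::mat input(16, 16, arma::fill::randu);

      // A separable (rank-1) 5x5 binomial kernel: an outer product.
      arma::vec k = {1, 4, 6, 4, 1};
      arma::mat filter = (k * k.t()) / 256.0;

      // Direct two-dimensional convolution (full border mode).
      arma::mat direct = arma::conv2(input, filter, "full");

      // SVD of the kernel; a rank-1 kernel has one nonzero singular value.
      arma::mat U, V;
      arma::vec s;
      arma::svd_econ(U, s, V, filter);

      // Convolve with the column factor, then with the row factor.
      arma::mat separable = arma::conv2(
          arma::conv2(input, U.col(0) * s(0), "full"), V.col(0).t(), "full");

      // Should print a value on the order of machine epsilon.
      std::cout << "max abs difference: "
                << arma::abs(direct - separable).max() << std::endl;
    }
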
diff --git a/src/mlpack/methods/ann/ffn.hpp b/src/mlpack/methods/ann/ffn.hpp
deleted file mode 100644
index 0046536..0000000
--- a/src/mlpack/methods/ann/ffn.hpp
+++ /dev/null
@@ -1,447 +0,0 @@
-/**
- * @file ffn.hpp
- * @author Marcus Edel
- *
- * Definition of the FFN class, which implements feed forward neural networks.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_FFN_HPP
-#define MLPACK_METHODS_ANN_FFN_HPP
-
-#include <mlpack/prereqs.hpp>
-
-#include <mlpack/methods/ann/network_util.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-#include <mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp>
-#include <mlpack/methods/ann/performance_functions/cee_function.hpp>
-#include <mlpack/core/optimizers/rmsprop/rmsprop.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of a standard feed forward network.
- *
- * @tparam LayerTypes Contains all layer modules used to construct the network.
- * @tparam OutputLayerType The output layer type used to evaluate the network.
- * @tparam InitializationRuleType Rule used to initialize the weight matrix.
- * @tparam PerformanceFunction Performance strategy used to calculate the error.
- */
-template <
-  typename LayerTypes,
-  typename OutputLayerType,
-  typename InitializationRuleType = NguyenWidrowInitialization,
-  class PerformanceFunction = CrossEntropyErrorFunction<>
->
-class FFN
-{
- public:
-  //! Convenience typedef for the internal model construction.
-  using NetworkType = FFN<LayerTypes,
-                          OutputLayerType,
-                          InitializationRuleType,
-                          PerformanceFunction>;
-
-  /**
-   * Create the FFN object with the given predictors and responses set (this is
-   * the set that is used to train the network) and the given optimizer.
-   * Optionally, specify which initialization rule and performance function
-   * should be used.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param optimizer Instantiated optimizer used to train the model.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameter.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType,
-           typename OutputType,
-           template<typename> class OptimizerType>
-  FFN(LayerType &&network,
-      OutputType &&outputLayer,
-      const arma::mat& predictors,
-      const arma::mat& responses,
-      OptimizerType<NetworkType>& optimizer,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-
-  /**
-   * Create the FFN object with the given predictors and responses set (this is
-   * the set that is used to train the network). Optionally, specify which
-   * initialization rule and performance function should be used.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameter.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType, typename OutputType>
-  FFN(LayerType &&network,
-      OutputType &&outputLayer,
-      const arma::mat& predictors,
-      const arma::mat& responses,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-
-  /**
-   * Create the FFN object with an empty predictors and responses set and
-   * default optimizer. Make sure to call Train(predictors, responses) when
-   * training.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameter.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType, typename OutputType>
-  FFN(LayerType &&network,
-      OutputType &&outputLayer,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-
-  /**
-   * Train the feedforward network on the given input data. By default, the
-   * RMSprop optimization algorithm is used, but others can be specified
-   * (such as mlpack::optimization::SGD).
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @tparam OptimizerType Type of optimizer to use to train the model.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::RMSprop
-  >
-  void Train(const arma::mat& predictors, const arma::mat& responses);
-
-  /**
-   * Train the feedforward network with the given instantiated optimizer.
-   * Using this overload allows configuring the instantiated optimizer before
-   * training is performed.
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @param optimizer Instantiated optimizer used to train the model.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::RMSprop
-  >
-  void Train(OptimizerType<NetworkType>& optimizer);
-
-  /**
-   * Train the feedforward network on the given input data using the given
-   * optimizer.
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @tparam OptimizerType Type of optimizer to use to train the model.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param optimizer Instantiated optimizer used to train the model.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::RMSprop
-  >
-  void Train(const arma::mat& predictors,
-             const arma::mat& responses,
-             OptimizerType<NetworkType>& optimizer);
-
-  /**
-   * Predict the responses to a given set of predictors. The responses will
-   * reflect the output of the given output layer as returned by the
-   * OutputClass() function.
-   *
-   * @param predictors Input predictors.
-   * @param responses Matrix to put output predictions of responses into.
-   */
-  void Predict(arma::mat& predictors, arma::mat& responses);
-
-  /**
-   * Evaluate the feedforward network with the given parameters. This function
-   * is usually called by the optimizer to train the model.
-   *
-   * @param parameters Matrix of model parameters.
-   * @param i Index of point to use for objective function evaluation.
-   * @param deterministic Whether to train or test the model. Note that some
-   * layers act differently in training and testing mode.
-   */
-  double Evaluate(const arma::mat& parameters,
-                  const size_t i,
-                  const bool deterministic = true);
-
-  /**
-   * Evaluate the gradient of the feedforward network with the given parameters,
-   * and with respect to only one point in the dataset. This is useful for
-   * optimizers such as SGD, which require a separable objective function.
-   *
-   * @param parameters Matrix of the model parameters to be optimized.
-   * @param i Index of the point to use for objective function gradient
-   *        evaluation.
-   * @param gradient Matrix to output gradient into.
-   */
-  void Gradient(const arma::mat& parameters,
-                const size_t i,
-                arma::mat& gradient);
-
-  //! Return the number of separable functions (the number of predictor points).
-  size_t NumFunctions() const { return numFunctions; }
-
-  //! Return the initial point for the optimization.
-  const arma::mat& Parameters() const { return parameter; }
-  //! Modify the initial point for the optimization.
-  arma::mat& Parameters() { return parameter; }
-
-  //! Serialize the model.
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */);
-
- private:
-  /**
-   * Reset the network by zeroing the layer activations and by setting the
-   * layer status.
-   *
-   * enable_if (SFINAE) is used to iterate through the network. The general
-   * case peels off the first type and recurses, as usual with
-   * variadic function templates.
-   */
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ResetParameter(std::tuple<Tp...>& /* unused */) { /* Nothing to do here */ }
-
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ResetParameter(std::tuple<Tp...>& network)
-  {
-    ResetDeterministic(std::get<I>(network));
-    ResetParameter<I + 1, Tp...>(network);
-  }
-
-  /**
-   * Reset the layer status by setting the current deterministic parameter
-   * for all layers that implement the Deterministic() function.
-   */
-  template<typename T>
-  typename std::enable_if<
-      HasDeterministicCheck<T, bool&(T::*)(void)>::value, void>::type
-  ResetDeterministic(T& layer)
-  {
-    layer.Deterministic() = deterministic;
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      !HasDeterministicCheck<T, bool&(T::*)(void)>::value, void>::type
-  ResetDeterministic(T& /* unused */) { /* Nothing to do here */ }
-
-  /**
-   * Run a single iteration of the feed forward algorithm, using the given
-   * input, and store the calculated activations in the layer modules.
-   */
-  template<size_t I = 0, typename DataType, typename... Tp>
-  void Forward(const DataType& input, std::tuple<Tp...>& network)
-  {
-    std::get<I>(network).InputParameter() = input;
-
-    std::get<I>(network).Forward(std::get<I>(network).InputParameter(),
-        std::get<I>(network).OutputParameter());
-
-    ForwardTail<I + 1, Tp...>(network);
-  }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ForwardTail(std::tuple<Tp...>& network)
-  {
-    LinkParameter(network);
-  }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ForwardTail(std::tuple<Tp...>& network)
-  {
-    std::get<I>(network).Forward(std::get<I - 1>(network).OutputParameter(),
-                                 std::get<I>(network).OutputParameter());
-
-    ForwardTail<I + 1, Tp...>(network);
-  }
-
-  /**
-   * Link the calculated activation with the connection layer.
-   */
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  LinkParameter(std::tuple<Tp ...>& /* unused */) { /* Nothing to do here */ }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  LinkParameter(std::tuple<Tp...>& network)
-  {
-    if (!LayerTraits<typename std::remove_reference<
-        decltype(std::get<I>(network))>::type>::IsBiasLayer)
-    {
-      std::get<I>(network).InputParameter() = std::get<I - 1>(
-          network).OutputParameter();
-    }
-
-    LinkParameter<I + 1, Tp...>(network);
-  }
-
-  /*
-   * Calculate the output error and update the overall error.
-   */
-  template<typename DataType, typename ErrorType, typename... Tp>
-  double OutputError(const DataType& target,
-                     ErrorType& error,
-                     const std::tuple<Tp...>& network)
-  {
-    // Calculate and store the output error.
-    outputLayer.CalculateError(
-        std::get<sizeof...(Tp) - 1>(network).OutputParameter(), target, error);
-
-    // Measure the network's performance with the specified performance
-    // function.
-    return performanceFunc.Error(network, target, error);
-  }
-
-  /**
-   * Run a single iteration of the feed backward algorithm, using the given
-   * error of the output layer. Note that we iterate backward through the
-   * layer modules.
-   */
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I < (sizeof...(Tp) - 1), void>::type
-  Backward(const DataType& error, std::tuple<Tp ...>& network)
-  {
-    std::get<sizeof...(Tp) - I>(network).Backward(
-        std::get<sizeof...(Tp) - I>(network).OutputParameter(), error,
-        std::get<sizeof...(Tp) - I>(network).Delta());
-
-    BackwardTail<I + 1, DataType, Tp...>(error, network);
-  }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I == (sizeof...(Tp)), void>::type
-  BackwardTail(const DataType& /* unused */,
-               std::tuple<Tp...>& /* unused */) { /* Nothing to do here */ }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I < (sizeof...(Tp)), void>::type
-  BackwardTail(const DataType& error, std::tuple<Tp...>& network)
-  {
-    std::get<sizeof...(Tp) - I>(network).Backward(
-        std::get<sizeof...(Tp) - I>(network).OutputParameter(),
-        std::get<sizeof...(Tp) - I + 1>(network).Delta(),
-        std::get<sizeof...(Tp) - I>(network).Delta());
-
-    BackwardTail<I + 1, DataType, Tp...>(error, network);
-  }
-
-  /**
-   * Iterate through all layer modules and update the gradient using the
-   * layer-defined gradient function.
-   */
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I == Max, void>::type
-  UpdateGradients(std::tuple<Tp...>& /* unused */) { /* Nothing to do here */ }
-
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I < Max, void>::type
-  UpdateGradients(std::tuple<Tp...>& network)
-  {
-    Update(std::get<I>(network), std::get<I>(network).OutputParameter(),
-           std::get<I + 1>(network).Delta());
-
-    UpdateGradients<I + 1, Max, Tp...>(network);
-  }
-
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      HasGradientCheck<T, P&(T::*)()>::value, void>::type
-  Update(T& layer, P& /* unused */, D& delta)
-  {
-    layer.Gradient(layer.InputParameter(), delta, layer.Gradient());
-  }
-
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      !HasGradientCheck<T, P&(T::*)()>::value, void>::type
-  Update(T& /* unused */, P& /* unused */, D& /* unused */)
-  {
-    /* Nothing to do here */
-  }
-
-  /*
-   * Calculate and store the output activation.
-   */
-  template<typename DataType, typename... Tp>
-  void OutputPrediction(DataType& output, std::tuple<Tp...>& network)
-  {
-    // Calculate and store the output prediction.
-    outputLayer.OutputClass(std::get<sizeof...(Tp) - 1>(
-        network).OutputParameter(), output);
-  }
-
-  //! Instantiated feedforward network.
-  LayerTypes network;
-
-  //! The output layer used to evaluate the network.
-  OutputLayerType outputLayer;
-
-  //! Performance strategy used to calculate the error.
-  PerformanceFunction performanceFunc;
-
-  //! The current evaluation mode (training or testing).
-  bool deterministic;
-
-  //! Matrix of (trained) parameters.
-  arma::mat parameter;
-
-  //! The matrix of data points (predictors).
-  arma::mat predictors;
-
-  //! The matrix of responses to the input data points.
-  arma::mat responses;
-
-  //! The number of separable functions (the number of predictor points).
-  size_t numFunctions;
-
-  //! Locally stored backward error.
-  arma::mat error;
-}; // class FFN
-
-} // namespace ann
-} // namespace mlpack
-
-// Include implementation.
-#include "ffn_impl.hpp"
-
-#endif
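
For reference, here is a hedged sketch of how this now-removed, tuple-based FFN interface was typically assembled. The layer constructor signatures (LinearLayer, SigmoidLayer) are assumptions based on the other headers deleted in this commit, so treat this as an illustration of the API shape rather than a tested program.

    // Sketch only; assumes the pre-removal mlpack ANN headers.
    using namespace mlpack::ann;

    arma::mat predictors, responses;  // Filled with training data elsewhere.

    LinearLayer<> hiddenLayer(10, 5);      // 10 inputs, 5 hidden units.
    SigmoidLayer<> hiddenActivation;
    LinearLayer<> outputLayer(5, 1);       // 5 hidden units, 1 output.
    SigmoidLayer<> outputActivation;
    BinaryClassificationLayer classOutputLayer;

    // The network is a tuple of references to the layer modules.
    auto modules = std::tie(hiddenLayer, hiddenActivation, outputLayer,
        outputActivation);

    FFN<decltype(modules), decltype(classOutputLayer)> net(modules,
        classOutputLayer, predictors, responses);

    net.Train(predictors, responses);

    arma::mat testData, predictions;
    net.Predict(testData, predictions);
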
diff --git a/src/mlpack/methods/ann/init_rules/kathirvalavakumar_subavathi_init.hpp b/src/mlpack/methods/ann/init_rules/kathirvalavakumar_subavathi_init.hpp
deleted file mode 100644
index 82e36ca..0000000
--- a/src/mlpack/methods/ann/init_rules/kathirvalavakumar_subavathi_init.hpp
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * @file kathirvalavakumar_subavathi_init.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the initialization method by T.
- * Kathirvalavakumar and S. Subavathi. This initialization rule is based on
- * sensitivity analysis using Cauchy’s inequality.
- *
- * For more information, see the following paper.
- *
- * @code
- * @inproceedings{KathirvalavakumarJILSA2011,
- *   title={A New Weight Initialization Method Using Cauchy’s Inequality Based
- *   on Sensitivity Analysis},
- *   author={T. Kathirvalavakumar and S. Subavathi},
- *   booktitle={Journal of Intelligent Learning Systems and Applications,
- *   Vol. 3 No. 4},
- *   year={2011}
- * }
- * @endcode
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_INIT_RULES_KATHIRVALAVAKUMAR_SUBAVATHI_INIT_HPP
-#define MLPACK_METHODS_ANN_INIT_RULES_KATHIRVALAVAKUMAR_SUBAVATHI_INIT_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/activation_functions/logistic_function.hpp>
-#include <mlpack/methods/ann/init_rules/random_init.hpp>
-#include <iostream>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class is used to initialize the weight matrix with the method proposed
- * by T. Kathirvalavakumar and S. Subavathi. The method is based on sensitivity
- * analysis using Cauchy’s inequality. The method is defined by
- *
- * @f{eqnarray*}{
- * \overline{s} &=& f^{-1}(\overline{t}) \\
- * \Theta^{1}_{p} &\le& \overline{s}
- *     \sqrt{\frac{3}{I \sum_{i = 1}^{I} (x_{ip}^2)}} \\
- * \Theta^1 &=& min(\Theta_{p}^{1}); p=1,2,..,P \\
- * -\Theta^{1} \le w_{i}^{1} &\le& \Theta^{1}
- * @f}
- *
- * where I is the number of inputs including the bias, p refers to the pattern
- * considered in training, f is the transfer function, and \={s} is the active
- * region in which the derivative of the activation function is greater than 4%
- * of the maximum derivative.
- */
-class KathirvalavakumarSubavathiInitialization
-{
- public:
-  /**
-   * Initialize the Kathirvalavakumar-Subavathi initialization rule with the
-   * given values.
-   *
-   * @param data The input patterns.
-   * @param s Parameter that defines the active region.
-   */
-  template<typename eT>
-  KathirvalavakumarSubavathiInitialization(const arma::Mat<eT>& data,
-                                           const double s) : s(s)
-  {
-    dataSum = arma::sum(data % data);
-  }
-
-  /**
-   * Initialize the elements of the specified weight matrix with the
-   * Kathirvalavakumar-Subavathi method.
-   *
-   * @param W Weight matrix to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   */
-  template<typename eT>
-  void Initialize(arma::Mat<eT>& W, const size_t rows, const size_t cols)
-  {
-    arma::Row<eT> b = s * arma::sqrt(3 / (rows * dataSum));
-    const double theta = b.min();
-    RandomInitialization randomInit(-theta, theta);
-    randomInit.Initialize(W, rows, cols);
-  }
-
-  /**
-   * Initialize the elements of the specified weight 3rd order tensor with the
-   * Kathirvalavakumar-Subavathi method.
-   *
-   * @param W 3rd order tensor to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   * @param slices Number of slices.
-   */
-  template<typename eT>
-  void Initialize(arma::Cube<eT>& W,
-                  const size_t rows,
-                  const size_t cols,
-                  const size_t slices)
-  {
-    W = arma::Cube<eT>(rows, cols, slices);
-
-    for (size_t i = 0; i < slices; i++)
-      Initialize(W.slice(i), rows, cols);
-  }
-
- private:
-  //! Parameter that defines the sum of elements in each column.
-  arma::rowvec dataSum;
-
-  //! Parameter that defines the active region.
-  const double s;
-}; // class KathirvalavakumarSubavathiInitialization
-
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
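
To make the bound concrete, this standalone snippet computes theta the same way Initialize() above does for a toy data matrix. The value s = 4.59 is an assumed choice for the logistic function's active region, not something defined in this header.

    #include <armadillo>
    #include <iostream>

    int main()
    {
      // 3 inputs, 5 training patterns.
      arma::mat data(3, 5, arma::fill::randu);
      const double s = 4.59;  // Assumed active-region parameter.

      // Column-wise sum of squared inputs, as in the constructor above.
      arma::rowvec dataSum = arma::sum(data % data);

      // theta = min_p s * sqrt(3 / (I * sum_i x_ip^2)), with I = data.n_rows.
      arma::rowvec b = s * arma::sqrt(3.0 / (data.n_rows * dataSum));
      std::cout << "theta = " << b.min() << std::endl;
    }
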
diff --git a/src/mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp b/src/mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp
deleted file mode 100644
index 689a2ff..0000000
--- a/src/mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp
+++ /dev/null
@@ -1,117 +0,0 @@
-/**
- * @file nguyen_widrow_init.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the Nguyen-Widrow method. This
- * initialization rule initialize the weights so that the active regions of the
- * neurons are approximately evenly distributed over the input space.
- *
- * For more information, see the following paper.
- *
- * @code
- * @inproceedings{NguyenIJCNN1990,
- *   title={Improving the learning speed of 2-layer neural networks by choosing
- *   initial values of the adaptive weights},
- *   booktitle={Neural Networks, 1990., 1990 IJCNN International Joint
- *   Conference on},
- *   year={1990}
- * }
- * @endcode
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_INIT_RULES_NGUYEN_WIDROW_INIT_HPP
-#define MLPACK_METHODS_ANN_INIT_RULES_NGUYEN_WIDROW_INIT_HPP
-
-#include <mlpack/prereqs.hpp>
-
-#include "random_init.hpp"
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class is used to initialize the weight matrix with the Nguyen-Widrow
- * method. The method is defined by
- *
- * @f{eqnarray*}{
- * -\gamma &\le& w_i \le \gamma \\
- * \beta &=& 0.7 H^{\frac{1}{I}} \\
- * n &=& \sqrt{\sum_{i=0}^{I} w_{i}^{2}} \\
- * w_i &=& \frac{\beta w_i}{n}
- * @f}
- *
- * where H is the number of neurons in the outgoing layer, I is the number of
- * neurons in the incoming layer, and gamma defines the interval from which the
- * initial weights are drawn uniformly at random.
- */
-class NguyenWidrowInitialization
-{
- public:
-  /**
-   * Initialize the random initialization rule with the given lower bound and
-   * upper bound.
-   *
-   * @param lowerBound The number used as lower bound.
-   * @param upperBound The number used as upper bound.
-   */
-  NguyenWidrowInitialization(const double lowerBound = -0.5,
-                             const double upperBound = 0.5) :
-      lowerBound(lowerBound), upperBound(upperBound) { }
-
-  /**
-   * Initialize the elements of the specified weight matrix with the
-   * Nguyen-Widrow method.
-   *
-   * @param W Weight matrix to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   */
-  template<typename eT>
-  void Initialize(arma::Mat<eT>& W, const size_t rows, const size_t cols)
-  {
-    RandomInitialization randomInit(lowerBound, upperBound);
-    randomInit.Initialize(W, rows, cols);
-
-    // Note: 1.0 / rows, since integer division would always yield zero here.
-    const double beta = 0.7 * std::pow((double) cols, 1.0 / rows);
-    W *= (beta / arma::norm(W));
-  }
-
-  /**
-   * Initialize the elements of the specified weight 3rd order tensor with the
-   * Nguyen-Widrow method.
-   *
-   * @param W 3rd order tensor to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   * @param slices Number of slices.
-   */
-  template<typename eT>
-  void Initialize(arma::Cube<eT>& W,
-                  const size_t rows,
-                  const size_t cols,
-                  const size_t slices)
-  {
-    W = arma::Cube<eT>(rows, cols, slices);
-
-    for (size_t i = 0; i < slices; i++)
-      Initialize(W.slice(i), rows, cols);
-  }
-
- private:
-  //! The number used as lower bound.
-  const double lowerBound;
-
-  //! The number used as upper bound.
-  const double upperBound;
-}; // class NguyenWidrowInitialization
-
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
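
Since the rescaling is the heart of the method, here is a minimal standalone version of what Initialize() above computes, with the integer-division fix applied:

    #include <armadillo>
    #include <cmath>
    #include <iostream>

    int main()
    {
      const size_t rows = 4, cols = 10;  // I = 4 incoming, H = 10 outgoing.

      // Uniform draw in [-0.5, 0.5], as RandomInitialization would produce.
      arma::mat W = -0.5 + arma::randu<arma::mat>(rows, cols);

      // Rescale so that the 2-norm of W equals beta = 0.7 * H^(1/I).
      const double beta = 0.7 * std::pow((double) cols, 1.0 / rows);
      W *= beta / arma::norm(W);

      std::cout << "norm after scaling: " << arma::norm(W)
                << " (beta = " << beta << ")" << std::endl;
    }
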
diff --git a/src/mlpack/methods/ann/init_rules/oivs_init.hpp b/src/mlpack/methods/ann/init_rules/oivs_init.hpp
deleted file mode 100644
index 5b68753..0000000
--- a/src/mlpack/methods/ann/init_rules/oivs_init.hpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * @file oivs_init.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the Optimal Initial Value Setting method
- * (OIVS). This initialization rule is based on geometrical considerations as
- * described by H. Shimodaira.
- *
- * For more information, see the following paper.
- *
- * @code
- * @inproceedings{ShimodairaICTAI1994,
- *   title={A weight value initialization method for improving learning
- *   performance of the backpropagation algorithm in neural networks},
- *   author={Shimodaira, H.},
- *   booktitle={Tools with Artificial Intelligence, 1994. Proceedings.,
- *   Sixth International Conference on},
- *   year={1994}
- * }
- * @endcode
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_INIT_RULES_OIVS_INIT_HPP
-#define MLPACK_METHODS_ANN_INIT_RULES_OIVS_INIT_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/activation_functions/logistic_function.hpp>
-
-#include "random_init.hpp"
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class is used to initialize the weight matrix with the OIVS method. The
- * method is based on the equations representing the characteristics of the
- * information transformation mechanism of a node. The method is defined by
- *
- * @f{eqnarray*}{
- * b &=& |f^{-1}(1 - \epsilon) - f^{-1}(\epsilon)| \\
- * \hat{w} &=& \frac{b}{k \cdot n} \\
- * -\gamma &\le& a_i \le \gamma \\
- * w_i &=& \hat{w} \cdot \sqrt{a_i + 1}
- * @f}
- *
- * where f is the transfer function, epsilon and k are custom parameters, n is
- * the number of neurons in the outgoing layer, and gamma is a parameter that
- * defines the random interval.
- *
- * @tparam ActivationFunction The activation function used for the oivs method.
- */
-template<
-    class ActivationFunction = LogisticFunction
->
-class OivsInitialization
-{
- public:
-  /**
-   * Initialize the OIVS initialization rule with the given values.
-   *
-   * @param epsilon Parameter to control the activation region.
-   * @param k Parameter to control the activation region width.
-   * @param gamma Parameter to define the uniform random range.
-   */
-  OivsInitialization(const double epsilon = 0.1,
-                     const int k = 5,
-                     const double gamma = 0.9) :
-      k(k), gamma(gamma),
-      b(std::abs(ActivationFunction::inv(1 - epsilon) -
-                 ActivationFunction::inv(epsilon)))
-  {
-  }
-
-  /**
-   * Initialize the elements of the specified weight matrix with the OIVS
-   * method.
-   *
-   * @param W Weight matrix to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   */
-  template<typename eT>
-  void Initialize(arma::Mat<eT>& W, const size_t rows, const size_t cols)
-  {
-    RandomInitialization randomInit(-gamma, gamma);
-    randomInit.Initialize(W, rows, cols);
-
-    W = (b / (k * rows)) * arma::sqrt(W + 1);
-  }
-
-  /**
-   * Initialize the elements of the specified weight 3rd order tensor with the
-   * OIVS method.
-   *
-   * @param W 3rd order tensor to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   * @param slices Number of slices.
-   */
-  template<typename eT>
-  void Initialize(arma::Cube<eT>& W,
-                  const size_t rows,
-                  const size_t cols,
-                  const size_t slices)
-  {
-    W = arma::Cube<eT>(rows, cols, slices);
-
-    for (size_t i = 0; i < slices; i++)
-      Initialize(W.slice(i), rows, cols);
-  }
-
- private:
-  //! Parameter to control the activation region width.
-  const int k;
-
-  //! Parameter to define the uniform random range.
-  const double gamma;
-
-  //! Parameter to control the activation region.
-  const double b;
-}; // class OivsInitialization
-
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
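
For the logistic function the inverse is the logit, so the constant b computed in the constructor above is easy to check by hand; a minimal sketch:

    #include <cmath>
    #include <iostream>

    int main()
    {
      const double eps = 0.1;

      // Inverse of the logistic function: f^{-1}(y) = log(y / (1 - y)).
      auto logit = [](const double y) { return std::log(y / (1.0 - y)); };

      // b = |f^{-1}(1 - eps) - f^{-1}(eps)|; about 4.394 for eps = 0.1.
      const double b = std::abs(logit(1.0 - eps) - logit(eps));
      std::cout << "b = " << b << std::endl;
    }
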
diff --git a/src/mlpack/methods/ann/init_rules/orthogonal_init.hpp b/src/mlpack/methods/ann/init_rules/orthogonal_init.hpp
deleted file mode 100644
index f7d2954..0000000
--- a/src/mlpack/methods/ann/init_rules/orthogonal_init.hpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * @file orthogonal_init.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the orthogonal matrix initialization method.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_INIT_RULES_ORTHOGONAL_INIT_HPP
-#define MLPACK_METHODS_ANN_INIT_RULES_ORTHOGONAL_INIT_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class is used to initialize the weight matrix with the orthogonal
- * matrix initialization method.
- */
-class OrthogonalInitialization
-{
- public:
-  /**
-   * Initialize the orthogonal matrix initialization rule with the given gain.
-   *
-   * @param gain The gain value.
-   */
-  OrthogonalInitialization(const double gain = 1.0) : gain(gain) { }
-
-  /**
-   * Initialize the elements of the specified weight matrix with the orthogonal
-   * matrix initialization method.
-   *
-   * @param W Weight matrix to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   */
-  template<typename eT>
-  void Initialize(arma::Mat<eT>& W, const size_t rows, const size_t cols)
-  {
-    arma::Mat<eT> V;
-    arma::Col<eT> s;
-
-    arma::svd_econ(W, s, V, arma::randu<arma::Mat<eT> >(rows, cols));
-    W *= gain;
-  }
-
-  /**
-   * Initialize the elements of the specified weight 3rd order tensor with the
-   * orthogonal matrix initialization method.
-   *
-   * @param W 3rd order tensor to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   * @param slices Number of slices.
-   */
-  template<typename eT>
-  void Initialize(arma::Cube<eT>& W,
-                  const size_t rows,
-                  const size_t cols,
-                  const size_t slices)
-  {
-    W = arma::Cube<eT>(rows, cols, slices);
-
-    for (size_t i = 0; i < slices; i++)
-      Initialize(W.slice(i), rows, cols);
-  }
-
- private:
-  //! The number used as gain.
-  const double gain;
-}; // class OrthogonalInitialization
-
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
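
The property delivered by Initialize() above is that W has orthonormal rows up to the gain factor; a quick standalone check:

    #include <armadillo>
    #include <iostream>

    int main()
    {
      arma::mat W, V;
      arma::vec s;

      // Same construction as Initialize() above, with gain = 1.
      arma::svd_econ(W, s, V, arma::randu<arma::mat>(4, 4));

      // W * W.t() should be the identity matrix up to rounding error.
      std::cout << W * W.t() << std::endl;
    }
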
diff --git a/src/mlpack/methods/ann/init_rules/random_init.hpp b/src/mlpack/methods/ann/init_rules/random_init.hpp
deleted file mode 100644
index 5207a97..0000000
--- a/src/mlpack/methods/ann/init_rules/random_init.hpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * @file random_init.hpp
- * @author Marcus Edel
- *
- * Initialization rule for the neural networks. This simple initialization is
- * performed by assigning a random matrix to the weight matrix.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_INIT_RULES_RANDOM_INIT_HPP
-#define MLPACK_METHODS_ANN_INIT_RULES_RANDOM_INIT_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class is used to randomly initialize the weight matrix.
- */
-class RandomInitialization
-{
- public:
-  /**
-   * Initialize the random initialization rule with the given lower bound and
-   * upper bound.
-   *
-   * @param lowerBound The number used as lower bound.
-   * @param upperBound The number used as upper bound.
-   */
-  RandomInitialization(const double lowerBound = -1,
-                       const double upperBound = 1) :
-      lowerBound(lowerBound), upperBound(upperBound) { }
-
-  /**
-   * Initialize the random initialization rule with the given bound, using the
-   * negative of the bound as the lower bound and the positive bound as the
-   * upper bound.
-   *
-   * @param bound The number used as lower and upper bound.
-   */
-  RandomInitialization(const double bound) :
-      lowerBound(-std::abs(bound)), upperBound(std::abs(bound)) { }
-
-  /**
-   * Initialize randomly the elements of the specified weight matrix.
-   *
-   * @param W Weight matrix to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   */
-  template<typename eT>
-  void Initialize(arma::Mat<eT>& W, const size_t rows, const size_t cols)
-  {
-    W = lowerBound + arma::randu<arma::Mat<eT>>(rows, cols) *
-        (upperBound - lowerBound);
-  }
-
-  /**
-   * Initialize randomly the elements of the specified weight 3rd order tensor.
-   *
-   * @param W 3rd order tensor to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   * @param slices Number of slices.
-   */
-  template<typename eT>
-  void Initialize(arma::Cube<eT>& W,
-                  const size_t rows,
-                  const size_t cols,
-                  const size_t slices)
-  {
-    W = arma::Cube<eT>(rows, cols, slices);
-
-    for (size_t i = 0; i < slices; i++)
-      Initialize(W.slice(i), rows, cols);
-  }
-
- private:
-  //! The number used as lower bound.
-  const double lowerBound;
-
-  //! The number used as upper bound.
-  const double upperBound;
-}; // class RandomInitialization
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
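
The draw performed by Initialize() above, written out directly for the default bounds [-1, 1]:

    #include <armadillo>
    #include <iostream>

    int main()
    {
      const double lowerBound = -1.0, upperBound = 1.0;

      // randu draws uniformly in [0, 1]; shift and scale to [lower, upper].
      arma::mat W = lowerBound + arma::randu<arma::mat>(4, 4) *
          (upperBound - lowerBound);

      std::cout << W << std::endl;
    }
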
diff --git a/src/mlpack/methods/ann/init_rules/zero_init.hpp b/src/mlpack/methods/ann/init_rules/zero_init.hpp
deleted file mode 100644
index f6aec7b..0000000
--- a/src/mlpack/methods/ann/init_rules/zero_init.hpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * @file zero_init.hpp
- * @author Marcus Edel
- *
- * Initialization rule for the neural networks. This simple initialization is
- * performed by assigning a zero matrix to the weight matrix.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_INIT_RULES_ZERO_INIT_HPP
-#define MLPACK_METHODS_ANN_INIT_RULES_ZERO_INIT_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class is used to initialize the weight matrix with zeros.
- */
-class ZeroInitialization
-{
- public:
-  /**
-   * Create the ZeroInitialization object.
-   */
-  ZeroInitialization() { /* Nothing to do here */ }
-
-  /**
-   * Initialize the elements of the specified weight matrix.
-   *
-   * @param W Weight matrix to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   */
-  template<typename eT>
-  void Initialize(arma::Mat<eT>& W, const size_t rows, const size_t cols)
-  {
-    W = arma::zeros<arma::Mat<eT> >(rows, cols);
-  }
-
-  /**
-   * Initialize the elements of the specified weight (3rd order tensor).
-   *
-   * @param W 3rd order tensor to initialize.
-   * @param rows Number of rows.
-   * @param cols Number of columns.
-   * @param slices Number of slices.
-   */
-  template<typename eT>
-  void Initialize(arma::Cube<eT>& W,
-                  const size_t rows,
-                  const size_t cols,
-                  const size_t slices)
-  {
-    W = arma::zeros<arma::Cube<eT> >(rows, cols, slices);
-  }
-}; // class ZeroInitialization
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
diff --git a/src/mlpack/methods/ann/layer/base_layer.hpp b/src/mlpack/methods/ann/layer/base_layer.hpp
deleted file mode 100644
index 9af543b..0000000
--- a/src/mlpack/methods/ann/layer/base_layer.hpp
+++ /dev/null
@@ -1,223 +0,0 @@
-/**
- * @file base_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the BaseLayer class, which attaches various functions to the
- * embedding layer.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_BASE_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_BASE_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/activation_functions/logistic_function.hpp>
-#include <mlpack/methods/ann/activation_functions/identity_function.hpp>
-#include <mlpack/methods/ann/activation_functions/rectifier_function.hpp>
-#include <mlpack/methods/ann/activation_functions/tanh_function.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the base layer. The base layer works as a metaclass which
- * attaches various functions to the embedding layer.
- *
- * A few convenience typedefs are given:
- *
- *  - SigmoidLayer
- *  - IdentityLayer
- *  - ReLULayer
- *  - TanHLayer
- *  - BaseLayer2D
- *
- * @tparam ActivationFunction Activation function used for the embedding layer.
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    class ActivationFunction = LogisticFunction,
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class BaseLayer
-{
- public:
-  /**
-   * Create the BaseLayer object.
-   */
-  BaseLayer()
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename InputType, typename OutputType>
-  void Forward(const InputType& input, OutputType& output)
-  {
-    ActivationFunction::fn(input, output);
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType>
-  void Backward(const DataType& input,
-                const DataType& gy,
-                DataType& g)
-  {
-    DataType derivative;
-    ActivationFunction::deriv(input, derivative);
-    g = gy % derivative;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Cube<eT>& input,
-                const arma::Mat<eT>& gy,
-                arma::Cube<eT>& g)
-  {
-    // Generate a cube using the backpropagated error matrix.
-    arma::Cube<eT> mappedError = arma::zeros<arma::Cube<eT> >(input.n_rows,
-        input.n_cols, input.n_slices);
-
-    for (size_t s = 0, j = 0; s < mappedError.n_slices; s += gy.n_cols, j++)
-    {
-      for (size_t i = 0; i < gy.n_cols; i++)
-      {
-        arma::Col<eT> temp = gy.col(i).subvec(
-            j * input.n_rows * input.n_cols,
-            (j + 1) * input.n_rows * input.n_cols - 1);
-
-        mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
-            input.n_rows, input.n_cols);
-      }
-    }
-
-    arma::Cube<eT> derivative;
-    ActivationFunction::deriv(input, derivative);
-    g = mappedError % derivative;
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& /* ar */, const unsigned int /* version */)
-  {
-    /* Nothing to do here */
-  }
-
- private:
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class BaseLayer
-
-// Convenience typedefs.
-
-/**
- * Standard Sigmoid-Layer using the logistic activation function.
- */
-template <
-    class ActivationFunction = LogisticFunction,
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-using SigmoidLayer = BaseLayer<
-    ActivationFunction, InputDataType, OutputDataType>;
-
-/**
- * Standard Identity-Layer using the identity activation function.
- */
-template <
-    class ActivationFunction = IdentityFunction,
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-using IdentityLayer = BaseLayer<
-    ActivationFunction, InputDataType, OutputDataType>;
-
-/**
- * Standard rectified linear unit non-linearity layer.
- */
-template <
-    class ActivationFunction = RectifierFunction,
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-using ReLULayer = BaseLayer<
-    ActivationFunction, InputDataType, OutputDataType>;
-
-/**
- * Standard hyperbolic tangent layer.
- */
-template <
-    class ActivationFunction = TanhFunction,
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-using TanHLayer = BaseLayer<
-    ActivationFunction, InputDataType, OutputDataType>;
-
-/**
- * Standard Base-Layer2D using the logistic activation function.
- */
-template <
-    class ActivationFunction = LogisticFunction,
-    typename InputDataType = arma::cube,
-    typename OutputDataType = arma::cube
->
-using BaseLayer2D = BaseLayer<
-    ActivationFunction, InputDataType, OutputDataType>;
-
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
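
What Forward() and Backward() above compute is easiest to see for the logistic activation, where the derivative can be expressed through the activation itself as f(x)(1 - f(x)). The standalone sketch below assumes that form for deriv(), which matches how such activation headers are usually written but is not shown in this diff.

    #include <armadillo>
    #include <iostream>

    int main()
    {
      arma::mat input(3, 1, arma::fill::randn);
      arma::mat gy(3, 1, arma::fill::ones);  // Error from the next layer.

      // Forward: f(x) = 1 / (1 + e^{-x}).
      arma::mat output = 1.0 / (1.0 + arma::exp(-input));

      // Backward: g = gy % f'(x), with f'(x) = f(x) % (1 - f(x)).
      arma::mat derivative = output % (1.0 - output);
      arma::mat g = gy % derivative;

      std::cout << g << std::endl;
    }
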
diff --git a/src/mlpack/methods/ann/layer/bias_layer.hpp b/src/mlpack/methods/ann/layer/bias_layer.hpp
deleted file mode 100644
index b40bb56..0000000
--- a/src/mlpack/methods/ann/layer/bias_layer.hpp
+++ /dev/null
@@ -1,208 +0,0 @@
-/**
- * @file bias_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the BiasLayer class.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_BIAS_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_BIAS_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of a standard bias layer. The BiasLayer class represents a
- * single layer of a neural network.
- *
- * A few convenience typedefs are given:
- *
- *  - BiasLayer2D
- *  - AdditionLayer
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class BiasLayer
-{
- public:
-  /**
-   * Create the BiasLayer object using the specified number of units and bias
-   * parameter.
-   *
-   * @param outSize The number of output units.
-   * @param bias The bias value.
-   */
-  BiasLayer(const size_t outSize, const double bias = 1) :
-      outSize(outSize),
-      bias(bias)
-  {
-    weights.set_size(outSize, 1);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    output = input + (weights * bias);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Cube<eT>& input, arma::Cube<eT>& output)
-  {
-    output = input;
-    for (size_t s = 0; s < input.n_slices; s++)
-    {
-      output.slice(s) += weights(s) * bias;
-    }
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType, typename ErrorType>
-  void Backward(const DataType& /* unused */,
-                const ErrorType& gy,
-                ErrorType& g)
-  {
-    g = gy;
-  }
-
-  /*
-   * Calculate the gradient using the output delta and the bias.
-   *
-   * @param input The propagated input.
-   * @param error The calculated error.
-   * @param gradient The calculated gradient.
-   */
-  template<typename eT, typename ErrorType, typename GradientType>
-  void Gradient(const arma::Mat<eT>& /* input */,
-                const ErrorType& error,
-                GradientType& gradient)
-  {
-    gradient = error * bias;
-  }
-
-  //! Get the weights.
-  InputDataType const& Weights() const { return weights; }
-  //! Modify the weights.
-  InputDataType& Weights() { return weights; }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the gradient.
-  InputDataType const& Gradient() const { return gradient; }
-  //! Modify the gradient.
-  InputDataType& Gradient() { return gradient; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(weights, "weights");
-    ar & data::CreateNVP(bias, "bias");
-  }
-
- private:
-  //! Locally-stored number of output units.
-  size_t outSize;
-
-  //! Locally-stored bias value.
-  double bias;
-
-  //! Locally-stored weight object.
-  InputDataType weights;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  InputDataType gradient;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class BiasLayer
-
-//! Layer traits for the bias layer.
-template<typename InputDataType, typename OutputDataType>
-class LayerTraits<BiasLayer<InputDataType, OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = true;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-/**
- * Standard 2D-Bias-Layer.
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::cube
->
-using BiasLayer2D = BiasLayer<InputDataType, OutputDataType>;
-
-/**
- * Standard Addition-Layer, which adds a learned bias term to its input.
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-using AdditionLayer = BiasLayer<InputDataType, OutputDataType>;
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
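
The matrix-input forward pass above amounts to a per-unit learned offset; written out as a standalone snippet:

    #include <armadillo>
    #include <iostream>

    int main()
    {
      arma::mat input(3, 1, arma::fill::zeros);
      arma::mat weights = {{0.5}, {-1.0}, {2.0}};  // One weight per unit.
      const double bias = 1.0;

      // output = input + weights * bias, as in BiasLayer::Forward().
      arma::mat output = input + weights * bias;
      std::cout << output.t();  // Prints: 0.5 -1.0 2.0
    }
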
diff --git a/src/mlpack/methods/ann/layer/binary_classification_layer.hpp b/src/mlpack/methods/ann/layer/binary_classification_layer.hpp
deleted file mode 100644
index 90975b3..0000000
--- a/src/mlpack/methods/ann/layer/binary_classification_layer.hpp
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * @file binary_classification_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the BinaryClassificationLayer class, which implements a
- * binary class classification layer that can be used as output layer.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_BINARY_CLASSIFICATION_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_BINARY_CLASSIFICATION_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of a binary classification layer that can be used as
- * output layer.
- */
-class BinaryClassificationLayer
-{
- public:
-  /**
-   * Create the BinaryClassificationLayer object.
-   *
-   * @param confidence The confidence used for the output class transformation.
-   */
-  BinaryClassificationLayer(const double confidence = 0.5) :
-      confidence(confidence)
-  {
-    // Nothing to do here.
-  }
-
-  /*
-   * Calculate the error using the specified input activation and the target.
-   * The error is stored into the given error parameter.
-   *
-   * @param inputActivations Input data used for evaluating the network.
-   * @param target Target data used for evaluating the network.
-   * @param error The calculated error with respect to the input activation and
-   * the given target.
-   */
-  template<typename DataType>
-  void CalculateError(const DataType& inputActivations,
-                      const DataType& target,
-                      DataType& error)
-  {
-    error = inputActivations - target;
-  }
-
-  /*
-   * Calculate the output class using the specified input activation.
-   *
-   * @param inputActivations Input data used to calculate the output class.
-   * @param output Output class of the input activation.
-   */
-  template<typename DataType>
-  void OutputClass(const DataType& inputActivations, DataType& output)
-  {
-    output = inputActivations;
-
-    for (size_t i = 0; i < output.n_elem; i++)
-      output(i) = output(i) > confidence ? 1 : 0;
-  }
-
-  //! Get the confidence parameter.
-  double const& Confidence() const { return confidence; }
-  //! Modify the confidence parameter.
-  double& Confidence() { return confidence; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(confidence, "confidence");
-  }
-
- private:
-  //! The confidence used for the output class transformation.
-  double confidence;
-
-}; // class BinaryClassificationLayer
-
-//! Layer traits for the binary class classification layer.
-template <>
-class LayerTraits<BinaryClassificationLayer>
-{
- public:
-  static const bool IsBinary = true;
-  static const bool IsOutputLayer = true;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = false;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
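
OutputClass() above is a plain threshold at the confidence value; the equivalent Armadillo one-liner:

    #include <armadillo>
    #include <iostream>

    int main()
    {
      arma::vec activations = {0.1, 0.45, 0.73, 0.5};
      const double confidence = 0.5;

      // Strictly-greater comparison, as in OutputClass() above.
      arma::vec labels =
          arma::conv_to<arma::vec>::from(activations > confidence);

      std::cout << labels.t();  // Prints: 0 0 1 0
    }
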
diff --git a/src/mlpack/methods/ann/layer/constant_layer.hpp b/src/mlpack/methods/ann/layer/constant_layer.hpp
deleted file mode 100644
index 716c0ab..0000000
--- a/src/mlpack/methods/ann/layer/constant_layer.hpp
+++ /dev/null
@@ -1,121 +0,0 @@
-/**
- * @file constant_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the ConstantLayer class, which outputs a constant value given
- * any input.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_CONSTANT_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_CONSTANT_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the constant layer. The constant layer outputs a given
- * constant value given any input value.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class ConstantLayer
-{
- public:
-  /**
-   * Create the ConstantLayer object that outputs a given constant scalar value
-   * given any input value.
-   *
-   * @param outSize The number of output units.
-   * @param scalar The constant value used to create the constant output.
-   */
-  ConstantLayer(const size_t outSize, const double scalar)
-  {
-    constantOutput = OutputDataType(outSize, 1);
-    constantOutput.fill(scalar);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network. The forward pass fills the
-   * output with the specified constant parameter.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& /* input */, arma::Mat<eT>& output)
-  {
-    output = constantOutput;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network. The backward pass of the
- * constant layer always returns a zero output error matrix.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Mat<eT>& /* input */,
-                const arma::Mat<eT>& /* gy */,
-                arma::Mat<eT>& g)
-  {
-    g = arma::zeros<arma::Mat<eT> >(inputParameter.n_rows,
-        inputParameter.n_cols);
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(constantOutput, "constantOutput");
-  }
-
- private:
-  //! Locally-stored constant output matrix.
-  OutputDataType constantOutput;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class ConstantLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
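
As a sketch, the removed ConstantLayer ignores its input and always emits the
fixed vector set at construction (hypothetical values; pre-removal header
assumed available):

    #include <mlpack/methods/ann/layer/constant_layer.hpp>

    using namespace mlpack::ann;

    int main()
    {
      // Three output units, each fixed at 5.0.
      ConstantLayer<> layer(3, 5.0);

      arma::mat input = arma::randu<arma::mat>(10, 1);
      arma::mat output;
      layer.Forward(input, output); // output is a 3x1 matrix of 5.0.
    }
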
diff --git a/src/mlpack/methods/ann/layer/conv_layer.hpp b/src/mlpack/methods/ann/layer/conv_layer.hpp
deleted file mode 100644
index 3dafb6d..0000000
--- a/src/mlpack/methods/ann/layer/conv_layer.hpp
+++ /dev/null
@@ -1,324 +0,0 @@
-/**
- * @file conv_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the ConvLayer class.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_CONV_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_CONV_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-#include <mlpack/methods/ann/convolution_rules/border_modes.hpp>
-#include <mlpack/methods/ann/convolution_rules/naive_convolution.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the ConvLayer class. The ConvLayer class represents a
- * single layer of a neural network.
- *
- * @tparam ForwardConvolutionRule Convolution to perform forward process.
- * @tparam BackwardConvolutionRule Convolution to perform backward process.
- * @tparam GradientConvolutionRule Convolution to calculate gradient.
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename ForwardConvolutionRule = NaiveConvolution<ValidConvolution>,
-    typename BackwardConvolutionRule = NaiveConvolution<FullConvolution>,
-    typename GradientConvolutionRule = NaiveConvolution<ValidConvolution>,
-    typename InputDataType = arma::cube,
-    typename OutputDataType = arma::cube
->
-class ConvLayer
-{
- public:
-  /**
-   * Create the ConvLayer object using the specified number of input maps,
-   * output maps, filter size, stride and padding parameter.
-   *
-   * @param inMaps The number of input maps.
-   * @param outMaps The number of output maps.
-   * @param wfilter Width of the filter/kernel.
-   * @param hfilter Height of the filter/kernel.
-   * @param xStride Stride of filter application in the x direction.
-   * @param yStride Stride of filter application in the y direction.
-   * @param wPad Spatial padding width of the input.
-   * @param hPad Spatial padding height of the input.
-   */
-  ConvLayer(const size_t inMaps,
-            const size_t outMaps,
-            const size_t wfilter,
-            const size_t hfilter,
-            const size_t xStride = 1,
-            const size_t yStride = 1,
-            const size_t wPad = 0,
-            const size_t hPad = 0) :
-      wfilter(wfilter),
-      hfilter(hfilter),
-      inMaps(inMaps),
-      outMaps(outMaps),
-      xStride(xStride),
-      yStride(yStride),
-      wPad(wPad),
-      hPad(hPad)
-  {
-    weights.set_size(wfilter, hfilter, inMaps * outMaps);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Cube<eT>& input, arma::Cube<eT>& output)
-  {
-    const size_t wConv = ConvOutSize(input.n_rows, wfilter, xStride, wPad);
-    const size_t hConv = ConvOutSize(input.n_cols, hfilter, yStride, hPad);
-
-    output = arma::zeros<arma::Cube<eT> >(wConv, hConv, outMaps);
-    for (size_t outMap = 0, outMapIdx = 0; outMap < outMaps; outMap++)
-    {
-      for (size_t inMap = 0; inMap < inMaps; inMap++, outMapIdx++)
-      {
-        arma::Mat<eT> convOutput;
-        ForwardConvolutionRule::Convolution(input.slice(inMap),
-            weights.slice(outMap), convOutput);
-
-        output.slice(outMap) += convOutput;
-      }
-    }
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f. Using the results from the feed
-   * forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Cube<eT>& /* unused */,
-                const arma::Cube<eT>& gy,
-                arma::Cube<eT>& g)
-  {
-    g = arma::zeros<arma::Cube<eT> >(inputParameter.n_rows,
-                                     inputParameter.n_cols,
-                                     inputParameter.n_slices);
-
-    for (size_t outMap = 0, outMapIdx = 0; outMap < inMaps; outMap++)
-    {
-      for (size_t inMap = 0; inMap < outMaps; inMap++, outMapIdx++)
-      {
-        arma::Mat<eT> rotatedFilter;
-        Rotate180(weights.slice(outMap * outMaps + inMap), rotatedFilter);
-
-        arma::Mat<eT> output;
-        BackwardConvolutionRule::Convolution(gy.slice(inMap), rotatedFilter,
-            output);
-
-        g.slice(outMap) += output;
-      }
-    }
-  }
-
-  /*
-   * Calculate the gradient using the output delta and the input activation.
-   *
-   * @param input The input parameter used for calculating the gradient.
-   * @param d The calculated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT>
-  void Gradient(const InputType& input,
-                const arma::Cube<eT>& d,
-                arma::Cube<eT>& g)
-  {
-    g = arma::zeros<arma::Cube<eT> >(weights.n_rows, weights.n_cols,
-        weights.n_slices);
-
-    for (size_t outMap = 0; outMap < outMaps; outMap++)
-    {
-      for (size_t inMap = 0, s = outMap; inMap < inMaps; inMap++, s += outMaps)
-      {
-        arma::Cube<eT> inputSlices = input.slices(inMap, inMap);
-        arma::Cube<eT> deltaSlices = d.slices(outMap, outMap);
-
-        arma::Cube<eT> output;
-        GradientConvolutionRule::Convolution(inputSlices, deltaSlices, output);
-
-        for (size_t i = 0; i < output.n_slices; i++)
-          g.slice(s) += output.slice(i);
-      }
-    }
-  }
-
-  //! Get the weights.
-  OutputDataType const& Weights() const { return weights; }
-  //! Modify the weights.
-  OutputDataType& Weights() { return weights; }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the gradient.
-  OutputDataType const& Gradient() const { return gradient; }
-  //! Modify the gradient.
-  OutputDataType& Gradient() { return gradient; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(weights, "weights");
-    ar & data::CreateNVP(wfilter, "wfilter");
-    ar & data::CreateNVP(hfilter, "hfilter");
-    ar & data::CreateNVP(inMaps, "inMaps");
-    ar & data::CreateNVP(outMaps, "outMaps");
-    ar & data::CreateNVP(xStride, "xStride");
-    ar & data::CreateNVP(yStride, "yStride");
-    ar & data::CreateNVP(wPad, "wPad");
-    ar & data::CreateNVP(hPad, "hPad");
-  }
-
- private:
-  /*
-   * Rotates a 3rd-order tensor counterclockwise by 180 degrees.
-   *
-   * @param input The input data to be rotated.
-   * @param output The rotated output.
-   */
-  template<typename eT>
-  void Rotate180(const arma::Cube<eT>& input, arma::Cube<eT>& output)
-  {
-    output = arma::Cube<eT>(input.n_rows, input.n_cols, input.n_slices);
-
-    // Left-right flip, up-down flip.
-    for (size_t s = 0; s < output.n_slices; s++)
-      output.slice(s) = arma::fliplr(arma::flipud(input.slice(s)));
-  }
-
-  /*
-   * Rotates a dense matrix counterclockwise by 180 degrees.
-   *
-   * @param input The input data to be rotated.
-   * @param output The rotated output.
-   */
-  template<typename eT>
-  void Rotate180(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    // Left-right flip, up-down flip.
-    output = arma::fliplr(arma::flipud(input));
-  }
-
-  /*
-   * Return the convolution output size.
-   *
-   * @param size The size of the input (row or column).
-   * @param k The size of the filter (width or height).
-   * @param s The stride size (x or y direction).
-   * @param p The size of the padding (width or height).
-   * @return The convolution output size.
-   */
-  size_t ConvOutSize(const size_t size,
-                     const size_t k,
-                     const size_t s,
-                     const size_t p)
-  {
-    // Integer division floors the result: floor((size + 2 * p - k) / s) + 1.
-    return (size + p * 2 - k) / s + 1;
-  }
-
-  //! Locally-stored filter/kernel width.
-  size_t wfilter;
-
-  //! Locally-stored filter/kernel height.
-  size_t hfilter;
-
-  //! Locally-stored number of input maps.
-  size_t inMaps;
-
-  //! Locally-stored number of output maps.
-  size_t outMaps;
-
-  //! Locally-stored stride of the filter in x-direction.
-  size_t xStride;
-
-  //! Locally-stored stride of the filter in y-direction.
-  size_t yStride;
-
-  //! Locally-stored padding width.
-  size_t wPad;
-
-  //! Locally-stored padding height.
-  size_t hPad;
-
-  //! Locally-stored weight object.
-  OutputDataType weights;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  OutputDataType gradient;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class ConvLayer
-
-//! Layer traits for the convolution layer.
-template<
-    typename ForwardConvolutionRule,
-    typename BackwardConvolutionRule,
-    typename GradientConvolutionRule,
-    typename InputDataType,
-    typename OutputDataType
->
-class LayerTraits<ConvLayer<ForwardConvolutionRule,
-                            BackwardConvolutionRule,
-                            GradientConvolutionRule,
-                            InputDataType,
-                            OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
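
As a sketch of the removed ConvLayer's forward pass, with the output size
following ConvOutSize above, i.e. (size + 2 * pad - filter) / stride + 1
(hypothetical sizes; pre-removal headers assumed available):

    #include <mlpack/methods/ann/layer/conv_layer.hpp>

    using namespace mlpack::ann;

    int main()
    {
      // 1 input map, 8 output maps, 5x5 filters, stride 1, no padding.
      ConvLayer<> conv(1, 8, 5, 5);
      conv.Weights().randu();

      arma::cube input = arma::randu<arma::cube>(28, 28, 1);
      arma::cube output;
      conv.Forward(input, output); // 24 x 24 x 8: (28 - 5) / 1 + 1 = 24.
    }
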
diff --git a/src/mlpack/methods/ann/layer/dropconnect_layer.hpp b/src/mlpack/methods/ann/layer/dropconnect_layer.hpp
deleted file mode 100644
index ad0687f..0000000
--- a/src/mlpack/methods/ann/layer/dropconnect_layer.hpp
+++ /dev/null
@@ -1,361 +0,0 @@
-/**
- * @file dropconnect_layer.hpp
- * @author Palash Ahuja
- *
- * Definition of the DropConnectLayer class, which implements a regularizer
- * that randomly sets connections to zero, preventing units from co-adapting.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_DROPCONNECT_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_DROPCONNECT_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-#include "empty_layer.hpp"
-#include <mlpack/methods/ann/network_util.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The DropConnect layer is a regularizer that randomly, with probability
- * ratio, sets connection values to zero and scales the remaining output by
- * a factor of 1 / (1 - ratio) when deterministic is false. In deterministic
- * mode (during testing), the layer just computes the output. The output is
- * computed according to the given input layer; if no input layer is given, a
- * linear layer is used by default.
- *
- * Note:
- * During training you should set deterministic to false, and during testing
- * you should set deterministic to true.
- *
- * For more information, see the following.
- *
- * @code
- * @inproceedings{WanICML2013,
- *   title={Regularization of Neural Networks using DropConnect},
- *   booktitle = {Proceedings of the 30th International Conference on Machine
- *                Learning (ICML-13)},
- *   author = {Li Wan and Matthew Zeiler and Sixin Zhang and Yann L. Cun and
- *             Rob Fergus},
- *   year = {2013}
- * }
- * @endcode
- *
- * @tparam InputLayer Layer used instead of the internal linear layer.
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template<
-    typename InputLayer = EmptyLayer<arma::mat, arma::mat>,
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class DropConnectLayer
-{
- public:
-  /**
-   * Create the DropConnectLayer object using the specified input size,
-   * output size, and ratio; internally, the layer applies a linear
-   * transformation.
-   *
-   * @param inSize The number of input units.
-   * @param outSize The number of output units.
-   * @param ratio The probability of setting a value to zero.
-   */
-  DropConnectLayer (const size_t inSize,
-                    const size_t outSize,
-                    const double ratio = 0.5) :
-      inSize(inSize),
-      outSize(outSize),
-      ratio(ratio),
-      scale(1.0 / (1 - ratio)),
-      uselayer(false)
-  {
-    weights.set_size(outSize, inSize);
-  }
-
-  /**
-   * Create the DropConnectLayer object using the specified input layer and
-   * ratio.
-   *
-   * @param inputLayer The layer object that the DropConnect connection wraps.
-   * @param ratio The probability of setting a connection to zero.
-   */
-  template<typename InputLayerType>
-  DropConnectLayer(InputLayerType &&inputLayer,
-                   const double ratio = 0.5) :
-      baseLayer(std::forward<InputLayerType>(inputLayer)),
-      ratio(ratio),
-      scale(1.0 / (1 - ratio)),
-      uselayer(true)
-  {
-    static_assert(std::is_same<typename std::decay<InputLayerType>::type,
-                  InputLayer>::value,
-                  "The type of the inputLayer must be InputLayerType");
-  }
-
-  /**
-   * Ordinary feed forward pass of the DropConnect layer.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT> &input, arma::Mat<eT> &output)
-  {
-    // The DropConnect mask will not be multiplied in the deterministic mode
-    // (during testing).
-    if (deterministic)
-    {
-      if (uselayer)
-      {
-        baseLayer.Forward(input, output);
-      }
-      else
-      {
-        output = weights * input;
-      }
-    }
-    else
-    {
-      if (uselayer)
-      {
-        // Set weight values to zero with probability ratio; the output is
-        // scaled by 1 / (1 - ratio) below.
-        mask = arma::randu<arma::Mat<eT> >(baseLayer.Weights().n_rows,
-            baseLayer.Weights().n_cols);
-        mask.transform([&](double val) { return (val > ratio); });
-
-        // Save weights for denoising.
-        denoise = baseLayer.Weights();
-
-        baseLayer.Weights() = baseLayer.Weights() % mask;
-
-        baseLayer.Forward(input, output);
-      }
-      else
-      {
-        // Set weight values to zero with probability ratio; the output is
-        // scaled by 1 / (1 - ratio) below.
-        mask = arma::randu<arma::Mat<eT> >(weights.n_rows, weights.n_cols);
-        mask.transform([&](double val) { return (val > ratio); });
-
-        // Save weights for denoising.
-        denoise = weights;
-
-        weights = weights % mask;
-        output = weights * input;
-      }
-
-      output = output * scale;
-    }
-  }
-
-  /**
-   * Ordinary feed backward pass of the DropConnect layer.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType>
-  void Backward(const DataType& input, const DataType& gy, DataType& g)
-  {
-    if (uselayer)
-    {
-      baseLayer.Backward(input, gy, g);
-    }
-    else
-    {
-      g = weights.t() * gy;
-    }
-  }
-
-  /**
-   * Calculate the gradient using the output delta and the input activation.
-   *
-   * @param input The propagated input.
-   * @param d The calculated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT, typename GradientDataType>
-  void Gradient(const InputType& input,
-                const arma::Mat<eT>& d,
-                GradientDataType& g)
-  {
-    if (uselayer)
-    {
-      baseLayer.Gradient(input, d, g);
-
-      // Denoise the weights.
-      baseLayer.Weights() = denoise;
-    }
-    else
-    {
-      g = d * input.t();
-
-      // Denoise the weights.
-      weights = denoise;
-    }
-  }
-
-  //! Get the weights.
-  OutputDataType const& Weights() const
-  {
-    if (uselayer)
-      return baseLayer.Weights();
-
-    return weights;
-  }
-
-  //! Modify the weights.
-  OutputDataType& Weights()
-  {
-    if (uselayer)
-      return baseLayer.Weights();
-
-    return weights;
-  }
-
-  //! Get the input parameter.
-  InputDataType &InputParameter() const
-  {
-    if (uselayer)
-      return baseLayer.InputParameter();
-
-    return inputParameter;
-  }
-
-  //! Modify the input parameter.
-  InputDataType &InputParameter()
-  {
-    if (uselayer)
-      return baseLayer.InputParameter();
-
-    return inputParameter;
-  }
-
-  //! Get the output parameter.
-  OutputDataType &OutputParameter() const
-  {
-    if (uselayer)
-      return baseLayer.OutputParameter();
-
-    return outputParameter;
-  }
-
-  //! Modify the output parameter.
-  OutputDataType &OutputParameter()
-  {
-    if (uselayer)
-      return baseLayer.OutputParameter();
-
-    return outputParameter;
-  }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const
-  {
-    if (uselayer)
-      return baseLayer.Delta();
-
-    return delta;
-  }
-
-  //! Modify the delta.
-  OutputDataType& Delta()
-  {
-    if (uselayer)
-      return baseLayer.Delta();
-
-    return delta;
-  }
-
-  //! Get the gradient.
-  OutputDataType const& Gradient() const
-  {
-    if (uselayer)
-      return baseLayer.Gradient();
-
-    return gradient;
-   }
-
-  //! Modify the gradient.
-  OutputDataType& Gradient()
-  {
-    if (uselayer)
-      return baseLayer.Gradient();
-
-    return gradient;
-  }
-
-  //! The value of the deterministic parameter.
-  bool Deterministic() const { return deterministic; }
-
-  //! Modify the value of the deterministic parameter.
-  bool &Deterministic() { return deterministic; }
-
-  //! The probability of setting a value to zero.
-  double Ratio() const { return ratio; }
-
-  //! Modify the probability of setting a value to zero.
-  void Ratio(const double r)
-  {
-    ratio = r;
-    scale = 1.0 / (1.0 - ratio);
-  }
-
- private:
-  //! Locally-stored layer object.
-  InputLayer baseLayer;
-
-  //! Locally stored number of input units.
-  size_t inSize;
-
-  //! Locally-stored number of output units.
-  size_t outSize;
-
-  //! The probability of setting a value to zero.
-  double ratio;
-
-  //! The scale fraction.
-  double scale;
-
-  //! If true the default layer is used otherwise a new layer will be created.
-  bool uselayer;
-
-  //! Locally-stored weight object.
-  OutputDataType weights;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  OutputDataType gradient;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! Locally-stored mask object.
-  OutputDataType mask;
-
-  //! If true, dropout and scaling are disabled; see the note above.
-  bool deterministic;
-
-  //! Denoise mask for the weights.
-  OutputDataType denoise;
-}; // class DropConnectLayer.
-
-}  // namespace ann
-}  // namespace mlpack
-
-#endif
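
A minimal sketch of driving the removed DropConnectLayer with its internal
linear transform (hypothetical sizes; pre-removal headers assumed available):

    #include <mlpack/methods/ann/layer/dropconnect_layer.hpp>

    using namespace mlpack::ann;

    int main()
    {
      DropConnectLayer<> dropConnect(10, 5, 0.3); // inSize, outSize, ratio.
      dropConnect.Weights().randu();

      arma::mat input = arma::randu<arma::mat>(10, 1);
      arma::mat output;

      // Testing: plain weights * input.
      dropConnect.Deterministic() = true;
      dropConnect.Forward(input, output); // output is 5 x 1.

      // Training: connections are masked and the output rescaled.
      dropConnect.Deterministic() = false;
      dropConnect.Forward(input, output);
    }
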
diff --git a/src/mlpack/methods/ann/layer/dropout_layer.hpp b/src/mlpack/methods/ann/layer/dropout_layer.hpp
deleted file mode 100644
index 2596698..0000000
--- a/src/mlpack/methods/ann/layer/dropout_layer.hpp
+++ /dev/null
@@ -1,252 +0,0 @@
-/**
- * @file dropout_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the DropoutLayer class, which implements a regularizer that
- * randomly sets units to zero, preventing units from co-adapting.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_DROPOUT_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_DROPOUT_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The dropout layer is a regularizer that randomly, with probability ratio,
- * sets input values to zero and scales the remaining elements by a factor of
- * 1 / (1 - ratio) when deterministic is false. In deterministic mode (during
- * testing), the layer passes the input through unchanged, or scales it by
- * 1 / (1 - ratio) if rescale is true.
- *
- * Note: During training you should set deterministic to false and during
- * testing you should set deterministic to true.
- *
- * For more information, see the following.
- *
- * @code
- * @article{Hinton2012,
- *   author  = {Geoffrey E. Hinton and Nitish Srivastava and Alex Krizhevsky
- *              and Ilya Sutskever and Ruslan Salakhutdinov},
- *   title   = {Improving neural networks by preventing co-adaptation of feature
- *              detectors},
- *   journal = {CoRR},
- *   volume  = {abs/1207.0580},
- *   year    = {2012},
- * }
- * @endcode
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class DropoutLayer
-{
- public:
-
-  /**
-   * Create the DropoutLayer object using the specified ratio and rescale
-   * parameter.
-   *
-   * @param ratio The probability of setting a value to zero.
-   * @param rescale If true, the input is rescaled in deterministic mode.
-   */
-  DropoutLayer(const double ratio = 0.5,
-               const bool rescale = true) :
-      ratio(ratio),
-      scale(1.0 / (1.0 - ratio)),
-      rescale(rescale)
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of the dropout layer.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    // The dropout mask will not be multiplied in the deterministic mode
-    // (during testing).
-    if (deterministic)
-    {
-      if (!rescale)
-      {
-        output = input;
-      }
-      else
-      {
-        output = input * scale;
-      }
-    }
-    else
-    {
-      // Scale with input / (1 - ratio) and set values to zero with probability
-      // ratio.
-      mask = arma::randu<arma::Mat<eT> >(input.n_rows, input.n_cols);
-      mask.transform( [&](double val) { return (val > ratio); } );
-      output = input % mask * scale;
-    }
-  }
-
-  /**
-   * Ordinary feed forward pass of the dropout layer.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Cube<eT>& input, arma::Cube<eT>& output)
-  {
-    // The dropout mask will not be multiplied in the deterministic mode
-    // (during testing).
-    if (deterministic)
-    {
-      if (!rescale)
-      {
-        output = input;
-      }
-      else
-      {
-        output = input * scale;
-      }
-    }
-    else
-    {
-      // Scale with input / (1 - ratio) and set values to zero with probability
-      // ratio.
-      mask = arma::randu<arma::Cube<eT> >(input.n_rows, input.n_cols,
-          input.n_slices);
-      mask.transform( [&](double val) { return (val > ratio); } );
-      output = input % mask * scale;
-    }
-  }
-
-  /**
-   * Ordinary feed backward pass of the dropout layer.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType>
-  void Backward(const DataType& /* unused */,
-                const DataType& gy,
-                DataType& g)
-  {
-    g = gy % mask * scale;
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! The value of the deterministic parameter.
-  bool Deterministic() const { return deterministic; }
-  //! Modify the value of the deterministic parameter.
-  bool& Deterministic() { return deterministic; }
-
-  //! The probability of setting a value to zero.
-  double Ratio() const { return ratio; }
-
-  //! Modify the probability of setting a value to zero.
-  void Ratio(const double r)
-  {
-    ratio = r;
-    scale = 1.0 / (1.0 - ratio);
-  }
-
-  //! The value of the rescale parameter.
-  bool Rescale() const { return rescale; }
-  //! Modify the value of the rescale parameter.
-  bool& Rescale() { return rescale; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(ratio, "ratio");
-    ar & data::CreateNVP(rescale, "rescale");
-  }
-
- private:
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! Locally-stored mask object.
-  OutputDataType mask;
-
-  //! The probability of setting a value to zero.
-  double ratio;
-
-  //! The scale fraction.
-  double scale;
-
-  //! If true, dropout and scaling are disabled; see the note above.
-  bool deterministic;
-
-  //! If true, the input is rescaled in deterministic mode.
-  bool rescale;
-}; // class DropoutLayer
-
-//! Layer traits for the dropout layer.
-template <
-  typename InputDataType,
-  typename OutputDataType
->
-class LayerTraits<DropoutLayer<InputDataType, OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-/**
- * Standard 2D dropout layer, operating on 3rd-order tensors (arma::cube).
- */
-template <
-    typename InputDataType = arma::cube,
-    typename OutputDataType = arma::cube
->
-using DropoutLayer2D = DropoutLayer<InputDataType, OutputDataType>;
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
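
A minimal sketch of the removed DropoutLayer in training and testing modes
(hypothetical data; pre-removal header assumed available):

    #include <mlpack/methods/ann/layer/dropout_layer.hpp>

    using namespace mlpack::ann;

    int main()
    {
      DropoutLayer<> dropout(0.5); // Drop each value with probability 0.5.

      arma::mat input = arma::randu<arma::mat>(4, 4);
      arma::mat output;

      // Training: mask the input and scale by 1 / (1 - 0.5).
      dropout.Deterministic() = false;
      dropout.Forward(input, output);

      // Testing: with rescale == true, output = input * scale.
      dropout.Deterministic() = true;
      dropout.Forward(input, output);
    }
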
diff --git a/src/mlpack/methods/ann/layer/glimpse_layer.hpp b/src/mlpack/methods/ann/layer/glimpse_layer.hpp
deleted file mode 100644
index 64f04d5..0000000
--- a/src/mlpack/methods/ann/layer/glimpse_layer.hpp
+++ /dev/null
@@ -1,484 +0,0 @@
-/**
- * @file glimpse_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the GlimpseLayer class, which takes an input image and a
- * location to extract a retina-like representation of the input image at
- * different increasing scales.
- *
- * For more information, see the following.
- *
- * @code
- * @article{CoRR2014,
- *   author  = {Volodymyr Mnih and Nicolas Heess and Alex Graves and
- *              Koray Kavukcuoglu},
- *   title   = {Recurrent Models of Visual Attention},
- *   journal = {CoRR},
- *   volume  = {abs/1406.6247},
- *   year    = {2014},
- * }
- * @endcode
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_GLIMPSE_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_GLIMPSE_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/pooling_rules/mean_pooling.hpp>
-#include <algorithm>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The glimpse layer returns a retina-like representation
- * (down-scaled cropped images) of increasing scale around a given location in a
- * given image.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::cube,
-    typename OutputDataType = arma::cube
->
-class GlimpseLayer
-{
- public:
-
-  /**
-   * Create the GlimpseLayer object using the specified parameters.
-   *
-   * @param inSize The size of the input units.
-   * @param size The used glimpse size (height = width).
-   * @param depth The number of patches to crop per glimpse.
-   * @param scale The scaling factor used to create the increasing retina-like
-   *        representation.
-   */
-  GlimpseLayer(const size_t inSize,
-               const size_t size,
-               const size_t depth = 3,
-               const size_t scale = 2) :
-      inSize(inSize),
-      size(size),
-      depth(depth),
-      scale(scale)
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of the glimpse layer.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Cube<eT>& input, arma::Cube<eT>& output)
-  {
-    output = arma::Cube<eT>(size, size, depth * input.n_slices);
-
-    inputDepth = input.n_slices / inSize;
-
-    for (size_t inputIdx = 0; inputIdx < inSize; inputIdx++)
-    {
-      for (size_t depthIdx = 0, glimpseSize = size;
-          depthIdx < depth; depthIdx++, glimpseSize *= scale)
-      {
-        size_t padSize = std::floor((glimpseSize - 1) / 2);
-
-        arma::Cube<eT> inputPadded = arma::zeros<arma::Cube<eT> >(
-            input.n_rows + padSize * 2, input.n_cols + padSize * 2,
-            input.n_slices / inSize);
-
-        inputPadded.tube(padSize, padSize, padSize + input.n_rows - 1,
-            padSize + input.n_cols - 1) = input.subcube(0, 0,
-            inputIdx * inputDepth, input.n_rows - 1, input.n_cols - 1,
-            (inputIdx + 1) * inputDepth - 1);
-
-        size_t h = inputPadded.n_rows - glimpseSize;
-        size_t w = inputPadded.n_cols - glimpseSize;
-
-        size_t x = std::min(h, (size_t) std::max(0.0,
-            (location(0, inputIdx) + 1) / 2.0 * h));
-        size_t y = std::min(w, (size_t) std::max(0.0,
-            (location(1, inputIdx) + 1) / 2.0 * w));
-
-        if (depthIdx == 0)
-        {
-          for (size_t j = (inputIdx + depthIdx), paddedSlice = 0;
-              j < output.n_slices; j += (inSize * depth), paddedSlice++)
-          {
-            output.slice(j) = inputPadded.subcube(x, y,
-                paddedSlice, x + glimpseSize - 1, y + glimpseSize - 1,
-                paddedSlice);
-          }
-        }
-        else
-        {
-          for (size_t j = (inputIdx + depthIdx * (depth - 1)), paddedSlice = 0;
-              j < output.n_slices; j += (inSize * depth), paddedSlice++)
-          {
-            arma::Mat<eT> poolingInput = inputPadded.subcube(x, y,
-                paddedSlice, x + glimpseSize - 1, y + glimpseSize - 1,
-                paddedSlice);
-
-            if (scale == 2)
-            {
-              Pooling(glimpseSize / size, poolingInput, output.slice(j));
-            }
-            else
-            {
-              ReSampling(poolingInput, output.slice(j));
-            }
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Ordinary feed backward pass of the glimpse layer.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename ErrorType, typename eT>
-  void Backward(const InputType& input,
-                const ErrorType& gy,
-                arma::Cube<eT>& g)
-  {
-    // Generate a cube using the backpropagated error matrix.
-    arma::Cube<eT> mappedError = arma::zeros<arma::cube>(input.n_rows,
-        input.n_cols, input.n_slices);
-
-    for (size_t s = 0, j = 0; s < mappedError.n_slices; s+= gy.n_cols, j++)
-    {
-      for (size_t i = 0; i < gy.n_cols; i++)
-      {
-        arma::Col<eT> temp = gy.col(i).subvec(
-            j * input.n_rows * input.n_cols,
-            (j + 1) * input.n_rows * input.n_cols - 1);
-
-        mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
-            input.n_rows, input.n_cols);
-      }
-    }
-
-    g = arma::zeros<arma::cube>(inputParameter.n_rows, inputParameter.n_cols,
-        inputParameter.n_slices);
-
-    for (size_t inputIdx = 0; inputIdx < inSize; inputIdx++)
-    {
-      for (size_t depthIdx = 0, glimpseSize = size;
-          depthIdx < depth; depthIdx++, glimpseSize *= scale)
-      {
-        size_t padSize = std::floor((glimpseSize - 1) / 2);
-
-        arma::Cube<eT> inputPadded = arma::zeros<arma::Cube<eT> >(
-            inputParameter.n_rows + padSize * 2, inputParameter.n_cols +
-            padSize * 2, inputParameter.n_slices / inSize);
-
-        size_t h = inputPadded.n_rows - glimpseSize;
-        size_t w = inputPadded.n_cols - glimpseSize;
-
-        size_t x = std::min(h, (size_t) std::max(0.0,
-            (location(0, inputIdx) + 1) / 2.0 * h));
-        size_t y = std::min(w, (size_t) std::max(0.0,
-            (location(1, inputIdx) + 1) / 2.0 * w));
-
-        if (depthIdx == 0)
-        {
-          for (size_t j = (inputIdx + depthIdx), paddedSlice = 0;
-              j < mappedError.n_slices; j += (inSize * depth), paddedSlice++)
-          {
-            inputPadded.subcube(x, y,
-            paddedSlice, x + glimpseSize - 1, y + glimpseSize - 1,
-            paddedSlice) = mappedError.slice(j);
-          }
-        }
-        else
-        {
-          for (size_t j = (inputIdx + depthIdx * (depth - 1)), paddedSlice = 0;
-              j < mappedError.n_slices; j += (inSize * depth), paddedSlice++)
-          {
-            arma::Mat<eT> poolingOutput = inputPadded.subcube(x, y,
-                 paddedSlice, x + glimpseSize - 1, y + glimpseSize - 1,
-                 paddedSlice);
-
-            if (scale == 2)
-            {
-              Unpooling(inputParameter.slice(paddedSlice), mappedError.slice(j),
-                  poolingOutput);
-            }
-            else
-            {
-              DownwardReSampling(inputParameter.slice(paddedSlice),
-                  mappedError.slice(j), poolingOutput);
-            }
-
-            inputPadded.subcube(x, y,
-                paddedSlice, x + glimpseSize - 1, y + glimpseSize - 1,
-                paddedSlice) = poolingOutput;
-          }
-        }
-
-        g += inputPadded.tube(padSize, padSize, padSize +
-            inputParameter.n_rows - 1, padSize + inputParameter.n_cols - 1);
-      }
-    }
-
-    Transform(g);
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Set the location, i.e., the x and y coordinates of the center of the
-  //! output glimpse.
-  void Location(const arma::mat& location)
-  {
-    this->location = location;
-  }
-
- private:
-  /*
-   * Transform the given input by changing rows to columns.
-   *
-   * @param w The input matrix used to perform the transformation.
-   */
-  void Transform(arma::mat& w)
-  {
-    arma::mat t = w;
-
-    for (size_t i = 0, k = 0; i < w.n_elem; k++)
-    {
-      for (size_t j = 0; j < w.n_cols; j++, i++)
-      {
-        w(k, j) = t(i);
-      }
-    }
-  }
-
-  /*
-   * Transform the given input by changing rows to columns.
-   *
-   * @param w The input matrix used to perform the transformation.
-   */
-  void Transform(arma::cube& w)
-  {
-    for (size_t i = 0; i < w.n_slices; i++)
-    {
-      arma::mat t = w.slice(i);
-      Transform(t);
-      w.slice(i) = t;
-    }
-  }
-
-  /**
-   * Apply pooling to the input and store the results to the output parameter.
-   *
-   * @param kSize The kernel size used to perform the pooling operation.
-   * @param input The input to which the pooling rule is applied.
-   * @param output The pooled result.
-   */
-  template<typename eT>
-  void Pooling(const size_t kSize,
-               const arma::Mat<eT>& input,
-               arma::Mat<eT>& output)
-  {
-    const size_t rStep = kSize;
-    const size_t cStep = kSize;
-
-    for (size_t j = 0; j < input.n_cols; j += cStep)
-    {
-      for (size_t i = 0; i < input.n_rows; i += rStep)
-      {
-        output(i / rStep, j / cStep) += pooling.Pooling(
-            input(arma::span(i, i + rStep - 1), arma::span(j, j + cStep - 1)));
-      }
-    }
-  }
-
-  /**
-   * Apply unpooling to the input and store the results.
-   *
-   * @param input The input to which the unpooling rule is applied.
-   * @param error The error used to perform the unpooling operation.
-   * @param output The unpooled result.
-   */
-  template<typename eT>
-  void Unpooling(const arma::Mat<eT>& input,
-                 const arma::Mat<eT>& error,
-                 arma::Mat<eT>& output)
-  {
-    const size_t rStep = input.n_rows / error.n_rows;
-    const size_t cStep = input.n_cols / error.n_cols;
-
-    arma::Mat<eT> unpooledError;
-    for (size_t j = 0; j < input.n_cols; j += cStep)
-    {
-      for (size_t i = 0; i < input.n_rows; i += rStep)
-      {
-        const arma::Mat<eT>& inputArea = input(arma::span(i, i + rStep - 1),
-                                               arma::span(j, j + cStep - 1));
-
-        pooling.Unpooling(inputArea, error(i / rStep, j / cStep),
-            unpooledError);
-
-        output(arma::span(i, i + rStep - 1),
-            arma::span(j, j + cStep - 1)) += unpooledError;
-      }
-    }
-  }
-
-  /**
-   * Apply ReSampling to the input and store the results in the output
-   * parameter.
-   *
-   * @param input The input to which the ReSampling rule is applied.
-   * @param output The resampled result.
-   */
-  template<typename eT>
-  void ReSampling(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    double wRatio = (double) (input.n_rows - 1) / (size - 1);
-    double hRatio = (double) (input.n_cols - 1) / (size - 1);
-
-    double iWidth = input.n_rows - 1;
-    double iHeight = input.n_cols - 1;
-
-    for (size_t y = 0; y < size; y++)
-    {
-      for (size_t x = 0; x < size; x++)
-      {
-        double ix = wRatio * x;
-        double iy = hRatio * y;
-
-        // Get the 4 nearest neighbors.
-        double ixNw = std::floor(ix);
-        double iyNw = std::floor(iy);
-        double ixNe = ixNw + 1;
-        double iySw = iyNw + 1;
-
-        // Get surfaces to each neighbor.
-        double se = (ix - ixNw) * (iy - iyNw);
-        double sw = (ixNe - ix) * (iy - iyNw);
-        double ne = (ix - ixNw) * (iySw - iy);
-        double nw = (ixNe - ix) * (iySw - iy);
-
-        // Calculate the weighted sum.
-        output(y, x) = input(iyNw, ixNw) * nw +
-            input(iyNw, std::min(ixNe,  iWidth)) * ne +
-            input(std::min(iySw, iHeight), ixNw) * sw +
-            input(std::min(iySw, iHeight), std::min(ixNe, iWidth)) * se;
-      }
-    }
-  }
-
-  /**
-   * Apply DownwardReSampling to the input and store the results into the output
-   * parameter.
-   *
-   * @param input The input to which the DownwardReSampling rule is applied.
-   * @param error The error used to perform the DownwardReSampling operation.
-   * @param output The DownwardReSampled result.
-   */
-  template<typename eT>
-  void DownwardReSampling(const arma::Mat<eT>& input,
-                          const arma::Mat<eT>& error,
-                          arma::Mat<eT>& output)
-  {
-    double iWidth = input.n_rows - 1;
-    double iHeight = input.n_cols - 1;
-
-    double wRatio = iWidth / (size - 1);
-    double hRatio = iHeight / (size - 1);
-
-    for (size_t y = 0; y < size; y++)
-    {
-      for (size_t x = 0; x < size; x++)
-      {
-        double ix = wRatio * x;
-        double iy = hRatio * y;
-
-        // Get the 4 nearest neighbors.
-        double ixNw = std::floor(ix);
-        double iyNw = std::floor(iy);
-        double ixNe = ixNw + 1;
-        double iySw = iyNw + 1;
-
-        // Get surfaces to each neighbor.
-        double se = (ix - ixNw) * (iy - iyNw);
-        double sw = (ixNe - ix) * (iy - iyNw);
-        double ne = (ix - ixNw) * (iySw - iy);
-        double nw = (ixNe - ix) * (iySw - iy);
-
-        double ograd = error(y, x);
-
-        output(iyNw, ixNw) = output(iyNw, ixNw) + nw * ograd;
-        output(iyNw, std::min(ixNe, iWidth)) = output(iyNw,
-            std::min(ixNe, iWidth)) + ne * ograd;
-        output(std::min(iySw, iHeight), ixNw) = output(std::min(iySw, iHeight),
-            ixNw) + sw * ograd;
-        output(std::min(iySw, iHeight), std::min(ixNe, iWidth)) = output(
-            std::min(iySw, iHeight), std::min(ixNe, iWidth)) + se * ograd;
-      }
-    }
-  }
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! Locally-stored depth of the input.
-  size_t inputDepth;
-
-  //! The size of the input units.
-  size_t inSize;
-
-  //! The used glimpse size (height = width).
-  size_t size;
-
-  //! The number of patches to crop per glimpse.
-  size_t depth;
-
-  //! The scale fraction.
-  size_t scale;
-
-  //! The x and y coordinate of the center of the output glimpse.
-  arma::mat location;
-
-  //! Locally-stored object to perform the mean pooling operation.
-  MeanPooling pooling;
-}; // class GlimpseLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
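
A minimal sketch of extracting a retina-like glimpse with the removed
GlimpseLayer (hypothetical image and location; pre-removal headers assumed
available):

    #include <mlpack/methods/ann/layer/glimpse_layer.hpp>

    using namespace mlpack::ann;

    int main()
    {
      // One input image, 8x8 glimpses, two patches per glimpse, scale 2.
      GlimpseLayer<> glimpse(1, 8, 2, 2);

      // Glimpse center in [-1, 1] coordinates; one column per input.
      arma::mat location(2, 1);
      location.fill(0.0); // Center of the image.
      glimpse.Location(location);

      arma::cube input = arma::randu<arma::cube>(32, 32, 1);
      arma::cube output;
      glimpse.Forward(input, output); // 8 x 8 x 2: one slice per patch.
    }
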
diff --git a/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp b/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp
deleted file mode 100644
index 5ebe613..0000000
--- a/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp
+++ /dev/null
@@ -1,259 +0,0 @@
-/**
- * @file hard_tanh_layer.hpp
- * @author Dhawal Arora
- *
- * Definition and implementation of the HardTanHLayer class.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_HARD_TANH_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_HARD_TANH_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The Hard Tanh activation function, defined by
- *
- * @f{eqnarray*}{
- * f(x) &=& \left\{
- *   \begin{array}{lr}
- *     max & : x > maxValue \\
- *     min & : x \le minValue \\
- *     x   & : otherwise
- *   \end{array}
- * \right. \\
- * f'(x) &=& \left\{
- *   \begin{array}{lr}
- *     0 & : x > maxValue \\
- *     0 & : x \le minValue \\
- *     1 & : otherwise
- *   \end{array}
- * \right.
- * @f}
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class HardTanHLayer
-{
- public:
-  /**
-   * Create the HardTanHLayer object using the specified parameters. The range
-   * of the linear region can be adjusted by specifying maxValue and minValue;
-   * the defaults are maxValue = 1 and minValue = -1.
-   *
-   * @param maxValue Range of the linear region maximum value.
-   * @param minValue Range of the linear region minimum value.
-   */
-  HardTanHLayer(const double maxValue = 1, const double minValue = -1) :
-      maxValue(maxValue), minValue(minValue)
-  {
-     // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename InputType, typename OutputType>
-  void Forward(const InputType& input, OutputType& output)
-  {
-    Fn(input, output);
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f. Using the results from the feed
-   * forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType>
-  void Backward(const DataType& input,
-                const DataType& gy,
-                DataType& g)
-  {
-    DataType derivative;
-    Deriv(input, derivative);
-    g = gy % derivative;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f. Using the results from the feed
-   * forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Cube<eT>& input,
-                const arma::Mat<eT>& gy,
-                arma::Cube<eT>& g)
-  {
-    // Generate a cube using the backpropagated error matrix.
-    arma::Cube<eT> mappedError = arma::zeros<arma::cube>(input.n_rows,
-        input.n_cols, input.n_slices);
-
-    for (size_t s = 0, j = 0; s < mappedError.n_slices; s+= gy.n_cols, j++)
-    {
-      for (size_t i = 0; i < gy.n_cols; i++)
-      {
-        arma::Col<eT> temp = gy.col(i).subvec(
-            j * input.n_rows * input.n_cols,
-            (j + 1) * input.n_rows * input.n_cols - 1);
-
-        mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
-            input.n_rows, input.n_cols);
-      }
-    }
-
-    arma::Cube<eT> derivative;
-    Deriv(input, derivative);
-    g = mappedError % derivative;
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the maximum value.
-  double const& MaxValue() const { return maxValue; }
-  //! Modify the maximum value.
-  double& MaxValue() { return maxValue; }
-
-  //! Get the minimum value.
-  double const& MinValue() const { return minValue; }
-  //! Modify the minimum value.
-  double& MinValue() { return minValue; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(maxValue, "maxValue");
-    ar & data::CreateNVP(minValue, "minValue");
-  }
-
- private:
-  /**
-   * Computes the HardTanH function.
-   *
-   * @param x Input data.
-   * @return f(x).
-   */
-  double Fn(const double x)
-  {
-    if (x > maxValue)
-      return maxValue;
-    else if (x < minValue)
-      return minValue;
-    return x;
-  }
-
-  /**
-   * Computes the HardTanH function using a dense matrix as input.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename eT>
-  void Fn(const arma::Mat<eT>& x, arma::Mat<eT>& y)
-  {
-    y = x;
-    y.transform( [&](eT val) { return std::min(
-        std::max( val, minValue ), maxValue ); } );
-  }
-
-  /**
-   * Computes the HardTanH function using a 3rd-order tensor as input.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename eT>
-  void Fn(const arma::Cube<eT>& x, arma::Cube<eT>& y)
-  {
-    y = x;
-    for (size_t s = 0; s < x.n_slices; s++)
-      Fn(x.slice(s), y.slice(s));
-  }
-
-  /**
-   * Computes the first derivative of the HardTanH function.
-   *
-   * @param x Input data.
-   * @return f'(x)
-   */
-  double Deriv(const double x)
-  {
-    return (x > maxValue || x < minValue) ? 0 : 1;
-  }
-
-  /**
-   * Computes the first derivative of the HardTanH function.
-   *
-   * @param x Input activations.
-   * @param y The resulting derivatives.
-   */
-  template<typename InputType, typename OutputType>
-  void Deriv(const InputType& x, OutputType& y)
-  {
-    y = x;
-
-    for (size_t i = 0; i < x.n_elem; i++)
-      y(i) = Deriv(x(i));
-  }
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! Maximum value for the HardTanH function.
-  double maxValue;
-
-  //! Minimum value for the HardTanH function.
-  double minValue;
-}; // class HardTanHLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
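
A minimal sketch of the removed HardTanHLayer clipping activations to the
default range [-1, 1] (hypothetical data; pre-removal header assumed
available):

    #include <mlpack/methods/ann/layer/hard_tanh_layer.hpp>

    using namespace mlpack::ann;

    int main()
    {
      HardTanHLayer<> hardTanH; // maxValue = 1, minValue = -1.

      arma::mat input("-2.0 -0.5 0.5 2.0");
      arma::mat output;
      hardTanH.Forward(input, output); // -1.0 -0.5 0.5 1.0.

      arma::mat gy = arma::ones<arma::mat>(1, 4);
      arma::mat g;
      hardTanH.Backward(input, gy, g); // 0 1 1 0: zero outside the range.
    }
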
diff --git a/src/mlpack/methods/ann/layer/leaky_relu_layer.hpp b/src/mlpack/methods/ann/layer/leaky_relu_layer.hpp
deleted file mode 100644
index deaecb3..0000000
--- a/src/mlpack/methods/ann/layer/leaky_relu_layer.hpp
+++ /dev/null
@@ -1,240 +0,0 @@
-/**
- * @file leaky_relu_layer.hpp
- * @author Dhawal Arora
- *
- * Definition and implementation of the LeakyReLULayer class, first introduced
- * in the acoustic model of Andrew L. Maas, Awni Y. Hannun, and Andrew Y. Ng,
- * "Rectifier Nonlinearities Improve Neural Network Acoustic Models", 2014.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_LEAKYRELU_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_LEAKYRELU_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The LeakyReLU activation function, defined by
- *
- * @f{eqnarray*}{
- * f(x) &=& \max(x, alpha*x) \\
- * f'(x) &=& \left\{
- *   \begin{array}{lr}
- *     1 & : x > 0 \\
- *     alpha & : x \le 0
- *   \end{array}
- * \right.
- * @f}
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class LeakyReLULayer
-{
- public:
-  /**
-   * Create the LeakyReLULayer object using the specified parameters.
-   * The non-zero gradient can be adjusted by specifying the parameter
-   * alpha in the range 0 to 1; the default is alpha = 0.03.
-   *
-   * @param alpha Non-zero gradient.
-   */
-  LeakyReLULayer(const double alpha = 0.03) : alpha(alpha)
-  {
-     // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename InputType, typename OutputType>
-  void Forward(const InputType& input, OutputType& output)
-  {
-    Fn(input, output);
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType>
-  void Backward(const DataType& input,
-                const DataType& gy,
-                DataType& g)
-  {
-    DataType derivative;
-    Deriv(input, derivative);
-    g = gy % derivative;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Cube<eT>& input,
-                const arma::Mat<eT>& gy,
-                arma::Cube<eT>& g)
-  {
-    // Generate a cube using the backpropagated error matrix.
-    arma::Cube<eT> mappedError = arma::zeros<arma::cube>(input.n_rows,
-        input.n_cols, input.n_slices);
-
-    for (size_t s = 0, j = 0; s < mappedError.n_slices; s += gy.n_cols, j++)
-    {
-      for (size_t i = 0; i < gy.n_cols; i++)
-      {
-        arma::Col<eT> temp = gy.col(i).subvec(
-            j * input.n_rows * input.n_cols,
-            (j + 1) * input.n_rows * input.n_cols - 1);
-
-        mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
-            input.n_rows, input.n_cols);
-      }
-    }
-
-    arma::Cube<eT> derivative;
-    Deriv(input, derivative);
-    g = mappedError % derivative;
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the non zero gradient.
-  double const& Alpha() const { return alpha; }
-  //! Modify the non zero gradient.
-  double& Alpha() { return alpha; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(alpha, "alpha");
-  }
-
- private:
-  /**
-   * Computes the LeakyReLU function.
-   *
-   * @param x Input data.
-   * @return f(x).
-   */
-  double Fn(const double x)
-  {
-    return std::max(x, alpha * x);
-  }
-
-  /**
-   * Computes the Leaky ReLU function using a dense matrix as input.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename eT>
-  void Fn(const arma::Mat<eT>& x, arma::Mat<eT>& y)
-  {
-    y = arma::max(x, alpha * x);
-  }
-
-  /**
-   * Computes the LeakyReLU function using a 3rd-order tensor as input.
-   *
-   * @param x Input data.
-   * @param y The resulting output activation.
-   */
-  template<typename eT>
-  void Fn(const arma::Cube<eT>& x, arma::Cube<eT>& y)
-  {
-    y = x;
-    for (size_t s = 0; s < x.n_slices; s++)
-      Fn(x.slice(s), y.slice(s));
-  }
-
-  /**
-   * Computes the first derivative of the LeakyReLU function.
-   *
-   * @param x Input data.
-   * @return f'(x)
-   */
-  double Deriv(const double x)
-  {
-    return (x >= 0) ? 1 : alpha;
-  }
-
-  /**
-   * Computes the first derivative of the LeakyReLU function.
-   *
-   * @param x Input activations.
-   * @param y The resulting derivatives.
-   */
-  template<typename InputType, typename OutputType>
-  void Deriv(const InputType& x, OutputType& y)
-  {
-    y = x;
-
-    for (size_t i = 0; i < x.n_elem; i++)
-      y(i) = Deriv(x(i));
-  }
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! Leakiness parameter, in the range 0 < alpha < 1.
-  double alpha;
-
-}; // class LeakyReLULayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
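
Similarly, the core of the removed LeakyReLULayer is the elementwise rule f(x) = max(x, alpha * x), with derivative 1 for x >= 0 and alpha otherwise. A hedged Armadillo-only sketch of both passes (variable names are illustrative):

    #include <armadillo>

    int main()
    {
      const double alpha = 0.03;
      arma::mat x = arma::randn<arma::mat>(4, 3);

      // Forward: elementwise max(x, alpha * x).
      arma::mat y = arma::max(x, alpha * x);

      // Backward: multiply the incoming error by the derivative.
      arma::mat gy = arma::ones<arma::mat>(4, 3);
      arma::mat deriv = x;
      deriv.transform([alpha](double v) { return (v >= 0) ? 1.0 : alpha; });
      arma::mat g = gy % deriv;

      y.print("leaky relu:");
      return 0;
    }
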
diff --git a/src/mlpack/methods/ann/layer/linear_layer.hpp b/src/mlpack/methods/ann/layer/linear_layer.hpp
deleted file mode 100644
index 17c4626..0000000
--- a/src/mlpack/methods/ann/layer/linear_layer.hpp
+++ /dev/null
@@ -1,289 +0,0 @@
-/**
- * @file linear_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the LinearLayer class also known as fully-connected layer or
- * affine transformation.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_LINEAR_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_LINEAR_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the LinearLayer class. The LinearLayer class represents a
- * single layer of a neural network.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class LinearLayer
-{
- public:
-  /**
-   * Create the LinearLayer object using the specified number of units.
-   *
-   * @param inSize The number of input units.
-   * @param outSize The number of output units.
-   */
-  LinearLayer(const size_t inSize, const size_t outSize) :
-      inSize(inSize),
-      outSize(outSize)
-  {
-    weights.set_size(outSize, inSize);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    output = weights * input;
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Cube<eT>& input, arma::Mat<eT>& output)
-  {
-    arma::Mat<eT> data(input.n_elem, 1);
-
-    for (size_t s = 0, c = 0; s < input.n_slices / data.n_cols; s++)
-    {
-      for (size_t i = 0; i < data.n_cols; i++, c++)
-      {
-        data.col(i).subvec(s * input.n_rows * input.n_cols, (s + 1) *
-            input.n_rows * input.n_cols - 1) = arma::trans(arma::vectorise(
-            input.slice(c), 1));
-      }
-    }
-
-    output = weights * data;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT>
-  void Backward(const InputType& /* unused */,
-                const arma::Mat<eT>& gy,
-                arma::Mat<eT>& g)
-  {
-    g = weights.t() * gy;
-  }
-
-  /*
-   * Calculate the gradient using the output delta and the input activation.
-   *
-   * @param input The propagated input.
-   * @param error The calculated error.
-   * @param gradient The calculated gradient.
-   */
-  template<typename InputType, typename ErrorType, typename GradientType>
-  void Gradient(const InputType& input,
-                const ErrorType& error,
-                GradientType& gradient)
-  {
-    GradientDelta(input, error, gradient);
-  }
-
-  //! Get the weights.
-  OutputDataType const& Weights() const { return weights; }
-  //! Modify the weights.
-  OutputDataType& Weights() { return weights; }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the gradient.
-  OutputDataType const& Gradient() const { return gradient; }
-  //! Modify the gradient.
-  OutputDataType& Gradient() { return gradient; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(weights, "weights");
-  }
-
- private:
-  /*
-   * Calculate the gradient using the output delta (3rd order tensor) and the
-   * input activation (3rd order tensor).
-   *
-   * @param input The input parameter used for calculating the gradient.
-   * @param d The output delta.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void GradientDelta(const arma::Cube<eT>& input,
-                     const arma::Mat<eT>& d,
-                     arma::Cube<eT>& g)
-  {
-    g = arma::Cube<eT>(weights.n_rows, weights.n_cols, 1);
-    arma::Mat<eT> data = arma::Mat<eT>(d.n_cols,
-        input.n_elem / d.n_cols);
-
-    for (size_t s = 0, c = 0; s < input.n_slices / data.n_rows; s++)
-    {
-      for (size_t i = 0; i < data.n_rows; i++, c++)
-      {
-        data.row(i).subvec(s * input.n_rows * input.n_cols,
-            (s + 1) * input.n_rows * input.n_cols - 1) =
-            arma::vectorise(input.slice(c), 1);
-      }
-    }
-
-    g.slice(0) = d * data / d.n_cols;
-  }
-
-  /*
-   * Calculate the gradient (3rd order tensor) using the output delta
-   * (dense matrix) and the input activation (dense matrix).
-   *
-   * @param input The input parameter used for calculating the gradient.
-   * @param d The output delta.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void GradientDelta(const arma::Mat<eT>& input,
-                     const arma::Mat<eT>& d,
-                     arma::Cube<eT>& g)
-  {
-    g = arma::Cube<eT>(weights.n_rows, weights.n_cols, 1);
-    Gradient(input, d, g.slice(0));
-  }
-
-  /*
-   * Calculate the gradient (dense matrix) using the output delta
-   * (dense matrix) and the input activation (3rd order tensor).
-   *
-   * @param input The input parameter used for calculating the gradient.
-   * @param d The output delta.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void GradientDelta(const arma::Cube<eT>& input,
-                     const arma::Mat<eT>& d,
-                     arma::Mat<eT>& g)
-  {
-    arma::Cube<eT> grad = arma::Cube<eT>(weights.n_rows, weights.n_cols, 1);
-    Gradient(input, d, grad);
-    g = grad.slice(0);
-  }
-
-  /*
-   * Calculate the gradient (dense matrix) using the output delta
-   * (dense matrix) and the input activation (dense matrix).
-   *
-   * @param input The input parameter used for calculating the gradient.
-   * @param d The output delta.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void GradientDelta(const arma::Mat<eT>& input,
-                     const arma::Mat<eT>& d,
-                     arma::Mat<eT>& g)
-  {
-    g = d * input.t();
-  }
-
-  //! Locally-stored number of input units.
-  size_t inSize;
-
-  //! Locally-stored number of output units.
-  size_t outSize;
-
-  //! Locally-stored weight object.
-  OutputDataType weights;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  OutputDataType gradient;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class LinearLayer
-
-/**
- * Linear Mapping layer to map between 3rd order tensors and dense matrices.
- */
-template <
-    typename InputDataType = arma::cube,
-    typename OutputDataType = arma::mat
->
-using LinearMappingLayer = LinearLayer<InputDataType, OutputDataType>;
-
-//! Layer traits for the linear layer.
-template<
-    typename InputDataType,
-    typename OutputDataType
->
-class LayerTraits<LinearLayer<InputDataType, OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
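
For the removed LinearLayer, the dense-matrix case reduces to three matrix products. A short sketch of the forward pass, backpropagated error, and weight gradient, matching the member functions above (names are illustrative):

    #include <armadillo>

    int main()
    {
      arma::mat weights = arma::randn<arma::mat>(5, 10);  // outSize x inSize.
      arma::mat input = arma::randn<arma::mat>(10, 1);

      arma::mat output = weights * input;           // Forward().
      arma::mat gy = arma::randn<arma::mat>(5, 1);  // Backpropagated error.
      arma::mat g = weights.t() * gy;               // Backward().
      arma::mat gradient = gy * input.t();          // GradientDelta().

      return 0;
    }
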
diff --git a/src/mlpack/methods/ann/layer/log_softmax_layer.hpp b/src/mlpack/methods/ann/layer/log_softmax_layer.hpp
deleted file mode 100644
index 32aa2d5..0000000
--- a/src/mlpack/methods/ann/layer/log_softmax_layer.hpp
+++ /dev/null
@@ -1,131 +0,0 @@
-/**
- * @file log_softmax_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the LogSoftmaxLayer class.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_LOG_SOFTMAX_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_LOG_SOFTMAX_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the log softmax layer. The log softmax loss layer computes
- * the multinomial logistic loss of the softmax of its inputs. This layer is
- * meant to be used in combination with the negative log likelihood layer
- * (NegativeLogLikelihoodLayer), which expects that the input contains
- * log-probabilities for each class.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class LogSoftmaxLayer
-{
- public:
-  /**
-   * Create the LogSoftmaxLayer object.
-   */
-  LogSoftmaxLayer() { /* Nothing to do here. */ }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    arma::mat maxInput = arma::repmat(arma::max(input), input.n_rows, 1);
-    output = (maxInput - input);
-
-    // Fast approximation of exp(-x); the accuracy is about 0.00001 lower
-    // than using std::exp(). Credits go to Leon Bottou.
-    output.transform( [](double x)
-    {
-      //! Fast approximation of exp(-x) for x positive.
-      static constexpr double A0 = 1.0;
-      static constexpr double A1 = 0.125;
-      static constexpr double A2 = 0.0078125;
-      static constexpr double A3 = 0.00032552083;
-      static constexpr double A4 = 1.0172526e-5;
-
-      if (x < 13.0)
-      {
-        double y = A0 + x * (A1 + x * (A2 + x * (A3 + x * A4)));
-        y *= y;
-        y *= y;
-        y *= y;
-        y = 1 / y;
-
-        return y;
-      }
-
-      return 0.0;
-    } );
-
-    output = input - (maxInput + std::log(arma::accu(output)));
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards trough f. Using the results from the feed
-   * forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Mat<eT>& input,
-                const arma::Mat<eT>& gy,
-                arma::Mat<eT>& g)
-  {
-    g = gy - arma::exp(input) * arma::accu(gy);
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
- private:
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class LogSoftmaxLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
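
The removed LogSoftmaxLayer implements the numerically stable identity log softmax(x)_i = x_i - (max(x) + log(sum_j exp(x_j - max(x)))), using the fast exp approximation above. An exact Armadillo sketch of the same quantity (illustrative, not mlpack API):

    #include <armadillo>

    int main()
    {
      arma::vec input = arma::randn<arma::vec>(10);

      // Subtract the max before exponentiating to avoid overflow.
      const double maxVal = input.max();
      arma::vec output = input -
          (maxVal + std::log(arma::accu(arma::exp(input - maxVal))));

      output.print("log-softmax:");
      return 0;
    }
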
diff --git a/src/mlpack/methods/ann/layer/lstm_layer.hpp b/src/mlpack/methods/ann/layer/lstm_layer.hpp
deleted file mode 100644
index 7ffe1a8..0000000
--- a/src/mlpack/methods/ann/layer/lstm_layer.hpp
+++ /dev/null
@@ -1,418 +0,0 @@
-/**
- * @file lstm_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the LSTMLayer class, which implements an LSTM network
- * layer.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_LSTM_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_LSTM_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of an LSTM network layer.
- *
- * This class allows specification of the type of the activation functions used
- * for the gates and cells and also of the type of the function used to
- * initialize and update the peephole weights.
- *
- * @tparam GateActivationFunction Activation function used for the gates.
- * @tparam StateActivationFunction Activation function used for the state.
- * @tparam OutputActivationFunction Activation function used for the output.
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    class GateActivationFunction = LogisticFunction,
-    class StateActivationFunction = TanhFunction,
-    class OutputActivationFunction = TanhFunction,
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class LSTMLayer
-{
- public:
-  /**
-   * Create the LSTMLayer object using the specified parameters.
-   *
-   * @param outSize The number of output units.
-   * @param peepholes The flag used to indicate if peephole connections should
-   *        be used (Default: false).
-   */
-  LSTMLayer(const size_t outSize, const bool peepholes = false) :
-      outSize(outSize),
-      peepholes(peepholes),
-      seqLen(1),
-      offset(0)
-  {
-    if (peepholes)
-    {
-      peepholeWeights.set_size(outSize, 3);
-      peepholeDerivatives = arma::zeros<OutputDataType>(outSize, 3);
-    }
-    else
-    {
-      peepholeWeights.set_size(0, 0);
-    }
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    if (inGate.n_cols < seqLen)
-    {
-      inGate = arma::zeros<InputDataType>(outSize, seqLen);
-      inGateAct = arma::zeros<InputDataType>(outSize, seqLen);
-      inGateError = arma::zeros<InputDataType>(outSize, seqLen);
-      outGate = arma::zeros<InputDataType>(outSize, seqLen);
-      outGateAct = arma::zeros<InputDataType>(outSize, seqLen);
-      outGateError = arma::zeros<InputDataType>(outSize, seqLen);
-      forgetGate = arma::zeros<InputDataType>(outSize, seqLen);
-      forgetGateAct = arma::zeros<InputDataType>(outSize, seqLen);
-      forgetGateError = arma::zeros<InputDataType>(outSize, seqLen);
-      state = arma::zeros<InputDataType>(outSize, seqLen);
-      stateError = arma::zeros<InputDataType>(outSize, seqLen);
-      cellAct = arma::zeros<InputDataType>(outSize, seqLen);
-    }
-
-    // Split the input activation into the 3 parts (inGate, forgetGate,
-    // outGate).
-    inGate.col(offset) = input.submat(0, 0, outSize - 1, 0);
-
-    forgetGate.col(offset) = input.submat(outSize, 0, (outSize * 2) - 1, 0);
-    outGate.col(offset) = input.submat(outSize * 3, 0, (outSize * 4) - 1, 0);
-
-    if (peepholes && offset > 0)
-    {
-      inGate.col(offset) += peepholeWeights.col(0) % state.col(offset - 1);
-      forgetGate.col(offset) += peepholeWeights.col(1) %
-          state.col(offset - 1);
-    }
-
-    arma::Col<eT> inGateActivation = inGateAct.unsafe_col(offset);
-    GateActivationFunction::fn(inGate.unsafe_col(offset), inGateActivation);
-
-    arma::Col<eT> forgetGateActivation = forgetGateAct.unsafe_col(offset);
-    GateActivationFunction::fn(forgetGate.unsafe_col(offset),
-        forgetGateActivation);
-
-    arma::Col<eT> cellActivation = cellAct.unsafe_col(offset);
-    StateActivationFunction::fn(input.submat(outSize * 2, 0,
-        (outSize * 3) - 1, 0), cellActivation);
-
-    state.col(offset) = inGateAct.col(offset) % cellActivation;
-
-    if (offset > 0)
-      state.col(offset) += forgetGateAct.col(offset) % state.col(offset - 1);
-
-    if (peepholes)
-      outGate.col(offset) += peepholeWeights.col(2) % state.col(offset);
-
-    arma::Col<eT> outGateActivation = outGateAct.unsafe_col(offset);
-    GateActivationFunction::fn(outGate.unsafe_col(offset), outGateActivation);
-
-    OutputActivationFunction::fn(state.unsafe_col(offset), output);
-    output = outGateAct.col(offset) % output;
-
-    offset = (offset + 1) % seqLen;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT>
-  void Backward(const InputType& /* unused */,
-                const arma::Mat<eT>& gy,
-                arma::Mat<eT>& g)
-  {
-    queryOffset = seqLen - offset - 1;
-
-    arma::Col<eT> outGateDerivative;
-    GateActivationFunction::deriv(outGateAct.unsafe_col(queryOffset),
-        outGateDerivative);
-
-    arma::Col<eT> stateActivation;
-    StateActivationFunction::fn(state.unsafe_col(queryOffset), stateActivation);
-
-    outGateError.col(queryOffset) = outGateDerivative % gy % stateActivation;
-
-    arma::Col<eT> stateDerivative;
-    StateActivationFunction::deriv(stateActivation, stateDerivative);
-
-    stateError.col(queryOffset) = gy % outGateAct.col(queryOffset) %
-        stateDerivative;
-
-    if (queryOffset < (seqLen - 1))
-    {
-      stateError.col(queryOffset) += stateError.col(queryOffset + 1) %
-          forgetGateAct.col(queryOffset + 1);
-
-      if (peepholes)
-      {
-        stateError.col(queryOffset) += inGateError.col(queryOffset + 1) %
-            peepholeWeights.col(0);
-        stateError.col(queryOffset) += forgetGateError.col(queryOffset + 1) %
-            peepholeWeights.col(1);
-      }
-    }
-
-    if (peepholes)
-    {
-      stateError.col(queryOffset) += outGateError.col(queryOffset) %
-          peepholeWeights.col(2);
-    }
-
-    arma::Col<eT> cellDerivative;
-    StateActivationFunction::deriv(cellAct.col(queryOffset), cellDerivative);
-
-    arma::Col<eT> cellError = inGateAct.col(queryOffset) % cellDerivative %
-        stateError.col(queryOffset);
-
-    if (queryOffset > 0)
-    {
-      arma::Col<eT> forgetGateDerivative;
-      GateActivationFunction::deriv(forgetGateAct.col(queryOffset),
-          forgetGateDerivative);
-
-      forgetGateError.col(queryOffset) = forgetGateDerivative %
-          stateError.col(queryOffset) % state.col(queryOffset - 1);
-    }
-
-    arma::Col<eT> inGateDerivative;
-    GateActivationFunction::deriv(inGateAct.col(queryOffset), inGateDerivative);
-
-    inGateError.col(queryOffset) = inGateDerivative %
-        stateError.col(queryOffset) % cellAct.col(queryOffset);
-
-    if (peepholes)
-    {
-      peepholeDerivatives.col(2) += outGateError.col(queryOffset) %
-          state.col(queryOffset);
-
-      if (queryOffset > 0)
-      {
-        peepholeDerivatives.col(0) += inGateError.col(queryOffset) %
-            state.col(queryOffset - 1);
-        peepholeDerivatives.col(1) += forgetGateError.col(queryOffset) %
-            state.col(queryOffset - 1);
-      }
-    }
-
-    g = arma::zeros<arma::Mat<eT> >(outSize * 4, 1);
-    g.submat(0, 0, outSize - 1, 0) = inGateError.col(queryOffset);
-    g.submat(outSize, 0, (outSize * 2) - 1, 0) =
-        forgetGateError.col(queryOffset);
-    g.submat(outSize * 2, 0, (outSize * 3) - 1, 0) = cellError;
-    g.submat(outSize * 3, 0, (outSize * 4) - 1, 0) =
-        outGateError.col(queryOffset);
-
-    offset = (offset + 1) % seqLen;
-  }
-
-  /**
-   * Calculate the gradient of the peephole weights.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT, typename GradientDataType>
-  void Gradient(const InputType& /* input */,
-                const arma::Mat<eT>& /* gy */,
-                GradientDataType& /* g */)
-  {
-    if (peepholes && offset == 0)
-    {
-      peepholeGradient.col(0) = arma::trans((peepholeWeights.col(0).t() *
-          (inGateError.col(queryOffset) % peepholeDerivatives.col(0))) *
-          inGate.col(queryOffset).t());
-
-      peepholeGradient.col(1) = arma::trans((peepholeWeights.col(1).t() *
-          (forgetGateError.col(queryOffset) % peepholeDerivatives.col(1))) *
-          forgetGate.col(queryOffset).t());
-
-      peepholeGradient.col(2) = arma::trans((peepholeWeights.col(2).t() *
-          (outGateError.col(queryOffset) % peepholeDerivatives.col(2))) *
-          outGate.col(queryOffset).t());
-
-      peepholeDerivatives.zeros();
-    }
-  }
-
-  //! Get the peephole weights.
-  OutputDataType const& Weights() const { return peepholeWeights; }
-  //! Modify the peephole weights.
-  OutputDataType& Weights() { return peepholeWeights; }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the peephole gradient.
-  OutputDataType const& Gradient() const { return peepholeGradient; }
-  //! Modify the peephole gradient.
-  OutputDataType& Gradient() { return peepholeGradient; }
-
-  //! Get the sequence length.
-  size_t SeqLen() const { return seqLen; }
-  //! Modify the sequence length.
-  size_t& SeqLen() { return seqLen; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(peepholes, "peepholes");
-
-    if (peepholes)
-    {
-      ar & data::CreateNVP(peepholeWeights, "peepholeWeights");
-
-      if (Archive::is_loading::value)
-      {
-        peepholeDerivatives = arma::zeros<OutputDataType>(
-            peepholeWeights.n_rows, 3);
-      }
-    }
-  }
-
- private:
-  //! Locally-stored number of output units.
-  size_t outSize;
-
-  //! Locally-stored peephole indication flag.
-  bool peepholes;
-
-  //! Locally-stored length of the input sequence.
-  size_t seqLen;
-
-  //! Locally-stored sequence offset.
-  size_t offset;
-
-  //! Locally-stored query offset.
-  size_t queryOffset;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  OutputDataType gradient;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! Locally-stored ingate object.
-  InputDataType inGate;
-
-  //! Locally-stored ingate activation object.
-  InputDataType inGateAct;
-
-  //! Locally-stored ingate error object.
-  InputDataType inGateError;
-
-  //! Locally-stored outgate object.
-  InputDataType outGate;
-
-  //! Locally-stored outgate activation object.
-  InputDataType outGateAct;
-
-  //! Locally-stored outgate error object.
-  InputDataType outGateError;
-
-  //! Locally-stored forget object.
-  InputDataType forgetGate;
-
-  //! Locally-stored forget activation object.
-  InputDataType forgetGateAct;
-
-  //! Locally-stored forget error object.
-  InputDataType forgetGateError;
-
-  //! Locally-stored state object.
-  InputDataType state;
-
-  //! Locally-stored state error object.
-  InputDataType stateError;
-
-  //! Locally-stored cell activation object.
-  InputDataType cellAct;
-
-  //! Locally-stored peephole weight object.
-  OutputDataType peepholeWeights;
-
-  //! Locally-stored derivatives object.
-  OutputDataType peepholeDerivatives;
-
-  //! Locally-stored peephole gradient object.
-  OutputDataType peepholeGradient;
-}; // class LSTMLayer
-
-//! Layer traits for the LSTM layer.
-template<
-    class GateActivationFunction,
-    class StateActivationFunction,
-    class OutputActivationFunction,
-    typename InputDataType,
-    typename OutputDataType
->
-class LayerTraits<LSTMLayer<GateActivationFunction,
-                            StateActivationFunction,
-                            OutputActivationFunction,
-                            InputDataType,
-                            OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = true;
-  static const bool IsConnection = false;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
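
To make the Forward() slicing above concrete: the removed LSTMLayer expects a pre-activation of 4 * outSize rows, stacked as [inGate; forgetGate; cell; outGate]. A sketch of one step of the state update without peepholes, assuming the default logistic gates and tanh cell (illustrative only):

    #include <armadillo>

    int main()
    {
      const arma::uword outSize = 3;
      arma::vec input = arma::randn<arma::vec>(outSize * 4);
      auto sigmoid = [](const arma::vec& v)
      {
        return arma::vec(1.0 / (1.0 + arma::exp(-v)));
      };

      // Slice the stacked pre-activation into its four blocks.
      arma::vec inGate     = sigmoid(input.subvec(0, outSize - 1));
      arma::vec forgetGate = sigmoid(input.subvec(outSize, 2 * outSize - 1));
      arma::vec cell       = arma::tanh(input.subvec(2 * outSize,
          3 * outSize - 1));
      arma::vec outGate    = sigmoid(input.subvec(3 * outSize,
          4 * outSize - 1));

      arma::vec prevState = arma::zeros<arma::vec>(outSize);
      arma::vec state  = inGate % cell + forgetGate % prevState;
      arma::vec output = outGate % arma::tanh(state);

      output.print("lstm output:");
      return 0;
    }
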
diff --git a/src/mlpack/methods/ann/layer/multiclass_classification_layer.hpp b/src/mlpack/methods/ann/layer/multiclass_classification_layer.hpp
deleted file mode 100644
index 440db78..0000000
--- a/src/mlpack/methods/ann/layer/multiclass_classification_layer.hpp
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * @file multiclass_classification_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the MulticlassClassificationLayer class, which implements a
- * multiclass classification layer that can be used as output layer.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_MULTICLASS_CLASSIFICATION_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_MULTICLASS_CLASSIFICATION_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of a multiclass classification layer that can be used as
- * output layer.
- *
- * A convenience typedef is given:
- *
- *  - ClassificationLayer
- */
-class MulticlassClassificationLayer
-{
- public:
-  /**
-   * Create the MulticlassClassificationLayer object.
-   */
-  MulticlassClassificationLayer()
-  {
-    // Nothing to do here.
-  }
-
-  /*
-   * Calculate the error using the specified input activation and the target.
-   * The error is stored into the given error parameter.
-   *
-   * @param inputActivations Input data used for evaluating the network.
-   * @param target Target data used for evaluating the network.
-   * @param error The calculated error with respect to the input activation and
-   * the given target.
-   */
-  template<typename DataType>
-  void CalculateError(const DataType& inputActivations,
-                      const DataType& target,
-                      DataType& error)
-  {
-    error = inputActivations - target;
-  }
-
-  /*
-   * Calculate the output class using the specified input activation.
-   *
-   * @param inputActivations Input data used to calculate the output class.
-   * @param output Output class of the input activation.
-   */
-  template<typename DataType>
-  void OutputClass(const DataType& inputActivations, DataType& output)
-  {
-    output = inputActivations;
-  }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-  }
-}; // class MulticlassClassificationLayer
-
-//! Layer traits for the multiclass classification layer.
-template <>
-class LayerTraits<MulticlassClassificationLayer>
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = true;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = false;
-};
-
-/**
- * Alias ClassificationLayer.
- */
-using ClassificationLayer = MulticlassClassificationLayer;
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
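
The removed MulticlassClassificationLayer's error rule is simply the difference between the activations and the target; for instance:

    #include <armadillo>

    int main()
    {
      arma::vec activations = {0.1, 0.7, 0.2};
      arma::vec target = {0.0, 1.0, 0.0};

      arma::vec error = activations - target;  // CalculateError().
      error.print("error:");
      return 0;
    }
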
diff --git a/src/mlpack/methods/ann/layer/multiply_constant_layer.hpp b/src/mlpack/methods/ann/layer/multiply_constant_layer.hpp
deleted file mode 100644
index d2f5fe8..0000000
--- a/src/mlpack/methods/ann/layer/multiply_constant_layer.hpp
+++ /dev/null
@@ -1,113 +0,0 @@
-/**
- * @file multiply_constant_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the MultiplyConstantLayer class, which multiplies the input by
- * a (non-learnable) constant.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_MULTIPLY_CONSTANT_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_MULTIPLY_CONSTANT_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the multiply constant layer. The multiply constant layer
- * multiplies the input by a (non-learnable) constant.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class MultiplyConstantLayer
-{
- public:
-  /**
-   * Create the MultiplyConstantLayer object.
-   *
-   * @param scalar The constant scalar value used to multiply the input.
-   */
-  MultiplyConstantLayer(const double scalar) : scalar(scalar)
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network. Multiply the input by the
-   * specified constant scalar value.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename InputType, typename OutputType>
-  void Forward(const InputType& input, OutputType& output)
-  {
-    output = input * scalar;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network. The backward pass
-   * multiplies the error by the specified constant scalar value.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType>
-  void Backward(const DataType& /* input */, const DataType& gy, DataType& g)
-  {
-    g = gy * scalar;
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(scalar, "scalar");
-  }
-
- private:
-  //! Locally-stored constant scalar value.
-  double scalar;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class MultiplyConstantLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
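
The MultiplyConstantLayer removed here is the simplest of the set: both passes scale by the same constant. A minimal sketch:

    #include <armadillo>

    int main()
    {
      const double scalar = 0.5;
      arma::mat input = arma::randn<arma::mat>(2, 2);

      arma::mat output = input * scalar;           // Forward().
      arma::mat gy = arma::ones<arma::mat>(2, 2);
      arma::mat g = gy * scalar;                   // Backward().

      return 0;
    }
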
diff --git a/src/mlpack/methods/ann/layer/negative_log_likelihood_layer.hpp b/src/mlpack/methods/ann/layer/negative_log_likelihood_layer.hpp
deleted file mode 100644
index 1cfaef6..0000000
--- a/src/mlpack/methods/ann/layer/negative_log_likelihood_layer.hpp
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * @file negative_log_likelihood_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the NegativeLogLikelihoodLayer class.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_NEGATIVE_LOG_LIKELIHOOD_Layer_HPP
-#define MLPACK_METHODS_ANN_LAYER_NEGATIVE_LOG_LIKELIHOOD_Layer_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the negative log likelihood layer. The negative log
- * likelihood layer expects that the input contains log-probabilities for each
- * class. The layer also expects a class index, in the range between 1 and the
- * number of classes, as target when calling the Forward function.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class NegativeLogLikelihoodLayer
-{
- public:
-  /**
-   * Create the NegativeLogLikelihoodLayer object.
-   */
-  NegativeLogLikelihoodLayer() { /* Nothing to do here. */ }
-
-  /**
-   * Ordinary feed forward pass of a neural network. The negative log
-   * likelihood layer expects that the input contains log-probabilities for
-   * each class. The layer also expects a class index, in the range between 1
-   * and the number of classes, as target when calling the Forward function.
-   *
-   * @param input Input data that contains the log-probabilities for each class.
-   * @param target The target vector, which contains the class index in the range
-   *        between 1 and the number of classes.
-   */
-  template<typename eT>
-  double Forward(const arma::Mat<eT>& input, const arma::Mat<eT>& target)
-  {
-    double output = 0;
-
-    for (size_t i = 0; i < input.n_cols; ++i)
-    {
-      Log::Assert(target(i) >= 1 && target(i) <= input.n_rows,
-          "Target class out of range.");
-      const size_t currentTarget = target(i) - 1;
-
-      output -= input(currentTarget, i);
-    }
-
-    return output;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network. The negative log
-   * likelihood layer expects that the input contains log-probabilities for
-   * each class. The layer also expects a class index, in the range between 1
-   * and the number of classes, as target when calling the Forward function.
-   *
-   * @param input The propagated input activation.
-   * @param target The target vector, which contains the class index in the range
-   *        between 1 and the number of classes.
-   * @param output The calculated error.
-   */
-  template<typename eT>
-  void Backward(const arma::Mat<eT>& input,
-                const arma::Mat<eT>& target,
-                arma::Mat<eT>& output)
-  {
-    output = arma::zeros<arma::Mat<eT> >(input.n_rows, input.n_cols);
-    for (size_t i = 0; i < input.n_cols; ++i)
-    {
-      Log::Assert(target(i) >= 1 && target(i) <= input.n_rows,
-          "Target class out of range.");
-      const size_t currentTarget = target(i) - 1;
-
-      output(currentTarget, i) = -1;
-    }
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
- private:
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class NegativeLogLikelihoodLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
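
Note the 1-based class indices expected by the removed NegativeLogLikelihoodLayer: the loss sums the negated log-probabilities at the target index of each column. A small worked sketch:

    #include <armadillo>
    #include <iostream>

    int main()
    {
      // Log-probabilities for 3 classes over 2 points (one per column).
      arma::mat input = arma::log(
          arma::mat{{0.2, 0.5}, {0.5, 0.3}, {0.3, 0.2}});
      arma::rowvec target = {2, 1};  // 1-based class indices.

      double loss = 0;
      for (arma::uword i = 0; i < input.n_cols; ++i)
        loss -= input(arma::uword(target(i)) - 1, i);

      std::cout << "nll: " << loss << std::endl;  // -(log 0.5 + log 0.5).
      return 0;
    }
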
diff --git a/src/mlpack/methods/ann/layer/one_hot_layer.hpp b/src/mlpack/methods/ann/layer/one_hot_layer.hpp
deleted file mode 100644
index f39dd3b..0000000
--- a/src/mlpack/methods/ann/layer/one_hot_layer.hpp
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- * @file one_hot_layer.hpp
- * @author Shangtong Zhang
- *
- * Definition of the OneHotLayer class, which implements a standard network
- * layer.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_ONE_HOT_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_ONE_HOT_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of a one hot classification layer that can be used as
- * output layer.
- */
-class OneHotLayer
-{
- public:
-  /**
-   * Create the OneHotLayer object.
-   */
-  OneHotLayer()
-  {
-    // Nothing to do here.
-  }
-
-  /*
-   * Calculate the error using the specified input activation and the target.
-   * The error is stored into the given error parameter.
-   *
-   * @param inputActivations Input data used for evaluating the network.
-   * @param target Target data used for evaluating the network.
-   * @param error The calculated error with respect to the input activation and
-   * the given target.
-   */
-  template<typename DataType>
-  void CalculateError(const DataType& inputActivations,
-                      const DataType& target,
-                      DataType& error)
-  {
-    error = inputActivations - target;
-  }
-
-  /*
-   * Calculate the output class using the specified input activation.
-   *
-   * @param inputActivations Input data used to calculate the output class.
-   * @param output Output class of the input activation.
-   */
-  template<typename DataType>
-  void OutputClass(const DataType& inputActivations, DataType& output)
-  {
-    output = inputActivations;
-    output.zeros();
-
-    arma::uword maxIndex = 0;
-    inputActivations.max(maxIndex);
-    output(maxIndex) = 1;
-  }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& /* ar */, const unsigned int /* version */)
-  {
-    /* Nothing to do here */
-  }
-}; // class OneHotLayer
-
-//! Layer traits for the one-hot class classification layer.
-template <>
-class LayerTraits<OneHotLayer>
-{
- public:
-  static const bool IsBinary = true;
-  static const bool IsOutputLayer = true;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = false;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
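
OneHotLayer's OutputClass() above amounts to an argmax followed by a one-hot encoding; for example:

    #include <armadillo>

    int main()
    {
      arma::vec activations = {0.1, 0.7, 0.2};
      arma::vec output = arma::zeros<arma::vec>(activations.n_elem);

      arma::uword maxIndex = 0;
      activations.max(maxIndex);  // Index of the largest activation.
      output(maxIndex) = 1;

      output.print("one-hot class:");  // [0, 1, 0].
      return 0;
    }
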
diff --git a/src/mlpack/methods/ann/layer/pooling_layer.hpp b/src/mlpack/methods/ann/layer/pooling_layer.hpp
deleted file mode 100644
index 7961e3d..0000000
--- a/src/mlpack/methods/ann/layer/pooling_layer.hpp
+++ /dev/null
@@ -1,267 +0,0 @@
-/**
- * @file pooling_layer.hpp
- * @author Marcus Edel
- * @author Nilay Jain
- *
- * Definition of the PoolingLayer class, which attaches various pooling
- * functions to the embedding layer.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_POOLING_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_POOLING_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/pooling_rules/mean_pooling.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the pooling layer. The pooling layer applies the given
- * pooling rule (e.g., mean or max pooling) over windows of the input.
- *
- * @tparam PoolingRule Pooling function used for the embedding layer.
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename PoolingRule = MeanPooling,
-    typename InputDataType = arma::cube,
-    typename OutputDataType = arma::cube
->
-class PoolingLayer
-{
- public:
-  /**
-   * Create the PoolingLayer object using the specified number of units.
-   *
-   * @param kSize Size of the pooling window.
-   * @param stride The stride of the pooling operation.
-   * @param pooling The pooling strategy.
-   */
-  PoolingLayer(const size_t kSize,
-               const size_t stride = 1,
-               PoolingRule pooling = PoolingRule()) :
-      kSize(kSize),
-      stride(stride),
-      pooling(pooling)
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    Pooling(input, output);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Cube<eT>& input, arma::Cube<eT>& output)
-  {
-    output = arma::zeros<arma::Cube<eT> >((input.n_rows - kSize) / stride + 1,
-        (input.n_cols - kSize) / stride + 1, input.n_slices);
-
-    for (size_t s = 0; s < input.n_slices; s++)
-      Pooling(input.slice(s), output.slice(s));
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, using 3rd-order tensors as
-   * input, calculating the function f(x) by propagating x backwards through
-   * f, using the results from the feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Cube<eT>& /* unused */,
-                const arma::Cube<eT>& gy,
-                arma::Cube<eT>& g)
-  {
-    g = arma::zeros<arma::Cube<eT> >(inputParameter.n_rows,
-        inputParameter.n_cols, inputParameter.n_slices);
-
-    for (size_t s = 0; s < gy.n_slices; s++)
-    {
-      Unpooling(inputParameter.slice(s), gy.slice(s), g.slice(s));
-    }
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, using 3rd-order tensors as
-   * input, calculating the function f(x) by propagating x backwards through
-   * f, using the results from the feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Cube<eT>& /* unused */,
-                const arma::Mat<eT>& gy,
-                arma::Cube<eT>& g)
-  {
-    // Generate a cube from the error matrix.
-    arma::Cube<eT> mappedError = arma::zeros<arma::cube>(outputParameter.n_rows,
-        outputParameter.n_cols, outputParameter.n_slices);
-
-    for (size_t s = 0, j = 0; s < mappedError.n_slices; s += gy.n_cols, j++)
-    {
-      for (size_t i = 0; i < gy.n_cols; i++)
-      {
-        arma::Col<eT> temp = gy.col(i).subvec(
-            j * outputParameter.n_rows * outputParameter.n_cols,
-            (j + 1) * outputParameter.n_rows * outputParameter.n_cols - 1);
-
-        mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
-            outputParameter.n_rows, outputParameter.n_cols);
-      }
-    }
-
-    Backward(inputParameter, mappedError, g);
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(kSize, "kSize");
-    ar & data::CreateNVP(pooling, "pooling");
-    ar & data::CreateNVP(stride, "stride");
-  }
-
- private:
-  /**
-   * Apply pooling to the input and store the results.
-   *
-   * @param input The input to apply the pooling rule to.
-   * @param output The pooled result.
-   */
-  template<typename eT>
-  void Pooling(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    const size_t rStep = kSize;
-    const size_t cStep = kSize;
-
-    for (size_t j = 0, colidx = 0; j < output.n_cols; ++j, colidx += stride)
-    {
-      for (size_t i = 0, rowidx = 0; i < output.n_rows; ++i, rowidx += stride)
-      {
-        output(i, j) = pooling.Pooling(input(
-            arma::span(rowidx, rowidx + rStep - 1),
-            arma::span(colidx, colidx + cStep - 1)));
-      }
-    }
-  }
-
-  /**
-   * Apply unpooling to the input and store the results.
-   *
-   * @param input The input to apply the unpooling rule to.
-   * @param error The error used to perform the unpooling operation.
-   * @param output The unpooled result.
-   */
-  template<typename eT>
-  void Unpooling(const arma::Mat<eT>& input,
-                 const arma::Mat<eT>& error,
-                 arma::Mat<eT>& output)
-  {
-    const size_t rStep = input.n_rows / error.n_rows;
-    const size_t cStep = input.n_cols / error.n_cols;
-
-    arma::Mat<eT> unpooledError;
-    for (size_t j = 0; j < input.n_cols; j += cStep)
-    {
-      for (size_t i = 0; i < input.n_rows; i += rStep)
-      {
-        const arma::Mat<eT>& inputArea = input(arma::span(i, i + rStep - 1),
-            arma::span(j, j + cStep - 1));
-
-        pooling.Unpooling(inputArea, error(i / rStep, j / cStep),
-            unpooledError);
-
-        output(arma::span(i, i + rStep - 1),
-            arma::span(j, j + cStep - 1)) += unpooledError;
-      }
-    }
-  }
-
-  //! Locally-stored size of the pooling window.
-  size_t kSize;
-
-  //! Locally-stored stride value by which we move filter.
-  size_t stride;
-
-  //! Locally-stored pooling strategy.
-  PoolingRule pooling;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class PoolingLayer
-
-//! Layer traits for the pooling layer.
-template<
-    typename PoolingRule,
-    typename InputDataType,
-    typename OutputDataType
->
-class LayerTraits<PoolingLayer<PoolingRule, InputDataType, OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
-
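Finally, for the removed PoolingLayer: with window size kSize and stride, each slice of the output has ((n_rows - kSize) / stride + 1) x ((n_cols - kSize) / stride + 1) elements. A mean-pooling sketch over a single matrix, mirroring the Pooling() helper above (illustrative only):

    #include <armadillo>

    int main()
    {
      const arma::uword kSize = 2, stride = 2;
      arma::mat input = arma::randu<arma::mat>(4, 4);

      arma::mat output((input.n_rows - kSize) / stride + 1,
                       (input.n_cols - kSize) / stride + 1);

      for (arma::uword j = 0, c = 0; j < output.n_cols; ++j, c += stride)
        for (arma::uword i = 0, r = 0; i < output.n_rows; ++i, r += stride)
          output(i, j) = arma::mean(arma::vectorise(input(
              arma::span(r, r + kSize - 1), arma::span(c, c + kSize - 1))));

      output.print("mean-pooled:");
      return 0;
    }
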
diff --git a/src/mlpack/methods/ann/layer/recurrent_layer.hpp b/src/mlpack/methods/ann/layer/recurrent_layer.hpp
deleted file mode 100644
index 00ffbbe..0000000
--- a/src/mlpack/methods/ann/layer/recurrent_layer.hpp
+++ /dev/null
@@ -1,192 +0,0 @@
-/**
- * @file recurrent_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the RecurrentLayer class.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_RECURRENT_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_RECURRENT_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the RecurrentLayer class. Recurrent layers can be used
- * similarly to feed-forward layers, except that the input isn't stored in
- * inputParameter; instead, it's stored in recurrentParameter.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class RecurrentLayer
-{
- public:
-  /**
-   * Create the RecurrentLayer object using the specified number of units.
-   *
-   * @param inSize The number of input units.
-   * @param outSize The number of output units.
-   */
-  RecurrentLayer(const size_t inSize, const size_t outSize) :
-      inSize(inSize),
-      outSize(outSize),
-      recurrentParameter(arma::zeros<InputDataType>(inSize, 1))
-  {
-    weights.set_size(outSize, inSize);
-  }
-
-  /**
-   * Create the RecurrentLayer object using the specified number of units.
-   *
-   * @param outSize The number of output units.
-   */
-  RecurrentLayer(const size_t outSize) :
-      inSize(outSize),
-      outSize(outSize),
-      recurrentParameter(arma::zeros<InputDataType>(outSize, 1))
-  {
-    weights.set_size(outSize, inSize);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    output = input + weights * recurrentParameter;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT>
-  void Backward(const InputType& /* unused */,
-                const arma::Mat<eT>& gy,
-                arma::mat& g)
-  {
-    g = (weights).t() * gy;
-  }
-
-  /*
-   * Calculate the gradient using the output delta and the input activation.
-   *
-   * @param input The propagated input activation.
-   * @param d The calculated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT, typename GradientDataType>
-  void Gradient(const InputType& /* input */,
-                const arma::Mat<eT>& d,
-                GradientDataType& g)
-  {
-    g = d * recurrentParameter.t();
-  }
-
-  //! Get the weights.
-  OutputDataType const& Weights() const { return weights; }
-  //! Modify the weights.
-  OutputDataType& Weights() { return weights; }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the recurrent parameter.
-  InputDataType const& RecurrentParameter() const { return recurrentParameter; }
-  //! Modify the recurrent parameter.
-  InputDataType& RecurrentParameter() { return recurrentParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the gradient.
-  OutputDataType const& Gradient() const { return gradient; }
-  //! Modify the gradient.
-  OutputDataType& Gradient() { return gradient; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(recurrentParameter, "recurrentParameter");
-    ar & data::CreateNVP(weights, "weights");
-  }
-
- private:
-  //! Locally-stored number of input units.
-  size_t inSize;
-
-  //! Locally-stored number of output units.
-  size_t outSize;
-
-  //! Locally-stored weight object.
-  OutputDataType weights;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  OutputDataType gradient;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! Locally-stored recurrent parameter object.
-  InputDataType recurrentParameter;
-}; // class RecurrentLayer
-
-//! Layer traits for the recurrent layer.
-template<typename InputDataType, typename OutputDataType
->
-class LayerTraits<RecurrentLayer<InputDataType, OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
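
The recurrent step removed above is an additive blend: the current input plus a linear transform of the stored hidden state. A self-contained sketch of the Forward() and Backward() expressions, with illustrative variable names and assuming Armadillo:

    #include <armadillo>

    int main()
    {
      const size_t units = 4;
      arma::mat weights(units, units, arma::fill::randn); // recurrent weights
      arma::vec hidden(units, arma::fill::zeros);   // the recurrentParameter
      arma::vec input(units, arma::fill::randu);

      // Forward(): output = input + weights * recurrentParameter.
      arma::vec output = input + weights * hidden;

      // Backward(): the error flows back through the transposed weights.
      arma::vec gy(units, arma::fill::ones);
      arma::vec g = weights.t() * gy;

      output.print("output:");
      g.print("g:");
      return 0;
    }
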
diff --git a/src/mlpack/methods/ann/layer/reinforce_normal_layer.hpp b/src/mlpack/methods/ann/layer/reinforce_normal_layer.hpp
deleted file mode 100644
index c033a51..0000000
--- a/src/mlpack/methods/ann/layer/reinforce_normal_layer.hpp
+++ /dev/null
@@ -1,139 +0,0 @@
-/**
- * @file reinforce_normal_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the ReinforceNormalLayer class, which implements the REINFORCE
- * algorithm for the normal distribution.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_REINFORCE_NORMAL_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_REINFORCE_NORMAL_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the reinforce normal layer. The reinforce normal layer
- * implements the REINFORCE algorithm for the normal distribution.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class ReinforceNormalLayer
-{
- public:
-  /**
-   * Create the ReinforceNormalLayer object.
-   *
-   * @param stdev Standard deviation used during the forward and backward pass.
-   */
-  ReinforceNormalLayer(const double stdev) : stdev(stdev)
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    if (!deterministic)
-    {
-      // Scale standard normal noise by the standard deviation and center it
-      // at the input, i.e. sample from N(input, stdev^2).
-      output = arma::randn<arma::Mat<eT> >(input.n_rows, input.n_cols) *
-          stdev + input;
-    }
-    else
-    {
-      // Use maximum a posteriori.
-      output = input;
-    }
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType>
-  void Backward(const DataType& input,
-                const DataType& /* gy */,
-                DataType& g)
-  {
-    g = (input - inputParameter) / std::pow(stdev, 2.0);
-
-    // Multiply by reward and multiply by -1.
-    g *= -reward;
-  }
-
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the value of the deterministic parameter.
-  bool Deterministic() const { return deterministic; }
-  //! Modify the value of the deterministic parameter.
-  bool& Deterministic() { return deterministic; }
-
-  //! Get the value of the reward parameter.
-  double Reward() const { return reward; }
-  //! Modify the value of the reward parameter.
-  double& Reward() { return reward; }
-
- private:
-  //! Standard deviation used during the forward and backward pass.
-  const double stdev;
-
-  //! Locally-stored reward parameter.
-  double reward;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! If true use maximum a posteriori during the forward pass.
-  bool deterministic;
-}; // class ReinforceNormalLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
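
The stochastic branch of Forward() above is plain reparameterized sampling: draw standard normal noise, scale it by stdev, and center it at the input. A sketch under those assumptions (the free-function form is illustrative):

    #include <armadillo>

    // Sample from N(input, stdev^2) element-wise during training; return the
    // mean (maximum a posteriori) when running deterministically.
    arma::mat ReinforceNormalForward(const arma::mat& input,
                                     const double stdev,
                                     const bool deterministic)
    {
      if (deterministic)
        return input;

      return arma::randn<arma::mat>(input.n_rows, input.n_cols) * stdev
          + input;
    }
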
diff --git a/src/mlpack/methods/ann/layer/softmax_layer.hpp b/src/mlpack/methods/ann/layer/softmax_layer.hpp
deleted file mode 100644
index 7b38de9..0000000
--- a/src/mlpack/methods/ann/layer/softmax_layer.hpp
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * @file softmax_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the SoftmaxLayer class.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_SOFTMAX_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_SOFTMAX_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the softmax layer. The softmax loss layer computes the
- * multinomial logistic loss of the softmax of its inputs.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class SoftmaxLayer
-{
- public:
-  /**
-   * Create the SoftmaxLayer object.
-   */
-  SoftmaxLayer()
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    output = arma::trunc_exp(input -
-        arma::repmat(arma::max(input), input.n_rows, 1));
-    output /= arma::accu(output);
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  void Backward(const arma::Mat<eT>& /* unused */,
-                const arma::Mat<eT>& gy,
-                arma::Mat<eT>& g)
-  {
-    g = gy;
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& /* ar */, const unsigned int /* version */)
-  {
-    /* Nothing to do here */
-  }
-
- private:
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class SoftmaxLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
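
The max subtraction in Forward() above is the standard numerical-stability trick: shifting the logits by their maximum leaves the softmax unchanged but keeps trunc_exp from overflowing. Note that the removed layer normalizes by the sum over all elements, which matches a single-column activation. A one-column sketch (function name illustrative):

    #include <armadillo>

    arma::vec StableSoftmax(const arma::vec& x)
    {
      // exp(x - max(x)) / sum(exp(x - max(x))) equals softmax(x), but the
      // exponentials stay bounded.
      const arma::vec e = arma::exp(x - x.max());
      return e / arma::accu(e);
    }
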
diff --git a/src/mlpack/methods/ann/layer/sparse_bias_layer.hpp b/src/mlpack/methods/ann/layer/sparse_bias_layer.hpp
deleted file mode 100644
index 8b14ecb..0000000
--- a/src/mlpack/methods/ann/layer/sparse_bias_layer.hpp
+++ /dev/null
@@ -1,177 +0,0 @@
-/**
- * @file sparse_bias_layer.hpp
- * @author Tham Ngap Wei
- *
- * Definition of the SparseBiasLayer class.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_SPARSE_BIAS_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_SPARSE_BIAS_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of a bias layer designed for the sparse autoencoder.
- * The SparseBiasLayer class represents a single layer of a neural network.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class SparseBiasLayer
-{
- public:
-  /**
-   * Create the SparseBiasLayer object using the specified number of units
-   * and batch size.
-   *
-   * @param outSize The number of output units.
-   * @param batchSize The batch size used to train the network.
-   */
-  SparseBiasLayer(const size_t outSize, const size_t batchSize) :
-      outSize(outSize),
-      batchSize(batchSize)
-  {
-    weights.set_size(outSize, 1);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    output = input + arma::repmat(weights, 1, input.n_cols);
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename DataType, typename ErrorType>
-  void Backward(const DataType& /* unused */,
-                const ErrorType& gy,
-                ErrorType& g)
-  {
-    g = gy;
-  }
-
-  /*
-   * Calculate the gradient using the output delta and the bias.
-   *
-   * @param input The propagated input.
-   * @param d The calculated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT>
-  void Gradient(const InputType& /* input */,
-                const arma::Mat<eT>& d,
-                InputDataType& g)
-  {
-    g = arma::sum(d, 1) / static_cast<typename InputDataType::value_type>(
-        batchSize);
-  }
-
-  //! Get the batch size.
-  size_t BatchSize() const { return batchSize; }
-  //! Modify the batch size.
-  size_t& BatchSize() { return batchSize; }
-
-  //! Get the weights.
-  InputDataType const& Weights() const { return weights; }
-  //! Modify the weights.
-  InputDataType& Weights() { return weights; }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the gradient.
-  InputDataType const& Gradient() const { return gradient; }
-  //! Modify the gradient.
-  InputDataType& Gradient() { return gradient; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(weights, "weights");
-    ar & data::CreateNVP(batchSize, "batchSize");
-  }
-
- private:
-  //! Locally-stored number of output units.
-  size_t outSize;
-
-  //! The batch size used to train the network.
-  size_t batchSize;
-
-  //! Locally-stored weight object.
-  InputDataType weights;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  InputDataType gradient;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class SparseBiasLayer
-
-//! Layer traits for the bias layer.
-template<typename InputDataType, typename OutputDataType
->
-class LayerTraits<SparseBiasLayer<InputDataType, OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = true;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
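
The layer above broadcasts one bias vector across every column (sample) of the batch, and its gradient averages the error over the batch size. A standalone sketch of those two expressions, assuming Armadillo:

    #include <armadillo>

    int main()
    {
      arma::mat input(3, 5, arma::fill::randu);  // 5 columns = 5 samples
      arma::vec bias(3, arma::fill::ones);
      const double batchSize = 5.0;

      // Forward(): add the same bias vector to every column.
      arma::mat output = input + arma::repmat(bias, 1, input.n_cols);

      // Gradient(): sum the error over columns, averaged by the batch size.
      arma::mat d(3, 5, arma::fill::ones);  // stand-in for the error
      arma::vec g = arma::sum(d, 1) / batchSize;

      output.print("output:");
      g.print("g:");
      return 0;
    }
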
diff --git a/src/mlpack/methods/ann/layer/sparse_input_layer.hpp b/src/mlpack/methods/ann/layer/sparse_input_layer.hpp
deleted file mode 100644
index 0e4aa54..0000000
--- a/src/mlpack/methods/ann/layer/sparse_input_layer.hpp
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * @file sparse_input_layer.hpp
- * @author Tham Ngap Wei
- *
- * Definition of the sparse input class, which serves as the first layer
- * of the sparse autoencoder.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_SPARSE_INPUT_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_SPARSE_INPUT_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-#include <type_traits>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the SparseInputLayer. The SparseInputLayer class
- * represents the first layer of the sparse autoencoder.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
-    >
-class SparseInputLayer
-{
- public:
-  /**
-   * Create the SparseInputLayer object using the specified number of units.
-   *
-   * @param inSize The number of input units.
-   * @param outSize The number of output units.
-   * @param lambda L2-regularization parameter.
-   */
-  SparseInputLayer(const size_t inSize,
-                   const size_t outSize,
-                   const double lambda = 0.0001) :
-    inSize(inSize),
-    outSize(outSize),
-    lambda(lambda)
-  {
-    weights.set_size(outSize, inSize);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    output = weights * input;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT>
-  void Backward(const InputType& /* unused */,
-                const arma::Mat<eT>& gy,
-                arma::Mat<eT>& g)
-  {
-    g = gy;
-  }
-
-  /*
-   * Calculate the gradient using the output delta and the input activation.
-   *
-   * @param input The propagated input.
-   * @param d The calculated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT, typename GradientDataType>
-  void Gradient(const InputType& input,
-                const arma::Mat<eT>& d,
-                GradientDataType& g)
-  {
-    g = d * input.t() / static_cast<typename InputType::value_type>(
-        input.n_cols) + lambda * weights;
-  }
-
-  //! Get the weights.
-  OutputDataType const& Weights() const { return weights; }
-  //! Modify the weights.
-  OutputDataType& Weights() { return weights; }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the gradient.
-  OutputDataType const& Gradient() const { return gradient; }
-  //! Modify the gradient.
-  OutputDataType& Gradient() { return gradient; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(weights, "weights");
-    ar & data::CreateNVP(lambda, "lambda");
-  }
-
- private:
-  //! Locally-stored number of input units.
-  size_t inSize;
-
-  //! Locally-stored number of output units.
-  size_t outSize;
-
-  //! L2-regularization parameter.
-  double lambda;
-
-  //! Locally-stored weight object.
-  OutputDataType weights;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  OutputDataType gradient;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class SparseInputLayer
-
-//! Layer traits for the SparseInputLayer.
-template<typename InputDataType, typename OutputDataType
->
-class LayerTraits<SparseInputLayer<InputDataType, OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
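
Gradient() above is an averaged outer product of the error and the input plus the L2 weight-decay term lambda * weights. As a one-function sketch (the name SparseInputGradient is illustrative):

    #include <armadillo>

    arma::mat SparseInputGradient(const arma::mat& input,   // inSize x n
                                  const arma::mat& d,       // outSize x n
                                  const arma::mat& weights, // outSize x inSize
                                  const double lambda)
    {
      return d * input.t() / static_cast<double>(input.n_cols)
          + lambda * weights;
    }
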
diff --git a/src/mlpack/methods/ann/layer/sparse_output_layer.hpp b/src/mlpack/methods/ann/layer/sparse_output_layer.hpp
deleted file mode 100644
index 371c200..0000000
--- a/src/mlpack/methods/ann/layer/sparse_output_layer.hpp
+++ /dev/null
@@ -1,227 +0,0 @@
-/**
- * @file sparse_output_layer.hpp
- * @author Tham Ngap Wei
- *
- * This is the fourth layer of sparse autoencoder.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_SPARSE_OUTPUT_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_SPARSE_OUTPUT_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the SparseOutputLayer class. The SparseOutputLayer class
- * represents the fourth layer of the sparse autoencoder.
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::mat,
-    typename OutputDataType = arma::mat
->
-class SparseOutputLayer
-{
- public:
-  /**
-   * Create the SparseOutputLayer object using the specified number of units.
-   *
-   * @param inSize The number of input units.
-   * @param outSize The number of output units.
-   * @param lambda L2-regularization parameter.
-   * @param beta KL divergence parameter.
-   * @param rho Sparsity parameter.
-   */
-  SparseOutputLayer(const size_t inSize,
-                    const size_t outSize,
-                    const double lambda = 0.0001,
-                    const double beta = 3,
-                    const double rho = 0.01) :
-    inSize(inSize),
-    outSize(outSize),
-    lambda(lambda),
-    beta(beta),
-    rho(rho)
-  {
-    weights.set_size(outSize, inSize);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data used for evaluating the specified function.
-   * @param output Resulting output activation.
-   */
-  template<typename eT>
-  void Forward(const arma::Mat<eT>& input, arma::Mat<eT>& output)
-  {
-    output = weights * input;
-    // Average activations of the hidden layer.
-    rhoCap = arma::sum(input, 1) / static_cast<double>(input.n_cols);
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT>
-  void Backward(const InputType& input,
-                const arma::Mat<eT>& gy,
-                arma::Mat<eT>& g)
-  {
-    const arma::mat klDivGrad = beta * (-(rho / rhoCap) + (1 - rho) /
-          (1 - rhoCap));
-
-    // NOTE: if the Armadillo version is recent enough, find_nonfinite can
-    // prevent overflow values:
-    // klDivGrad.elem(arma::find_nonfinite(klDivGrad)).zeros();
-    g = weights.t() * gy +
-        arma::repmat(klDivGrad, 1, input.n_cols);
-  }
-
-  /*
-   * Calculate the gradient using the output delta and the input activation.
-   *
-   * @param input The propagated input.
-   * @param d The calculated error.
-   * @param g The calculated gradient.
-   */
-  template<typename InputType, typename eT>
-  void Gradient(const InputType& input,
-                const arma::Mat<eT>& d,
-                arma::Mat<eT>& g)
-  {
-    g = d * input.t() / static_cast<typename InputType::value_type>(
-        input.n_cols) + lambda * weights;
-  }
-
-  //! Sets the KL divergence parameter.
-  void Beta(const double b)
-  {
-    beta = b;
-  }
-
-  //! Gets the KL divergence parameter.
-  double Beta() const
-  {
-    return beta;
-  }
-
-  //! Sets the sparsity parameter.
-  void Rho(const double r)
-  {
-    rho = r;
-  }
-
-  //! Gets the sparsity parameter.
-  double Rho() const
-  {
-    return rho;
-  }
-
-  //! Get the weights.
-  OutputDataType const& Weights() const { return weights; }
-  //! Modify the weights.
-  OutputDataType& Weights() { return weights; }
-
-  //! Get the RhoCap.
-  OutputDataType const& RhoCap() const { return rhoCap; }
-  //! Modify the RhoCap.
-  OutputDataType& RhoCap() { return rhoCap; }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the gradient.
-  OutputDataType const& Gradient() const { return gradient; }
-  //! Modify the gradient.
-  OutputDataType& Gradient() { return gradient; }
-
-  /**
-   * Serialize the layer.
-   */
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */)
-  {
-    ar & data::CreateNVP(weights, "weights");
-    ar & data::CreateNVP(lambda, "lambda");
-    ar & data::CreateNVP(beta, "beta");
-    ar & data::CreateNVP(rho, "rho");
-  }
-
- private:
-  //! Locally-stored number of input units.
-  size_t inSize;
-
-  //! Locally-stored number of output units.
-  size_t outSize;
-
-  //! L2-regularization parameter.
-  double lambda;
-
-  //! KL divergence parameter.
-  double beta;
-
-  //! Sparsity parameter.
-  double rho;
-
-  //! Locally-stored weight object.
-  OutputDataType weights;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored gradient object.
-  OutputDataType gradient;
-
-  //! Average activations of the hidden layer.
-  OutputDataType rhoCap;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-}; // class SparseOutputLayer
-
-//! Layer traits for the SparseOutputLayer.
-template<typename InputDataType, typename OutputDataType
-    >
-class LayerTraits<SparseOutputLayer<InputDataType, OutputDataType> >
-{
- public:
-  static const bool IsBinary = false;
-  static const bool IsOutputLayer = false;
-  static const bool IsBiasLayer = false;
-  static const bool IsLSTMLayer = false;
-  static const bool IsConnection = true;
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
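
The sparsity term added in Backward() above is the derivative of the penalty beta * KL(rho || rhoCap) with respect to the average activations rhoCap. A sketch of just that term (function name illustrative):

    #include <armadillo>

    arma::vec KLDivergenceGradient(const arma::vec& rhoCap,
                                   const double rho,
                                   const double beta)
    {
      // d/d(rhoCap) of beta * KL(rho || rhoCap), element-wise over the
      // hidden units. It diverges as rhoCap approaches 0 or 1, hence the
      // find_nonfinite guard suggested in the removed code.
      return beta * (-(rho / rhoCap) + (1.0 - rho) / (1.0 - rhoCap));
    }
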
diff --git a/src/mlpack/methods/ann/layer/vr_class_reward_layer.hpp b/src/mlpack/methods/ann/layer/vr_class_reward_layer.hpp
deleted file mode 100644
index 393dbcd..0000000
--- a/src/mlpack/methods/ann/layer/vr_class_reward_layer.hpp
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * @file vr_class_reward_layer.hpp
- * @author Marcus Edel
- *
- * Definition of the VRClassRewardLayer class, which implements the variance
- * reduced classification reinforcement layer.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_LAYER_VR_CLASS_REWARD_LAYER_HPP
-#define MLPACK_METHODS_ANN_LAYER_VR_CLASS_REWARD_LAYER_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of the variance reduced classification reinforcement layer.
- * This layer is meant to be used in combination with the reinforce normal
- * layer (ReinforceNormalLayer), and expects a reward (1 for success,
- * 0 otherwise).
- *
- * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
- *         arma::sp_mat or arma::cube).
- */
-template <
-    typename InputDataType = arma::field<arma::mat>,
-    typename OutputDataType = arma::field<arma::mat>
->
-class VRClassRewardLayer
-{
- public:
-  /**
-   * Create the VRClassRewardLayer object.
-   *
-   * @param scale Parameter used to scale the reward.
-   * @param sizeAverage Take the average over all batches.
-   */
-  VRClassRewardLayer(const double scale = 1, const bool sizeAverage = true) :
-      scale(scale),
-      sizeAverage(sizeAverage)
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data that contains the log-probabilities for each class.
-   * @param target The target vector, which contains the class index in the
-   *        range between 1 and the number of classes.
-   */
-  template<typename eT>
-  double Forward(const arma::field<arma::Mat<eT> >& input,
-                 const arma::Mat<eT>& target)
-  {
-    return Forward(input(0, 0), target);
-  }
-
-  /**
-   * Ordinary feed forward pass of a neural network, evaluating the function
-   * f(x) by propagating the activity forward through f.
-   *
-   * @param input Input data that contains the log-probabilities for each class.
-   * @param target The target vector, which contains the class index in the
-   *        range between 1 and the number of classes.
-   */
-  template<typename eT>
-  double Forward(const arma::Mat<eT>& input, const arma::Mat<eT>& target)
-  {
-    reward = 0;
-    arma::uword index = 0;
-
-    for (size_t i = 0; i < input.n_cols; i++)
-    {
-      input.unsafe_col(i).max(index);
-      reward += ((index + 1) == target(i)) * scale;
-    }
-
-    if (sizeAverage)
-    {
-      return -reward / input.n_cols;
-    }
-
-    return -reward;
-  }
-
-  /**
-   * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
-   *
-   * @param input The propagated input activation.
-   * @param gy The backpropagated error.
-   * @param g The calculated gradient.
-   */
-  template<typename eT>
-  double Backward(const arma::field<arma::Mat<eT> >& input,
-                const arma::Mat<eT>& /* gy */,
-                arma::field<arma::Mat<eT> >& g)
-  {
-    g = arma::field<arma::Mat<eT> >(2, 1);
-    g(0, 0) = arma::zeros<arma::Mat<eT> >(input(0, 0).n_rows,
-        input(0, 0).n_cols);
-
-    double vrReward = reward - arma::as_scalar(input(1, 0));
-    if (sizeAverage)
-    {
-      vrReward /= input(0, 0).n_cols;
-    }
-
-    const double norm = sizeAverage ? 2.0 / input(0, 0).n_cols : 2.0;
-
-    g(1, 0) = norm * (input(1, 0) - reward);
-
-    return vrReward;
-  }
-
-  //! Get the input parameter.
-  InputDataType const& InputParameter() const { return inputParameter; }
-  //! Modify the input parameter.
-  InputDataType& InputParameter() { return inputParameter; }
-
-  //! Get the output parameter.
-  OutputDataType const& OutputParameter() const { return outputParameter; }
-  //! Modify the output parameter.
-  OutputDataType& OutputParameter() { return outputParameter; }
-
-  //! Get the delta.
-  OutputDataType const& Delta() const { return delta; }
-  //! Modify the delta.
-  OutputDataType& Delta() { return delta; }
-
-  //! Get the value of the deterministic parameter.
-  bool Deterministic() const { return deterministic; }
-  //! Modify the value of the deterministic parameter.
-  bool& Deterministic() { return deterministic; }
-
- private:
-  //! Locally-stored value to scale the reward.
-  const double scale;
-
-  //! If true take the average over all batches.
-  const bool sizeAverage;
-
-  //! Locally stored reward parameter.
-  double reward;
-
-  //! Locally-stored delta object.
-  OutputDataType delta;
-
-  //! Locally-stored input parameter object.
-  InputDataType inputParameter;
-
-  //! Locally-stored output parameter object.
-  OutputDataType outputParameter;
-
-  //! If true, the deterministic forward pass is used.
-  bool deterministic;
-}; // class VRClassRewardLayer
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
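
The variance reduction above subtracts a learned baseline (stored in input(1, 0)) from the raw reward before it reaches REINFORCE, and trains the baseline toward the reward with a squared-error term. Reduced to scalars, the two quantities computed in Backward() are:

    #include <cstdio>

    int main()
    {
      const double reward = 1.0;    // 1 for a correct prediction, 0 otherwise
      const double baseline = 0.6;  // stand-in for input(1, 0) above

      const double vrReward = reward - baseline;  // what REINFORCE receives
      const double baselineGrad = 2.0 * (baseline - reward); // d/db (b-r)^2

      std::printf("vrReward = %f, baselineGrad = %f\n",
          vrReward, baselineGrad);
      return 0;
    }
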
diff --git a/src/mlpack/methods/ann/network_util.hpp b/src/mlpack/methods/ann/network_util.hpp
deleted file mode 100644
index 109e4fe..0000000
--- a/src/mlpack/methods/ann/network_util.hpp
+++ /dev/null
@@ -1,247 +0,0 @@
-/**
- * @file network_util.hpp
- * @author Marcus Edel
- *
- * Neural network utilities.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_NETWORK_UTIL_HPP
-#define MLPACK_METHODS_ANN_NETWORK_UTIL_HPP
-
-#include <mlpack/prereqs.hpp>
-
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-/**
- * Neural network utility functions.
- */
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Auxiliary function to get the number of weights of the specified network.
- *
- * @param network The network used for specifying the number of weights.
- * @return The number of weights.
- */
-template<size_t I = 0, typename... Tp>
-typename std::enable_if<I < sizeof...(Tp), size_t>::type
-NetworkSize(std::tuple<Tp...>& network);
-
-template<size_t I, typename... Tp>
-typename std::enable_if<I == sizeof...(Tp), size_t>::type
-NetworkSize(std::tuple<Tp...>& network);
-
-/**
- * Auxiliary function to get the number of weights of the specified layer.
- *
- * @param layer The layer used for specifying the number of weights.
- * @param output The layer output parameter.
- * @return The number of weights.
- */
-template<typename T, typename P>
-typename std::enable_if<
-    !HasWeightsCheck<T, P&(T::*)()>::value, size_t>::type
-LayerSize(T& layer, P& output);
-
-template<typename T, typename P>
-typename std::enable_if<
-    HasWeightsCheck<T, P&(T::*)()>::value, size_t>::type
-LayerSize(T& layer, P& output);
-
-/**
- * Auxiliary function to set the weights of the specified network.
- *
- * @param weights The weights used to set the weights of the network.
- * @param network The network used to set the weights.
- * @param offset The memory offset of the weights.
- */
-template<size_t I = 0, typename... Tp>
-typename std::enable_if<I < sizeof...(Tp), void>::type
-NetworkWeights(arma::mat& weights,
-               std::tuple<Tp...>& network,
-               size_t offset = 0);
-
-template<size_t I, typename... Tp>
-typename std::enable_if<I == sizeof...(Tp), void>::type
-NetworkWeights(arma::mat& weights,
-               std::tuple<Tp...>& network,
-               size_t offset = 0);
-
-/**
- * Auxiliary function to set the weights of the specified layer.
- *
- * @param layer The layer used to set the weights.
- * @param weights The weights used to set the weights of the layer.
- * @param offset The memory offset of the weights.
- * @param output The output parameter of the layer.
- * @return The number of weights.
- */
-template<typename T>
-typename std::enable_if<
-    HasWeightsCheck<T, arma::mat&(T::*)()>::value, size_t>::type
-LayerWeights(T& layer, arma::mat& weights, size_t offset, arma::mat& output);
-
-template<typename T>
-typename std::enable_if<
-    HasWeightsCheck<T, arma::cube&(T::*)()>::value, size_t>::type
-LayerWeights(T& layer, arma::mat& weights, size_t offset, arma::cube& output);
-
-template<typename T, typename P>
-typename std::enable_if<
-    !HasWeightsCheck<T, P&(T::*)()>::value, size_t>::type
-LayerWeights(T& layer, arma::mat& weights, size_t offset, P& output);
-
-/**
- * Auxiliary function to set the gradients of the specified network.
- *
- * @param gradients The gradients used to set the gradient of the network.
- * @param network The network used to set the gradients.
- * @param offset The memory offset of the gradients.
- */
-template<size_t I = 0, typename... Tp>
-typename std::enable_if<I < sizeof...(Tp), void>::type
-NetworkGradients(arma::mat& gradients,
-                 std::tuple<Tp...>& network,
-                 size_t offset = 0);
-
-template<size_t I, typename... Tp>
-typename std::enable_if<I == sizeof...(Tp), void>::type
-NetworkGradients(arma::mat& gradients,
-                 std::tuple<Tp...>& network,
-                 size_t offset = 0);
-
-/**
- * Auxiliary function to set the gradients of the specified layer.
- *
- * @param layer The layer used to set the gradients.
- * @param gradients The gradients used to set the gradient of the layer.
- * @param offset The memory offset of the gradients.
- * @param output The output parameter of the layer.
- * @return The number of gradients.
- */
-template<typename T>
-typename std::enable_if<
-    HasGradientCheck<T, arma::mat&(T::*)()>::value, size_t>::type
-LayerGradients(T& layer,
-               arma::mat& gradients,
-               size_t offset,
-               arma::mat& output);
-
-template<typename T>
-typename std::enable_if<
-    HasGradientCheck<T, arma::cube&(T::*)()>::value, size_t>::type
-LayerGradients(T& layer,
-               arma::mat& gradients,
-               size_t offset,
-               arma::cube& output);
-
-template<typename T, typename P>
-typename std::enable_if<
-    !HasGradientCheck<T, P&(T::*)()>::value, size_t>::type
-LayerGradients(T& layer, arma::mat& gradients, size_t offset, P& output);
-
-/**
- * Auxiliary function to get the input size of the specified network.
- *
- * @param network The network used for specifying the input size.
- * @return The input size.
- */
-template<size_t I = 0, typename... Tp>
-typename std::enable_if<I < sizeof...(Tp), size_t>::type
-NetworkInputSize(std::tuple<Tp...>& network);
-
-template<size_t I, typename... Tp>
-typename std::enable_if<I == sizeof...(Tp), size_t>::type
-NetworkInputSize(std::tuple<Tp...>& network);
-
-/**
- * Auxiliary function to get the input size of the specified layer.
- *
- * @param layer The layer used for specifying the input size.
- * @param output The layer output parameter.
- * @return The input size.
- */
-template<typename T, typename P>
-typename std::enable_if<
-    !HasWeightsCheck<T, P&(T::*)()>::value, size_t>::type
-LayerInputSize(T& layer, P& output);
-
-template<typename T, typename P>
-typename std::enable_if<
-    HasWeightsCheck<T, P&(T::*)()>::value, size_t>::type
-LayerInputSize(T& layer, P& output);
-
-/**
- * Auxiliary function to set the weights of the specified network using a given
- * initialize rule.
- *
- * @param initializeRule The rule used to initialize the network weights.
- * @param weights The weights used to set the weights of the network.
- * @param network The network used to set the weights.
- * @param offset The memory offset of the weights.
- */
-template<size_t I = 0, typename InitializationRuleType, typename... Tp>
-typename std::enable_if<I < sizeof...(Tp), void>::type
-NetworkWeights(InitializationRuleType& initializeRule,
-               arma::mat& weights,
-               std::tuple<Tp...>& network,
-               size_t offset = 0);
-
-template<size_t I, typename InitializationRuleType, typename... Tp>
-typename std::enable_if<I == sizeof...(Tp), void>::type
-NetworkWeights(InitializationRuleType& initializeRule,
-               arma::mat& weights,
-               std::tuple<Tp...>& network,
-               size_t offset = 0);
-
-/**
- * Auxiliary function to set the weights of the specified layer using the given
- * initialize rule.
- *
- * @param initializeRule The rule used to initialize the layer weights.
- * @param layer The layer used to set the weights.
- * @param weights The weights used to set the weights of the layer.
- * @param offset The memory offset of the weights.
- * @param output The output parameter of the layer.
- * @return The number of weights.
- */
-template<typename InitializationRuleType, typename T>
-typename std::enable_if<
-    HasWeightsCheck<T, arma::mat&(T::*)()>::value, size_t>::type
-LayerWeights(InitializationRuleType& initializeRule,
-             T& layer,
-             arma::mat& weights,
-             size_t offset,
-             arma::mat& output);
-
-template<typename InitializationRuleType, typename T>
-typename std::enable_if<
-    HasWeightsCheck<T, arma::cube&(T::*)()>::value, size_t>::type
-LayerWeights(InitializationRuleType& initializeRule,
-             T& layer,
-             arma::mat& weights,
-             size_t offset,
-             arma::cube& output);
-
-template<typename InitializationRuleType, typename T, typename P>
-typename std::enable_if<
-    !HasWeightsCheck<T, P&(T::*)()>::value, size_t>::type
-LayerWeights(InitializationRuleType& initializeRule,
-             T& layer,
-             arma::mat& weights,
-             size_t offset,
-             P& output);
-
-} // namespace ann
-} // namespace mlpack
-
-// Include implementation.
-#include "network_util_impl.hpp"
-
-#endif
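
All of the declarations above use the same enable_if idiom: one overload is selected while the compile-time index I is inside the tuple and recurses with I + 1; the other fires when I == sizeof...(Tp) and terminates the recursion. A compilable stand-in that counts matrix elements instead of layer weights (CountElements is a hypothetical simplification of NetworkSize/LayerSize):

    #include <armadillo>
    #include <cstdio>
    #include <tuple>

    // Terminating overload: past the last tuple element, contribute nothing.
    template<size_t I = 0, typename... Tp>
    typename std::enable_if<I == sizeof...(Tp), size_t>::type
    CountElements(std::tuple<Tp...>& /* network */) { return 0; }

    // Recursive overload: selected via SFINAE while I < sizeof...(Tp).
    template<size_t I = 0, typename... Tp>
    typename std::enable_if<I < sizeof...(Tp), size_t>::type
    CountElements(std::tuple<Tp...>& network)
    {
      return std::get<I>(network).n_elem +
          CountElements<I + 1, Tp...>(network);
    }

    int main()
    {
      std::tuple<arma::mat, arma::mat> network(arma::mat(3, 4),
                                               arma::mat(4, 2));
      std::printf("elements: %zu\n", CountElements(network)); // 12 + 8 = 20
      return 0;
    }
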
diff --git a/src/mlpack/methods/ann/performance_functions/cee_function.hpp b/src/mlpack/methods/ann/performance_functions/cee_function.hpp
deleted file mode 100644
index 11098e0..0000000
--- a/src/mlpack/methods/ann/performance_functions/cee_function.hpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/**
- * @file cee_function.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the cross-entropy error performance
- * function.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_PERFORMANCE_FUNCTIONS_CEE_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_PERFORMANCE_FUNCTIONS_CEE_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-#include <mlpack/methods/ann/layer/linear_layer.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The cross-entropy error performance function measures the network's
- * performance according to the cross-entropy error. The log in the cross-
- * entropy takes into account the closeness of a prediction and is a more
- * granular way to calculate the error.
- *
- * @tparam Layer The layer that is connected with the output layer.
- */
-template<
-    class Layer = LinearLayer< >
->
-class CrossEntropyErrorFunction
-{
- public:
-  /**
-   * Computes the cross-entropy error function.
-   *
-   * @param network Network type of FFN, CNN or RNN.
-   * @param target Target data.
-   * @param error Placeholder parameter (unused by this function).
-   * @return The cross-entropy error.
-   */
-  template<typename DataType, typename... Tp>
-  static double Error(const std::tuple<Tp...>& network,
-                      const DataType& target, const DataType &error)
-  {
-    return Error(std::get<sizeof...(Tp) - 1>(network).OutputParameter(),
-                 target, error);
-  }
-
-  /**
-   * Computes the cross-entropy error function.
-   *
-   * @param input Input data.
-   * @param target Target data.
-   * @return cross-entropy error.
-   */
-  template<typename DataType>
-  static double Error(const DataType& input,
-                      const DataType& target,
-                      const DataType&)
-  {
-    if (LayerTraits<Layer>::IsBinary)
-      return -arma::dot(arma::trunc_log(arma::abs(target - input)), target);
-
-    return -arma::dot(arma::trunc_log(input), target);
-  }
-
-}; // class CrossEntropyErrorFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
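
For a one-hot target, the non-binary branch above reduces to the negative log-probability assigned to the true class, with trunc_log guarding against log(0). A tiny worked example, assuming Armadillo:

    #include <armadillo>
    #include <cstdio>

    int main()
    {
      arma::vec prediction = { 0.7, 0.2, 0.1 }; // e.g. a softmax output
      arma::vec target = { 1.0, 0.0, 0.0 };     // one-hot label

      const double cee = -arma::dot(arma::trunc_log(prediction), target);
      std::printf("cross-entropy: %f\n", cee);  // -log(0.7), roughly 0.357
      return 0;
    }
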
diff --git a/src/mlpack/methods/ann/performance_functions/mse_function.hpp b/src/mlpack/methods/ann/performance_functions/mse_function.hpp
deleted file mode 100644
index 76322b5..0000000
--- a/src/mlpack/methods/ann/performance_functions/mse_function.hpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/**
- * @file mse_function.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the mean squared error performance function.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_PERFORMANCE_FUNCTIONS_MSE_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_PERFORMANCE_FUNCTIONS_MSE_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The mean squared error performance function measures the network's
- * performance according to the mean of squared errors.
- */
-class MeanSquaredErrorFunction
-{
- public:
-  /**
-   * Computes the mean squared error function.
-   *
-   * @param network Network type of FFN, CNN or RNN.
-   * @param target Target data.
-   * @param error Placeholder parameter (unused by this function).
-   * @return The mean of squared errors.
-   */
-  template<typename DataType, typename... Tp>
-  static double Error(const std::tuple<Tp...>& network,
-                      const DataType& target, const DataType &error)
-  {
-    return Error(std::get<sizeof...(Tp) - 1>(network).OutputParameter(),
-                 target, error);
-  }
-
-  /**
-   * Computes the mean squared error function.
-   *
-   * @param input Input data.
-   * @param target Target data.
-   * @return mean of squared errors.
-   */
-  template<typename DataType>
-  static double Error(const DataType& input,
-                      const DataType& target,
-                      const DataType&)
-  {
-    return arma::mean(arma::mean(arma::square(target - input)));
-  }
-
-}; // class MeanSquaredErrorFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
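
The mean squared error above and the sum squared error in the next file differ only in the reduction: one averages the squared residuals, the other accumulates them. Side by side, assuming Armadillo:

    #include <armadillo>
    #include <cstdio>

    int main()
    {
      arma::mat input = { { 0.9, 0.1 }, { 0.2, 0.8 } };  // network output
      arma::mat target = { { 1.0, 0.0 }, { 0.0, 1.0 } }; // desired output

      const double mse =
          arma::mean(arma::mean(arma::square(target - input)));
      const double sse = arma::accu(arma::square(target - input));

      std::printf("mse = %f, sse = %f\n", mse, sse);
      return 0;
    }
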
diff --git a/src/mlpack/methods/ann/performance_functions/sparse_function.hpp b/src/mlpack/methods/ann/performance_functions/sparse_function.hpp
deleted file mode 100644
index 4586470..0000000
--- a/src/mlpack/methods/ann/performance_functions/sparse_function.hpp
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * @file sparse_function.hpp
- * @author Siddharth Agrawal
- * @author Tham Ngap Wei
- *
- * Definition and implementation of the sparse performance function.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-
-#ifndef MLPACK_METHODS_ANN_PERFORMANCE_FUNCTIONS_SPARSE_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_PERFORMANCE_FUNCTIONS_SPARSE_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The cost function design for the sparse autoencoder.
- */
-template<typename DataType = arma::mat>
-class SparseErrorFunction
-{
- public:
-  /**
-   * Create the sparse error function with the given parameters.
-   *
-   * @param lambda L2-regularization parameter.
-   * @param beta KL divergence parameter.
-   * @param rho Sparsity parameter.
-   */
-  SparseErrorFunction(const double lambda = 0.0001,
-                      const double beta = 3,
-                      const double rho = 0.01) :
-    lambda(lambda), beta(beta), rho(rho)
-  {
-    // Nothing to do here.
-  }
-
-  SparseErrorFunction(SparseErrorFunction &&layer) noexcept
-  {
-    *this = std::move(layer);
-  }
-
-  SparseErrorFunction& operator=(SparseErrorFunction &&layer) noexcept
-  {
-    lambda = layer.lambda;
-    beta = layer.beta;
-    rho = layer.rho;
-
-    return *this;
-  }
-
-  //! Get the KL divergence parameter.
-  double Beta() const { return beta; }
-  //! Modify the KL divergence parameter.
-  void Beta(double value) { beta = value;}
-
-  //! Get the L2-regularization parameter.
-  double Lambda() const { return lambda; }
-  //! Modify the L2-regularization parameter.
-  void Lambda(double value) { lambda = value;}
-
-  //! Get the sparsity parameter.
-  double Rho() const { return rho; }
-  //! Modify the sparsity parameter.
-  void Rho(double value) { rho = value;}
-
-  /**
-   * Computes the cost of the sparse autoencoder.
-   *
-   * @param network Network type of FFN, CNN or RNN.
-   * @param target Target data.
-   * @param error The difference between the output and the input.
-   * @return The cost of the sparse autoencoder.
-   */
-  template<typename InType, typename Tp>
-  double Error(const Tp& network,
-               const InType& target, const InType &error)
-  {
-    return Error(std::get<0>(network).Weights(), std::get<3>(network).Weights(),
-        std::get<3>(network).RhoCap(), target, error);
-  }
-
-  /**
-   * Computes the cost of the sparse autoencoder.
-   *
-   * @param w1 Weights of the hidden layer.
-   * @param w2 Weights of the output layer.
-   * @param rhoCap Average activations of the hidden layer.
-   * @param target Target data.
-   * @param error The difference between the output and the input.
-   * @return The cost of the sparse autoencoder.
-   */
-  template<typename InType>
-  double Error(const InType& w1, const InType& w2,
-               const InType& rhoCap, const InType& target,
-               const InType& error)
-  {
-    // Calculate squared L2-norms of w1 and w2.
-    const double wL2SquaredNorm =
-        arma::accu(w1 % w1) + arma::accu(w2 % w2);
-
-    // Calculate the reconstruction error, the regularization cost and the KL
-    // divergence cost terms. 'sumOfSquaresError' is the average squared l2-norm
-    // of the reconstructed data difference. 'weightDecay' is the squared l2-norm
-    // of the weights w1 and w2. 'klDivergence' is the cost of the hidden layer
-    // activations not being low. It is given by the following formula:
-    // KL = sum_over_hSize(rho*log(rho/rhoCap) + (1-rho)*log((1-rho)/(1-rhoCap)))
-    const double sumOfSquaresError =
-        0.5 * arma::accu(error % error) / target.n_cols;
-
-    const double weightDecay = 0.5 * lambda * wL2SquaredNorm;
-    const double klDivergence =
-        beta * arma::accu(rho * arma::trunc_log(rho / rhoCap) + (1 - rho) *
-                          arma::trunc_log((1 - rho) / (1 - rhoCap)));
-
-    // The cost is the sum of the terms calculated above.
-    return sumOfSquaresError + weightDecay + klDivergence;
-  }
-
- private:
-  //! Locally stored L2-regularization parameter.
-  double lambda;
-
-  //! Locally stored KL divergence parameter.
-  double beta;
-
-  //! Locally stored sparsity parameter.
-  double rho;
-
-}; // class SparseErrorFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
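
The cost computed above is the sum of three terms: the averaged reconstruction error, the L2 weight decay, and the KL sparsity penalty. A standalone restatement of the arithmetic (the function name SparseCost is illustrative):

    #include <armadillo>

    double SparseCost(const arma::mat& error, const arma::mat& w1,
                      const arma::mat& w2, const arma::vec& rhoCap,
                      const double lambda, const double beta,
                      const double rho)
    {
      // Reconstruction: average squared l2-norm of the residual.
      const double sumOfSquares =
          0.5 * arma::accu(error % error) / error.n_cols;

      // Weight decay: squared l2-norms of both weight matrices.
      const double weightDecay =
          0.5 * lambda * (arma::accu(w1 % w1) + arma::accu(w2 % w2));

      // Sparsity: beta * KL(rho || rhoCap), summed over the hidden units.
      const double klDivergence = beta * arma::accu(
          rho * arma::trunc_log(rho / rhoCap) +
          (1.0 - rho) * arma::trunc_log((1.0 - rho) / (1.0 - rhoCap)));

      return sumOfSquares + weightDecay + klDivergence;
    }
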
diff --git a/src/mlpack/methods/ann/performance_functions/sse_function.hpp b/src/mlpack/methods/ann/performance_functions/sse_function.hpp
deleted file mode 100644
index a8d96f5..0000000
--- a/src/mlpack/methods/ann/performance_functions/sse_function.hpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * @file sse_function.hpp
- * @author Marcus Edel
- *
- * Definition and implementation of the sum squared error performance function.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_PERFORMANCE_FUNCTIONS_SSE_FUNCTION_HPP
-#define MLPACK_METHODS_ANN_PERFORMANCE_FUNCTIONS_SSE_FUNCTION_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * The sum squared error performance function measures the network's performance
- * according to the sum of squared errors.
- */
-class SumSquaredErrorFunction
-{
- public:
-  /**
-   * Computes the sum squared error function.
-   *
-   * @param network Network type of FFN, CNN or RNN.
-   * @param target Target data.
-   * @param error Placeholder parameter (unused by this function).
-   * @return The sum of squared errors.
-   */
-  template<typename DataType, typename... Tp>
-  static double Error(const std::tuple<Tp...>& network,
-                      const DataType& target,
-                      const DataType& error)
-  {
-    return Error(std::get<sizeof...(Tp) - 1>(network).OutputParameter(),
-                 target, error);
-  }
-
-  /**
-   * Computes the sum squared error function.
-   *
-   * @param input Input data.
-   * @param target Target data.
-   * @return sum of squared errors.
-   */
-  template<typename DataType>
-  static double Error(const DataType& input,
-                      const DataType& target,
-                      const DataType&)
-  {
-    // Use accu() so that matrix-valued DataTypes also reduce to a scalar.
-    return arma::accu(arma::square(target - input));
-  }
-
-}; // class SumSquaredErrorFunction
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
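As a quick check of the formula wrapped by the removed class, the following standalone Armadillo snippet (independent of mlpack; the variable names are illustrative) computes the same quantity:

    #include <armadillo>
    #include <iostream>

    int main()
    {
      arma::vec input  = {1.0, 2.0, 3.0};  // Network output.
      arma::vec target = {1.5, 2.0, 2.0};  // Desired output.

      // Sum of squared errors: 0.5^2 + 0.0^2 + 1.0^2 = 1.25.
      const double sse = arma::accu(arma::square(target - input));
      std::cout << sse << std::endl;
      return 0;
    }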
diff --git a/src/mlpack/methods/ann/pooling_rules/max_pooling.hpp b/src/mlpack/methods/ann/pooling_rules/max_pooling.hpp
deleted file mode 100644
index f165f2b..0000000
--- a/src/mlpack/methods/ann/pooling_rules/max_pooling.hpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * @file max_pooling.hpp
- * @author Shangtong Zhang
- *
- * Definition of the MaxPooling class, which implements max pooling.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_POOLING_RULES_MAX_POOLING_HPP
-#define MLPACK_METHODS_ANN_POOLING_RULES_MAX_POOLING_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/*
- * The max pooling rule for convolutional neural networks. Takes the maximum
- * value within the receptive block.
- */
-class MaxPooling
-{
- public:
-  /*
-   * Return the maximum value within the receptive block.
-   *
-   * @param input Input used to perform the pooling operation.
-   */
-  template<typename MatType>
-  double Pooling(const MatType& input)
-  {
-    return input.max();
-  }
-
-  /*
-   * Set the maximum value within the receptive block.
-   *
-   * @param input Input used to perform the pooling operation.
-   * @param value The unpooled value.
-   * @param output The unpooled output data.
-   */
-  template<typename MatType>
-  void Unpooling(const MatType& input, const double value, MatType& output)
-  {
-    // Route the unpooled value back to the maximal position(s) of the block;
-    // every other position receives zero.
-    output = arma::zeros<MatType>(input.n_rows, input.n_cols);
-    output.elem(arma::find(input == input.max())).fill(value);
-  }
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
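A minimal sketch of the rule above, assuming a reasonably recent Armadillo (index_max() requires Armadillo >= 7.200; the snippet is standalone and does not use the removed class):

    #include <armadillo>
    #include <iostream>

    int main()
    {
      // One 2x2 receptive block.
      arma::mat block = {{0.1, 0.9},
                         {0.4, 0.2}};

      // Forward (pooling): reduce the block to its maximum value.
      const double pooled = block.max();  // 0.9

      // Backward (unpooling): route the value back to the argmax position.
      arma::mat unpooled(2, 2, arma::fill::zeros);
      unpooled(block.index_max()) = pooled;

      std::cout << pooled << "\n" << unpooled << std::endl;
      return 0;
    }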
diff --git a/src/mlpack/methods/ann/pooling_rules/mean_pooling.hpp b/src/mlpack/methods/ann/pooling_rules/mean_pooling.hpp
deleted file mode 100644
index f921e10..0000000
--- a/src/mlpack/methods/ann/pooling_rules/mean_pooling.hpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * @file mean_pooling.hpp
- * @author Shangtong Zhang
- *
- * Definition of the MeanPooling class, which implements mean pooling.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_POOLING_RULES_MEAN_POOLING_HPP
-#define MLPACK_METHODS_ANN_POOLING_RULES_MEAN_POOLING_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/*
- * The mean pooling rule for convolutional neural networks. Averages all
- * values within the receptive block.
- */
-class MeanPooling
-{
- public:
-  /*
-   * Return the average value within the receptive block.
-   *
-   * @param input Input used to perform the pooling operation.
-   */
-  template<typename MatType>
-  double Pooling(const MatType& input)
-  {
-    return arma::mean(arma::mean(input));
-  }
-
-  /*
-   * Distribute the unpooled value evenly over the receptive block.
-   *
-   * @param input Input used to perform the pooling operation.
-   * @param value The unpooled value.
-   * @param output The unpooled output data.
-   */
-  template<typename MatType>
-  void Unpooling(const MatType& input, const double value, MatType& output)
-  {
-    output = MatType(input.n_rows, input.n_cols);
-    output.fill(value / input.n_elem);
-  }
-};
-
-} // namespace ann
-} // namespace mlpack
-
-#endif
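In contrast to max pooling, the mean rule spreads the unpooled value uniformly, since every element of the block contributes 1/n_elem to the pooled mean. A worked 2x2 example of the two functions above:

    Pooling({{1, 2}, {3, 6}})  = (1 + 2 + 3 + 6) / 4 = 3
    Unpooling(..., value = 3)  fills the 2x2 output with 3 / 4 = 0.75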
diff --git a/src/mlpack/methods/ann/rnn.hpp b/src/mlpack/methods/ann/rnn.hpp
deleted file mode 100644
index d3c4521..0000000
--- a/src/mlpack/methods/ann/rnn.hpp
+++ /dev/null
@@ -1,799 +0,0 @@
-/**
- * @file rnn.hpp
- * @author Marcus Edel
- *
- * Definition of the RNN class, which implements recurrent neural networks.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_ANN_RNN_HPP
-#define MLPACK_METHODS_ANN_RNN_HPP
-
-#include <mlpack/prereqs.hpp>
-
-#include <boost/ptr_container/ptr_vector.hpp>
-
-#include <mlpack/methods/ann/network_util.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-#include <mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp>
-#include <mlpack/methods/ann/performance_functions/cee_function.hpp>
-#include <mlpack/core/optimizers/sgd/sgd.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * Implementation of a standard recurrent neural network.
- *
- * @tparam LayerTypes Contains all layer modules used to construct the network.
- * @tparam OutputLayerType The output layer type used to evaluate the network.
- * @tparam InitializationRuleType Rule used to initialize the weight matrix.
- * @tparam PerformanceFunction Performance strategy used to calculate the error.
- */
-template <
-  typename LayerTypes,
-  typename OutputLayerType,
-  typename InitializationRuleType = NguyenWidrowInitialization,
-  class PerformanceFunction = CrossEntropyErrorFunction<>
->
-class RNN
-{
- public:
-  //! Convenience typedef for the internal model construction.
-  using NetworkType = RNN<LayerTypes,
-                          OutputLayerType,
-                          InitializationRuleType,
-                          PerformanceFunction>;
-
-  /**
-   * Create the RNN object with the given predictors and responses set (this is
-   * the set that is used to train the network) and the given optimizer.
-   * Optionally, specify which initialization rule and performance function
-   * should be used.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param optimizer Instantiated optimizer used to train the model.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameter.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType,
-           typename OutputType,
-           template<typename> class OptimizerType>
-  RNN(LayerType &&network,
-      OutputType &&outputLayer,
-      const arma::mat& predictors,
-      const arma::mat& responses,
-      OptimizerType<NetworkType>& optimizer,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-
-  /**
-   * Create the RNN object with the given predictors and responses set (this is
-   * the set that is used to train the network). Optionally, specify which
-   * initialization rule and performance function should be used.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameter.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType, typename OutputType>
-  RNN(LayerType &&network,
-      OutputType &&outputLayer,
-      const arma::mat& predictors,
-      const arma::mat& responses,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-
-  /**
-   * Create the RNN object with empty predictors and responses sets and the
-   * default optimizer. Make sure to call Train(predictors, responses) when
-   * training.
-   *
-   * @param network Network modules used to construct the network.
-   * @param outputLayer Output layer used to evaluate the network.
-   * @param initializeRule Optional instantiated InitializationRule object
-   *        for initializing the network parameter.
-   * @param performanceFunction Optional instantiated PerformanceFunction
-   *        object used to calculate the error.
-   */
-  template<typename LayerType, typename OutputType>
-  RNN(LayerType &&network,
-      OutputType &&outputLayer,
-      InitializationRuleType initializeRule = InitializationRuleType(),
-      PerformanceFunction performanceFunction = PerformanceFunction());
-
-  /**
-   * Train the recurrent neural network on the given input data. By default, the
-   * SGD optimization algorithm is used, but others can be specified
-   * (such as mlpack::optimization::RMSprop).
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @tparam OptimizerType Type of optimizer to use to train the model.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::SGD
-  >
-  void Train(const arma::mat& predictors, const arma::mat& responses);
-
-  /**
-   * Train the recurrent neural network with the given instantiated optimizer.
-   * Using this overload allows configuring the instantiated optimizer before
-   * training is performed.
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @param optimizer Instantiated optimizer used to train the model.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::SGD
-  >
-  void Train(OptimizerType<NetworkType>& optimizer);
-
-  /**
-   * Train the recurrent neural network on the given input data using the given
-   * optimizer.
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @tparam OptimizerType Type of optimizer to use to train the model.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param optimizer Instantiated optimizer used to train the model.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::SGD
-  >
-  void Train(const arma::mat& predictors,
-             const arma::mat& responses,
-             OptimizerType<NetworkType>& optimizer);
-
-  /**
-   * Predict the responses to a given set of predictors. The responses will
-   * reflect the output of the given output layer as returned by the
-   * OutputClass() function.
-   *
-   * @param predictors Input predictors.
-   * @param responses Matrix to put output predictions of responses into.
-   */
-  void Predict(arma::mat& predictors, arma::mat& responses);
-
-  /**
-   * Evaluate the recurrent neural network with the given parameters. This
-   * function is usually called by the optimizer to train the model.
-   *
-   * @param parameters Matrix model parameters.
-   * @param i Index of point to use for objective function evaluation.
-   * @param deterministic Whether to evaluate the model in deterministic (test)
-   * mode or training mode. Note that some layers act differently in the two
-   * modes.
-   */
-  double Evaluate(const arma::mat& parameters,
-                  const size_t i,
-                  const bool deterministic = true);
-
-  /**
-   * Evaluate the gradient of the recurrent neural network with the given
-   * parameters, and with respect to only one point in the dataset. This is
-   * useful for optimizers such as SGD, which require a separable objective
-   * function.
-   *
-   * @param parameters Matrix of the model parameters to be optimized.
-   * @param i Index of point to use for objective function gradient evaluation.
-   * @param gradient Matrix to output gradient into.
-   */
-  void Gradient(const arma::mat& parameters,
-                const size_t i,
-                arma::mat& gradient);
-
-  //! Return the number of separable functions (the number of predictor points).
-  size_t NumFunctions() const { return numFunctions; }
-
-  //! Return the initial point for the optimization.
-  const arma::mat& Parameters() const { return parameter; }
-  //! Modify the initial point for the optimization.
-  arma::mat& Parameters() { return parameter; }
-
-  //! Serialize the model.
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */);
-
- private:
-  /*
-   * Predict the response of the given input matrix.
-   */
-  template <typename DataType>
-  void SinglePredict(const DataType& input, DataType& output)
-  {
-    deterministic = true;
-    seqLen = input.n_rows / inputSize;
-    ResetParameter(network);
-
-    // Iterate through the input sequence and perform the feed forward pass.
-    for (seqNum = 0; seqNum < seqLen; seqNum++)
-    {
-      // Perform the forward pass and save the activations.
-      Forward(input.rows(seqNum * inputSize, (seqNum + 1) * inputSize - 1),
-          network);
-      SaveActivations(network);
-
-      // Retrieve output of the subsequence.
-      if (seqOutput)
-      {
-        // Use a distinct name to avoid shadowing the seqOutput member.
-        DataType subsequenceOutput;
-        OutputPrediction(subsequenceOutput, network);
-        output = arma::join_cols(output, subsequenceOutput);
-      }
-    }
-
-    // Retrieve output of the complete sequence.
-    if (!seqOutput)
-      OutputPrediction(output, network);
-  }
-
-  /**
-   * Reset the network by clearing the layer activations and by setting the
-   * layer status.
-   */
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ResetParameter(std::tuple<Tp...>& /* unused */)
-  {
-    activations.clear();
-  }
-
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ResetParameter(std::tuple<Tp...>& network)
-  {
-    ResetDeterministic(std::get<I>(network));
-    ResetSeqLen(std::get<I>(network));
-    ResetRecurrent(std::get<I>(network), std::get<I>(network).InputParameter());
-    std::get<I>(network).Delta().zeros();
-
-    ResetParameter<I + 1, Tp...>(network);
-  }
-
-  /**
-   * Reset the layer status by setting the current deterministic parameter
-   * for all layers that implement the Deterministic function.
-   */
-  template<typename T>
-  typename std::enable_if<
-      HasDeterministicCheck<T, bool&(T::*)(void)>::value, void>::type
-  ResetDeterministic(T& layer)
-  {
-    layer.Deterministic() = deterministic;
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      !HasDeterministicCheck<T, bool&(T::*)(void)>::value, void>::type
-  ResetDeterministic(T& /* unused */) { /* Nothing to do here */ }
-
-  /**
-   * Reset the layer sequence length by setting the current seqLen parameter
-   * for all layers that implement the SeqLen function.
-   */
-  template<typename T>
-  typename std::enable_if<
-      HasSeqLenCheck<T, size_t&(T::*)(void)>::value, void>::type
-  ResetSeqLen(T& layer)
-  {
-    layer.SeqLen() = seqLen;
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      !HasSeqLenCheck<T, size_t&(T::*)(void)>::value, void>::type
-  ResetSeqLen(T& /* unused */) { /* Nothing to do here */ }
-
-  /**
-   * Distinguish between recurrent and non-recurrent layers when resetting
-   * the recurrent parameter.
-   */
-  template<typename T, typename P>
-  typename std::enable_if<
-      HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  ResetRecurrent(T& layer, P& /* unused */)
-  {
-    layer.RecurrentParameter().zeros();
-  }
-
-  template<typename T, typename P>
-  typename std::enable_if<
-      !HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  ResetRecurrent(T& /* unused */, P& /* unused */)
-  {
-    /* Nothing to do here */
-  }
-
-  /**
-   * Initialize the network by setting the input size and output size.
-   */
-  template<size_t I = 0, typename InputDataType, typename TargetDataType,
-      typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp) - 1, void>::type
-  InitLayer(const InputDataType& /* unused */,
-            const TargetDataType& target,
-            std::tuple<Tp...>& /* unused */)
-  {
-    seqOutput = (outputSize < target.n_elem);
-  }
-
-  template<size_t I = 0, typename InputDataType, typename TargetDataType,
-      typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp) - 1, void>::type
-  InitLayer(const InputDataType& input,
-            const TargetDataType& target,
-            std::tuple<Tp...>& network)
-  {
-    Init(std::get<I>(network), std::get<I>(network).OutputParameter(),
-       std::get<I + 1>(network).Delta());
-
-    InitLayer<I + 1, InputDataType, TargetDataType, Tp...>(input, target,
-        network);
-  }
-
-  /**
-   * Retrieve the weight matrix for all layers that implement the Weights
-   * function, to extract the input size and output size.
-   */
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      HasGradientCheck<T, P&(T::*)()>::value, void>::type
-  Init(T& layer, P& /* unused */, D& /* unused */)
-  {
-    // Initialize the input size only once.
-    if (!inputSize)
-      inputSize = layer.Weights().n_cols;
-
-    outputSize = layer.Weights().n_rows;
-  }
-
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      !HasGradientCheck<T, P&(T::*)()>::value, void>::type
-  Init(T& /* unused */, P& /* unused */, D& /* unused */)
-  {
-    /* Nothing to do here */
-  }
-
-  /**
-   * Save the network layer activations.
-   */
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I == Max, void>::type
-  SaveActivations(std::tuple<Tp...>& /* unused */)
-  {
-    Save(I, std::get<I>(network), std::get<I>(network).InputParameter());
-    LinkRecurrent(network);
-  }
-
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I < Max, void>::type
-  SaveActivations(std::tuple<Tp...>& network)
-  {
-    Save(I, std::get<I>(network), std::get<I>(network).InputParameter());
-    SaveActivations<I + 1, Max, Tp...>(network);
-  }
-
-  /**
-   * Distinguish between recurrent and non-recurrent layers when storing
-   * the activations.
-   */
-  template<typename T, typename P>
-  typename std::enable_if<
-      HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  Save(const size_t layerNumber, T& layer, P& /* unused */)
-  {
-    if (activations.size() == layerNumber)
-    {
-      activations.push_back(new arma::mat(layer.RecurrentParameter().n_rows,
-          seqLen));
-    }
-
-    activations[layerNumber].unsafe_col(seqNum) = layer.RecurrentParameter();
-  }
-
-  template<typename T, typename P>
-  typename std::enable_if<
-      !HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  Save(const size_t layerNumber, T& layer, P& /* unused */)
-  {
-    if (activations.size() == layerNumber)
-    {
-      activations.push_back(new arma::mat(layer.OutputParameter().n_rows,
-          seqLen));
-    }
-
-    activations[layerNumber].unsafe_col(seqNum) = layer.OutputParameter();
-  }
-
-  /**
-   * Load the network layer activations.
-   */
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename DataType, typename... Tp
-  >
-  typename std::enable_if<I == Max, void>::type
-  LoadActivations(DataType& input, std::tuple<Tp...>& network)
-  {
-    Load(I, std::get<I>(network), std::get<I>(network).InputParameter());
-    std::get<0>(network).InputParameter() = input;
-  }
-
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename DataType, typename... Tp
-  >
-  typename std::enable_if<I < Max, void>::type
-  LoadActivations(DataType& input, std::tuple<Tp...>& network)
-  {
-    Load(I, std::get<I>(network), std::get<I>(network).InputParameter());
-    LoadActivations<I + 1, Max, DataType, Tp...>(input, network);
-  }
-
-  /**
-   * Distinguish between recurrent and non-recurrent layers when restoring
-   * the activations.
-   */
-  template<typename T, typename P>
-  typename std::enable_if<
-      HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  Load(const size_t layerNumber, T& layer, P& /* unused */)
-  {
-    layer.RecurrentParameter() = activations[layerNumber].unsafe_col(seqNum);
-  }
-
-  template<typename T, typename P>
-  typename std::enable_if<
-      !HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  Load(const size_t layerNumber, T& layer, P& /* unused */)
-  {
-    layer.OutputParameter() = activations[layerNumber].unsafe_col(seqNum);
-  }
-
-  /**
-   * Run a single iteration of the feed forward algorithm, passing the given
-   * input through each of the network modules in turn.
-   */
-  template<size_t I = 0, typename DataType, typename... Tp>
-  void Forward(const DataType& input, std::tuple<Tp...>& network)
-  {
-    std::get<I>(network).InputParameter() = input;
-    std::get<I>(network).Forward(std::get<I>(network).InputParameter(),
-        std::get<I>(network).OutputParameter());
-
-    ForwardTail<I + 1, Tp...>(network);
-  }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ForwardTail(std::tuple<Tp...>& /* unused */) { /* Nothing to do here */ }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ForwardTail(std::tuple<Tp...>& network)
-  {
-    std::get<I>(network).Forward(std::get<I - 1>(network).OutputParameter(),
-        std::get<I>(network).OutputParameter());
-
-    ForwardTail<I + 1, Tp...>(network);
-  }
-
-  /**
-   * Link the calculated activation with the correct layer.
-   */
-  template<
-      size_t I = 1,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I == Max, void>::type
-  LinkParameter(std::tuple<Tp ...>& /* unused */)
-  {
-    if (!LayerTraits<typename std::remove_reference<
-        decltype(std::get<I>(network))>::type>::IsBiasLayer)
-    {
-      std::get<I>(network).InputParameter() = std::get<I - 1>(
-          network).OutputParameter();
-    }
-  }
-
-  template<
-      size_t I = 1,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I < Max, void>::type
-  LinkParameter(std::tuple<Tp...>& network)
-  {
-    if (!LayerTraits<typename std::remove_reference<
-        decltype(std::get<I>(network))>::type>::IsBiasLayer)
-    {
-      std::get<I>(network).InputParameter() = std::get<I - 1>(
-          network).OutputParameter();
-    }
-
-    LinkParameter<I + 1, Max, Tp...>(network);
-  }
-
-  /**
-   * Link the calculated activation with the correct recurrent layer.
-   */
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I == Max, void>::type
-  LinkRecurrent(std::tuple<Tp ...>& /* unused */) { /* Nothing to do here */ }
-
-  template<
-      size_t I = 0,
-      size_t Max = std::tuple_size<LayerTypes>::value - 1,
-      typename... Tp
-  >
-  typename std::enable_if<I < Max, void>::type
-  LinkRecurrent(std::tuple<Tp...>& network)
-  {
-    UpdateRecurrent(std::get<I>(network), std::get<I>(network).InputParameter(),
-        std::get<I + 1>(network).OutputParameter());
-    LinkRecurrent<I + 1, Max, Tp...>(network);
-  }
-
-  /**
-   * Distinguish between recurrent and non-recurrent layers when updating
-   * the recurrent activations.
-   */
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  UpdateRecurrent(T& layer, P& /* unused */, D& output)
-  {
-    layer.RecurrentParameter() = output;
-  }
-
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      !HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  UpdateRecurrent(T& /* unused */, P& /* unused */, D& /* unused */)
-  {
-    /* Nothing to do here */
-  }
-
-  /*
-   * Calculate the output error and update the overall error.
-   */
-  template<typename DataType, typename ErrorType, typename... Tp>
-  double OutputError(const DataType& target,
-                     ErrorType& error,
-                     const std::tuple<Tp...>& network)
-  {
-    // Calculate and store the output error.
-    outputLayer.CalculateError(
-        std::get<sizeof...(Tp) - 1>(network).OutputParameter(), target, error);
-
-    // Measure the network's performance with the specified performance
-    // function.
-    return performanceFunc.Error(network, target, error);
-  }
-
-  /**
-   * Run a single iteration of the feed backward algorithm, using the given
-   * error of the output layer. Note that we iterate backward through the
-   * layer modules.
-   */
-  template<size_t I = 1, typename DataType, typename... Tp>
-  void Backward(DataType& error, std::tuple<Tp ...>& network)
-  {
-    std::get<sizeof...(Tp) - I>(network).Backward(
-        std::get<sizeof...(Tp) - I>(network).OutputParameter(), error,
-        std::get<sizeof...(Tp) - I>(network).Delta());
-
-    BackwardTail<I + 1, DataType, Tp...>(error, network);
-  }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I == (sizeof...(Tp)), void>::type
-  BackwardTail(const DataType& /* unused */, std::tuple<Tp...>& /* unused */)
-  {
-    /* Nothing to do here */
-  }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I < (sizeof...(Tp)), void>::type
-  BackwardTail(const DataType& error, std::tuple<Tp...>& network)
-  {
-    BackwardRecurrent(std::get<sizeof...(Tp) - I - 1>(network),
-        std::get<sizeof...(Tp) - I - 1>(network).InputParameter(),
-        std::get<sizeof...(Tp) - I + 1>(network).Delta());
-
-    std::get<sizeof...(Tp) - I>(network).Backward(
-        std::get<sizeof...(Tp) - I>(network).OutputParameter(),
-        std::get<sizeof...(Tp) - I + 1>(network).Delta(),
-        std::get<sizeof...(Tp) - I>(network).Delta());
-
-    BackwardTail<I + 1, DataType, Tp...>(error, network);
-  }
-
-  /*
-   * Update the delta of the recurrent layer.
-   */
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  BackwardRecurrent(T& layer, P& /* unused */, D& delta)
-  {
-    if (!layer.Delta().is_empty())
-      delta += layer.Delta();
-  }
-
-  template<typename T, typename P, typename D>
-  typename std::enable_if<
-      !HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  BackwardRecurrent(T& /* unused */, P& /* unused */, D& /* unused */)
-  {
-    /* Nothing to do here */
-  }
-
-  /**
-   * Iterate through all layer modules and update the gradient using the
-   * layer-defined optimizer.
-   */
-  template<size_t I = 0, size_t Max = std::tuple_size<LayerTypes>::value - 2,
-      typename... Tp>
-  typename std::enable_if<I == Max, void>::type
-  UpdateGradients(std::tuple<Tp...>& network)
-  {
-    Update(std::get<I>(network), std::get<I>(network).OutputParameter(),
-        std::get<I + 1>(network).Delta(), std::get<I + 1>(network),
-        std::get<I + 1>(network).InputParameter(),
-        std::get<I + 1>(network).Delta());
-  }
-
-  template<size_t I = 0, size_t Max = std::tuple_size<LayerTypes>::value - 2,
-      typename... Tp>
-  typename std::enable_if<I < Max, void>::type
-  UpdateGradients(std::tuple<Tp...>& network)
-  {
-    Update(std::get<I>(network), std::get<I>(network).OutputParameter(),
-        std::get<I + 1>(network).Delta(), std::get<I + 1>(network),
-        std::get<I + 1>(network).InputParameter(),
-        std::get<I + 2>(network).Delta());
-
-    UpdateGradients<I + 1, Max, Tp...>(network);
-  }
-
-  template<typename T1, typename P1, typename D1, typename T2, typename P2,
-      typename D2>
-  typename std::enable_if<
-      HasGradientCheck<T1, P1&(T1::*)()>::value &&
-      HasRecurrentParameterCheck<T2, P2&(T2::*)()>::value, void>::type
-  Update(T1& layer, P1& /* unused */, D1& /* unused */, T2& /* unused */,
-         P2& /* unused */, D2& delta2)
-  {
-    layer.Gradient(layer.InputParameter(), delta2, layer.Gradient());
-  }
-
-  template<typename T1, typename P1, typename D1, typename T2, typename P2,
-      typename D2>
-  typename std::enable_if<
-      (!HasGradientCheck<T1, P1&(T1::*)()>::value &&
-      !HasRecurrentParameterCheck<T2, P2&(T2::*)()>::value) ||
-      (!HasGradientCheck<T1, P1&(T1::*)()>::value &&
-      HasRecurrentParameterCheck<T2, P2&(T2::*)()>::value), void>::type
-  Update(T1& /* unused */, P1& /* unused */, D1& /* unused */, T2& /* unused */,
-         P2& /* unused */, D2& /* unused */)
-  {
-    /* Nothing to do here */
-  }
-
-  template<typename T1, typename P1, typename D1, typename T2, typename P2,
-      typename D2>
-  typename std::enable_if<
-      HasGradientCheck<T1, P1&(T1::*)()>::value &&
-      !HasRecurrentParameterCheck<T2, P2&(T2::*)()>::value, void>::type
-  Update(T1& layer, P1& /* unused */, D1& delta1, T2& /* unused */,
-         P2& /* unused */, D2& /* unused */)
-  {
-    layer.Gradient(layer.InputParameter(), delta1, layer.Gradient());
-  }
-
-  /*
-   * Calculate and store the output activation.
-   */
-  template<typename DataType, typename... Tp>
-  void OutputPrediction(DataType& output, std::tuple<Tp...>& network)
-  {
-    // Calculate and store the output prediction.
-    outputLayer.OutputClass(std::get<sizeof...(Tp) - 1>(
-        network).OutputParameter(), output);
-  }
-
-  //! Instantiated recurrent neural network.
-  LayerTypes network;
-
-  //! The output layer used to evaluate the network.
-  OutputLayerType& outputLayer;
-
-  //! Performance strategy used to calculate the error.
-  PerformanceFunction performanceFunc;
-
-  //! The current evaluation mode (training or testing).
-  bool deterministic;
-
-  //! Matrix of (trained) parameters.
-  arma::mat parameter;
-
-  //! The matrix of data points (predictors).
-  arma::mat predictors;
-
-  //! The matrix of responses to the input data points.
-  arma::mat responses;
-
-  //! Locally stored network input size.
-  size_t inputSize;
-
-  //! Locally stored network output size.
-  size_t outputSize;
-
-  //! The index of the current sequence number.
-  size_t seqNum;
-
-  //! Locally stored number of samples in one input sequence.
-  size_t seqLen;
-
-  //! Locally stored parameter that indicates if the output is a sequence.
-  bool seqOutput;
-
-  //! The activation storage we are using to perform the feed backward pass.
-  boost::ptr_vector<arma::mat> activations;
-
-  //! The number of separable functions (the number of predictor points).
-  size_t numFunctions;
-
-  //! Locally stored backward error.
-  arma::mat error;
-}; // class RNN
-
-} // namespace ann
-} // namespace mlpack
-
-// Include implementation.
-#include "rnn_impl.hpp"
-
-#endif
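Much of the header above dispatches on whether a layer provides a particular member (HasDeterministicCheck, HasSeqLenCheck, HasRecurrentParameterCheck, HasGradientCheck). A self-contained sketch of that enable_if detection idiom, using illustrative stand-in names rather than mlpack's macro-generated traits:

    #include <iostream>
    #include <type_traits>

    // Minimal member-detection trait: does T have 'bool& Deterministic()'?
    template<typename T>
    class HasDeterministic
    {
      template<typename U, bool& (U::*)()> struct Check;
      template<typename U> static char Test(Check<U, &U::Deterministic>*);
      template<typename U> static long Test(...);
     public:
      static const bool value = sizeof(Test<T>(0)) == sizeof(char);
    };

    struct DropoutLike
    {
      bool det = false;
      bool& Deterministic() { return det; }
    };

    struct LinearLike { };  // No Deterministic() member.

    // Overload selected when the member exists.
    template<typename T>
    typename std::enable_if<HasDeterministic<T>::value, void>::type
    ResetDeterministic(T& layer) { layer.Deterministic() = true; }

    // Overload selected otherwise; a deliberate no-op.
    template<typename T>
    typename std::enable_if<!HasDeterministic<T>::value, void>::type
    ResetDeterministic(T& /* layer */) { }

    int main()
    {
      DropoutLike dropout;
      LinearLike linear;
      ResetDeterministic(dropout);  // Sets dropout.det = true.
      ResetDeterministic(linear);   // No-op.
      std::cout << std::boolalpha << dropout.det << std::endl;  // true
      return 0;
    }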
diff --git a/src/mlpack/methods/mvu/mvu.hpp b/src/mlpack/methods/mvu/mvu.hpp
deleted file mode 100644
index c1cea30..0000000
--- a/src/mlpack/methods/mvu/mvu.hpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * @file mvu.hpp
- * @author Ryan Curtin
- *
- * An implementation of Maximum Variance Unfolding.  This file defines an MVU
- * class as well as a class representing the objective function (a semidefinite
- * program) which MVU seeks to minimize.  Minimization is performed by the
- * Augmented Lagrangian optimizer (which in turn uses the L-BFGS optimizer).
- *
- * Note: this implementation of MVU does not work.  See #189.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_MVU_MVU_HPP
-#define MLPACK_METHODS_MVU_MVU_HPP
-
-#include <mlpack/prereqs.hpp>
-
-namespace mlpack {
-namespace mvu {
-
-/**
- * The MVU class is meant to provide a good abstraction for users.  The dataset
- * needs to be provided, as well as several parameters.
- *
- * - dataset
- * - new dimensionality
- */
-class MVU
-{
- public:
-  MVU(const arma::mat& dataIn);
-
-  void Unfold(const size_t newDim,
-              const size_t numNeighbors,
-              arma::mat& outputCoordinates);
-
- private:
-  const arma::mat& data;
-};
-
-} // namespace mvu
-} // namespace mlpack
-
-#endif
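For context, the semidefinite program mentioned in the header is the standard MVU formulation: maximize the variance of the embedding, encoded in the Gram matrix K, while preserving local distances. In LaTeX notation:

    \max_{K \succeq 0} \; \operatorname{tr}(K)
    \quad \text{s.t.} \quad \sum_{i,j} K_{ij} = 0, \qquad
    K_{ii} - 2 K_{ij} + K_{jj} = \lVert x_i - x_j \rVert^2
    \;\; \text{for all neighbor pairs } (i, j).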
diff --git a/src/mlpack/methods/mvu/mvu_main.cpp b/src/mlpack/methods/mvu/mvu_main.cpp
deleted file mode 100644
index 975a8bf..0000000
--- a/src/mlpack/methods/mvu/mvu_main.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-/**
- * @file mvu_main.cpp
- * @author Ryan Curtin
- *
- * Executable for MVU.
- *
- * Note: this implementation of MVU does not work.  See #189.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#include <mlpack/prereqs.hpp>
-#include "mvu.hpp"
-
-PROGRAM_INFO("Maximum Variance Unfolding (MVU)", "This program implements "
-    "Maximum Variance Unfolding, a nonlinear dimensionality reduction "
-    "technique.  The method minimizes dimensionality by unfolding a manifold "
-    "such that the distances to the nearest neighbors of each point are held "
-    "constant.");
-
-PARAM_MATRIX_IN_REQ("input", "Input dataset.", "i");
-PARAM_INT_IN_REQ("new_dim", "New dimensionality of dataset.", "d");
-
-PARAM_MATRIX_OUT("output", "Matrix to save unfolded dataset to.", "o");
-PARAM_INT_IN("num_neighbors", "Number of nearest neighbors to consider while "
-    "unfolding.", "k", 5);
-
-using namespace mlpack;
-using namespace mlpack::mvu;
-using namespace mlpack::math;
-using namespace arma;
-using namespace std;
-
-int main(int argc, char **argv)
-{
-  // Parse command line options. Note: the stale string lookups of
-  // "input_file"/"output_file" were removed; the matrix parameters declared
-  // above are accessed directly below.
-  CLI::ParseCommandLine(argc, argv);
-  const int newDim = CLI::GetParam<int>("new_dim");
-  const int numNeighbors = CLI::GetParam<int>("num_neighbors");
-
-  if (!CLI::HasParam("output"))
-    Log::Warn << "--output_file (-o) is not specified; no results will be "
-        << "saved!" << endl;
-
-  RandomSeed(time(NULL));
-
-  // Load input dataset.
-  mat data = std::move(CLI::GetParam<arma::mat>("input"));
-
-  // Verify that the requested dimensionality is valid.
-  if (newDim <= 0 || newDim > (int) data.n_rows)
-  {
-    Log::Fatal << "Invalid new dimensionality (" << newDim << ").  Must be "
-      << "between 1 and the input dataset dimensionality (" << data.n_rows
-      << ")." << std::endl;
-  }
-
-  // Verify that the number of neighbors is valid.
-  if (numNeighbors <= 0 || numNeighbors > (int) data.n_cols)
-  {
-    Log::Fatal << "Invalid number of neighbors (" << numNeighbors << ").  Must "
-        << "be between 1 and the number of points in the input dataset ("
-        << data.n_cols << ")." << std::endl;
-  }
-
-  // Now run MVU.
-  MVU mvu(data);
-
-  mat output;
-  mvu.Unfold(newDim, numNeighbors, output);
-
-  // Save results to file.
-  if (CLI::HasParam("output"))
-    CLI::GetParam<arma::mat>("output") = std::move(output);
-}
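Had the executable shipped, the parameters declared above would have been driven roughly like this (a hypothetical invocation: the binary name is assumed from mlpack's usual mlpack_<method> convention, and per #189 the underlying implementation never worked):

    $ mlpack_mvu -i dataset.csv -d 2 -k 5 -o unfolded.csv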
diff --git a/src/mlpack/methods/rmva/rmva.hpp b/src/mlpack/methods/rmva/rmva.hpp
deleted file mode 100644
index a469296..0000000
--- a/src/mlpack/methods/rmva/rmva.hpp
+++ /dev/null
@@ -1,963 +0,0 @@
-/**
- * @file rmva.hpp
- * @author Marcus Edel
- *
- * Definition of the RecurrentNeuralAttention class, which implements the
- * Recurrent Model for Visual Attention.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#ifndef MLPACK_METHODS_RMVA_RMVA_HPP
-#define MLPACK_METHODS_RMVA_RMVA_HPP
-
-#include <mlpack/prereqs.hpp>
-
-#include <mlpack/methods/ann/network_util.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-#include <mlpack/methods/ann/init_rules/random_init.hpp>
-#include <mlpack/methods/ann/performance_functions/cee_function.hpp>
-#include <mlpack/core/optimizers/rmsprop/rmsprop.hpp>
-#include <mlpack/methods/ann/layer/negative_log_likelihood_layer.hpp>
-#include <mlpack/methods/ann/layer/vr_class_reward_layer.hpp>
-
-#include <boost/ptr_container/ptr_vector.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class implements the Recurrent Model for Visual Attention, using a
- * variety of possible layer implementations.
- *
- * For more information, see the following paper.
- *
- * @code
- * @article{MnihHGK14,
- *   title={Recurrent Models of Visual Attention},
- *   author={Volodymyr Mnih and Nicolas Heess and Alex Graves and Koray Kavukcuoglu},
- *   journal={CoRR},
- *   volume={abs/1406.6247},
- *   year={2014}
- * }
- * @endcode
- *
- * @tparam LocatorType Type of locator network.
- * @tparam LocationSensorType Type of location sensor network.
- * @tparam GlimpseSensorType Type of glimpse sensor network.
- * @tparam GlimpseType Type of glimpse network.
- * @tparam StartType Type of start network.
- * @tparam FeedbackType Type of feedback network.
- * @tparam TransferType Type of transfer network.
- * @tparam ClassifierType Type of classifier network.
- * @tparam RewardPredictorType Type of reward predictor network.
- * @tparam InitializationRuleType Rule used to initialize the weight matrix.
- * @tparam MatType Matrix type (arma::mat or arma::sp_mat).
- */
-template<
-  typename LocatorType,
-  typename LocationSensorType,
-  typename GlimpseSensorType,
-  typename GlimpseType,
-  typename StartType,
-  typename FeedbackType,
-  typename TransferType,
-  typename ClassifierType,
-  typename RewardPredictorType,
-  typename InitializationRuleType = RandomInitialization,
-  typename MatType = arma::mat
->
-class RecurrentNeuralAttention
-{
- public:
-  //! Convenience typedef for the internal model construction.
-  using NetworkType = RecurrentNeuralAttention<
-      LocatorType,
-      LocationSensorType,
-      GlimpseSensorType,
-      GlimpseType,
-      StartType,
-      FeedbackType,
-      TransferType,
-      ClassifierType,
-      RewardPredictorType,
-      InitializationRuleType,
-      MatType>;
-
-  /**
-   * Construct the RecurrentNeuralAttention object, which builds the
-   * recurrent model for visual attention using the specified networks.
-   *
-   * @param locator The locator network.
-   * @param locationSensor The location sensor network.
-   * @param glimpseSensor The glimpse sensor network.
-   * @param glimpse The glimpse network.
-   * @param start The start network.
-   * @param feedback The feedback network.
-   * @param transfer The transfer network.
-   * @param classifier The classifier network.
-   * @param rewardPredictor The reward predictor network.
-   * @param nStep Number of steps to back-propagate through time.
-   * @param initializeRule Rule used to initialize the weight matrix.
-   */
-  template<typename TypeLocator,
-           typename TypeLocationSensor,
-           typename TypeGlimpseSensor,
-           typename TypeGlimpse,
-           typename TypeStart,
-           typename TypeFeedback,
-           typename TypeTransfer,
-           typename TypeClassifier,
-           typename TypeRewardPredictor>
-  RecurrentNeuralAttention(TypeLocator&& locator,
-                           TypeLocationSensor&& locationSensor,
-                           TypeGlimpseSensor&& glimpseSensor,
-                           TypeGlimpse&& glimpse,
-                           TypeStart&& start,
-                           TypeFeedback&& feedback,
-                           TypeTransfer&& transfer,
-                           TypeClassifier&& classifier,
-                           TypeRewardPredictor&& rewardPredictor,
-                           const size_t nStep,
-                           InitializationRuleType initializeRule =
-                              InitializationRuleType());
-  /**
-   * Train the network on the given input data using the given optimizer.
-   *
-   * This will use the existing model parameters as a starting point for the
-   * optimization. If this is not what you want, then you should access the
-   * parameters vector directly with Parameters() and modify it as desired.
-   *
-   * @tparam OptimizerType Type of optimizer to use to train the model.
-   * @param predictors Input training variables.
-   * @param responses Outputs resulting from input training variables.
-   * @param optimizer Instantiated optimizer used to train the model.
-   */
-  template<
-      template<typename> class OptimizerType = mlpack::optimization::RMSprop
-  >
-  void Train(const arma::mat& predictors,
-             const arma::mat& responses,
-             OptimizerType<NetworkType>& optimizer);
-
-  /**
-   * Predict the responses to a given set of predictors. The responses will
-   * reflect the output of the given output layer as returned by the
-   * OutputClass() function.
-   *
-   * @param predictors Input predictors.
-   * @param responses Matrix to put output predictions of responses into.
-   */
-  void Predict(arma::mat& predictors, arma::mat& responses);
-
-  /**
-   * Evaluate the network with the given parameters. This function is usually
-   * called by the optimizer to train the model.
-   *
-   * @param parameters Matrix model parameters.
-   * @param i Index of point to use for objective function evaluation.
-   * @param deterministic Whether to evaluate the model in deterministic (test)
-   * mode or training mode. Note that some layers act differently in the two
-   * modes.
-   */
-  double Evaluate(const arma::mat& parameters,
-                  const size_t i,
-                  const bool deterministic = true);
-
-  /**
-   * Evaluate the gradient of the network with the given parameters, and with
-   * respect to only one point in the dataset. This is useful for
-   * optimizers such as SGD, which require a separable objective function.
-   *
-   * @param parameters Matrix of the model parameters to be optimized.
-   * @param i Index of point to use for objective function gradient evaluation.
-   * @param gradient Matrix to output gradient into.
-   */
-  void Gradient(const arma::mat& parameters,
-                const size_t i,
-                arma::mat& gradient);
-
-  //! Return the number of separable functions (the number of predictor points).
-  size_t NumFunctions() const { return numFunctions; }
-
-  //! Return the initial point for the optimization.
-  const arma::mat& Parameters() const { return parameter; }
-  //! Modify the initial point for the optimization.
-  arma::mat& Parameters() { return parameter; }
-
-  //! Return the number of steps to back-propagate through time.
-  const size_t& Rho() const { return nStep; }
-  //! Modify the number of steps to back-propagate through time.
-  size_t& Rho() { return nStep; }
-
-  //! Return the current location.
-  const arma::mat& Location();
-
-  //! Serialize the model.
-  template<typename Archive>
-  void Serialize(Archive& ar, const unsigned int /* version */);
-
- private:
-  /*
-   * Predict the response of the given input matrix.
-   */
-  template <typename InputType, typename OutputType>
-  void SinglePredict(const InputType& input, OutputType& output)
-  {
-    // Get the locator input size.
-    if (!inputSize)
-    {
-      inputSize = NetworkInputSize(locator);
-    }
-
-    // Reset networks.
-    ResetParameter(locator);
-    ResetParameter(locationSensor);
-    ResetParameter(glimpseSensor);
-    ResetParameter(glimpse);
-    ResetParameter(feedback);
-    ResetParameter(transfer);
-    ResetParameter(classifier);
-    ResetParameter(rewardPredictor);
-    ResetParameter(start);
-
-    // Sample an initial starting action by forwarding zeros through the
-    // locator.
-    locatorInput.push_back(new arma::cube(arma::zeros<arma::cube>(inputSize, 1,
-        input.n_slices)));
-
-    // Forward pass through the recurrent network.
-    for (step = 0; step < nStep; step++)
-    {
-      // Locator forward pass.
-      Forward(locatorInput.back(), locator);
-
-      // Location sensor forward pass.
-      Forward(std::get<std::tuple_size<LocatorType>::value - 1>(
-          locator).OutputParameter(), locationSensor);
-
-      // Set the location parameter for all layers that implement a Location
-      // function, e.g. the glimpse layer.
-      ResetLocation(std::get<std::tuple_size<LocatorType>::value - 1>(
-          locator).OutputParameter(), glimpseSensor);
-
-      // Glimpse sensor forward pass.
-      Forward(input, glimpseSensor);
-
-      // Concatenate the activations from the location sensor and the
-      // glimpse sensor.
-      arma::mat concatLayerOutput = arma::join_cols(
-          std::get<std::tuple_size<LocationSensorType>::value - 1>(
-          locationSensor).OutputParameter(),
-          std::get<std::tuple_size<GlimpseSensorType>::value - 1>(
-          glimpseSensor).OutputParameter());
-
-      // Glimpse forward pass.
-      Forward(concatLayerOutput, glimpse);
-
-      if (step == 0)
-      {
-        // Start forward pass.
-        Forward(std::get<std::tuple_size<GlimpseType>::value - 1>(
-            glimpse).OutputParameter(), start);
-
-        // Transfer forward pass.
-        Forward(std::get<std::tuple_size<StartType>::value - 1>(
-            start).OutputParameter(), transfer);
-      }
-      else
-      {
-        // Feedback forward pass.
-        Forward(std::get<std::tuple_size<TransferType>::value - 1>(
-            transfer).OutputParameter(), feedback);
-
-        arma::mat feedbackLayerOutput =
-          std::get<std::tuple_size<GlimpseType>::value - 1>(
-          glimpse).OutputParameter() +
-          std::get<std::tuple_size<FeedbackType>::value - 1>(
-          feedback).OutputParameter();
-
-        // Transfer forward pass.
-        Forward(feedbackLayerOutput, transfer);
-      }
-
-      // Update the input for the next time step.
-      locatorInput.push_back(new arma::cube(
-          std::get<std::tuple_size<TransferType>::value - 1>(
-          transfer).OutputParameter().memptr(), locatorInput.back().n_rows,
-          locatorInput.back().n_cols, locatorInput.back().n_slices));
-    }
-
-    // Classifier forward pass.
-    Forward(locatorInput.back().slice(0), classifier);
-
-    output = std::get<std::tuple_size<ClassifierType>::value - 1>(
-        classifier).OutputParameter();
-  }
-
-  /**
-   * Update the layer reward for all layers that implement the Reward function.
-   */
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ResetReward(const double reward, std::tuple<Tp...>& network)
-  {
-    SetReward(reward, std::get<I>(network));
-    ResetReward<I + 1, Tp...>(reward, network);
-  }
-
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ResetReward(const double /* reward */, std::tuple<Tp...>& /* network */)
-  {
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      HasRewardCheck<T, double&(T::*)()>::value, void>::type
-  SetReward(const double reward, T& layer)
-  {
-    layer.Reward() = reward;
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      !HasRewardCheck<T, double&(T::*)()>::value, void>::type
-  SetReward(const double /* reward */, T& /* layer */)
-  {
-    /* Nothing to do here */
-  }
-
-  /**
-   * Reset the network by clearing the delta and by setting the layer status.
-   */
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ResetParameter(std::tuple<Tp...>& /* network */) { /* Nothing to do here */ }
-
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ResetParameter(std::tuple<Tp...>& network)
-  {
-    ResetDeterministic(std::get<I>(network));
-    std::get<I>(network).Delta().zeros();
-
-    ResetParameter<I + 1, Tp...>(network);
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      HasDeterministicCheck<T, bool&(T::*)(void)>::value, void>::type
-  ResetDeterministic(T& layer)
-  {
-    layer.Deterministic() = deterministic;
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      !HasDeterministicCheck<T, bool&(T::*)(void)>::value, void>::type
-  ResetDeterministic(T& /* layer */) { /* Nothing to do here */ }
-
-  /**
-   * Reset the location by updating the location for all layers that implement
-   * the Location function.
-   */
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ResetLocation(const arma::mat& /* location */,
-                std::tuple<Tp...>& /* network */)
-  {
-    // Nothing to do here.
-  }
-
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ResetLocation(const arma::mat& location, std::tuple<Tp...>& network)
-  {
-    SetLocation(std::get<I>(network), location);
-    ResetLocation<I + 1, Tp...>(location, network);
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      HasLocationCheck<T, void(T::*)(const arma::mat&)>::value, void>::type
-  SetLocation(T& layer, const arma::mat& location)
-  {
-    layer.Location(location);
-  }
-
-  template<typename T>
-  typename std::enable_if<
-      !HasLocationCheck<T, void(T::*)(const arma::mat&)>::value, void>::type
-  SetLocation(T& /* layer */, const arma::mat& /* location */)
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Save the network layer activations.
-   */
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  SaveActivations(boost::ptr_vector<MatType>& activations,
-                  std::tuple<Tp...>& network,
-                  size_t& activationCounter)
-  {
-    Save(I, activations, std::get<I>(network),
-        std::get<I>(network).InputParameter());
-
-    activationCounter++;
-    SaveActivations<I + 1, Tp...>(activations, network, activationCounter);
-  }
-
-  template<size_t I = 0, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  SaveActivations(boost::ptr_vector<MatType>& /* activations */,
-                  std::tuple<Tp...>& /* network */,
-                  size_t& /* activationCounter */)
-  {
-    // Nothing to do here.
-  }
-
-  /**
-   * Distinguish between recurrent and non-recurrent layers when storing
-   * the activations.
-   */
-  template<typename T, typename P>
-  typename std::enable_if<
-      HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  Save(const size_t /* layerNumber */,
-       boost::ptr_vector<MatType>& activations,
-       T& layer,
-       P& /* unused */)
-  {
-    activations.push_back(new MatType(layer.RecurrentParameter()));
-  }
-
-  template<typename T, typename P>
-  typename std::enable_if<
-      !HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  Save(const size_t /* layerNumber */,
-       boost::ptr_vector<MatType>& activations,
-       T& layer,
-       P& /* unused */)
-  {
-    activations.push_back(new MatType(layer.OutputParameter()));
-  }
-
-  template<size_t I = 0, typename DataTypeA, typename DataTypeB, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  SaveActivations(boost::ptr_vector<DataTypeA>& activationsA,
-                  boost::ptr_vector<DataTypeB>& activationsB,
-                  size_t& dataTypeACounter,
-                  size_t& dataTypeBCounter,
-                  std::tuple<Tp...>& network)
-  {
-    Save(activationsA, activationsB, dataTypeACounter, dataTypeBCounter,
-        std::get<I>(network), std::get<I>(network).OutputParameter());
-
-    SaveActivations<I + 1, DataTypeA, DataTypeB, Tp...>(
-        activationsA, activationsB, dataTypeACounter, dataTypeBCounter,
-        network);
-  }
-
-  template<size_t I = 0, typename DataTypeA, typename DataTypeB, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  SaveActivations(boost::ptr_vector<DataTypeA>& /* activationsA */,
-                  boost::ptr_vector<DataTypeB>& /* activationsB */,
-                  size_t& /* dataTypeACounter */,
-                  size_t& /* dataTypeBCounter */,
-                  std::tuple<Tp...>& /* network */)
-  {
-    // Nothing to do here.
-  }
-
-  template<typename T, typename DataTypeA, typename DataTypeB>
-  void Save(boost::ptr_vector<DataTypeA>& activationsA,
-        boost::ptr_vector<DataTypeB>& /* activationsB */,
-       size_t& dataTypeACounter,
-       size_t& /* dataTypeBCounter */,
-       T& layer,
-       DataTypeA& /* unused */)
-  {
-    activationsA.push_back(new DataTypeA(layer.OutputParameter()));
-    dataTypeACounter++;
-  }
-
-  template<typename T, typename DataTypeA, typename DataTypeB>
-  void Save(boost::ptr_vector<DataTypeA>& /* activationsA */,
-            boost::ptr_vector<DataTypeB>& activationsB,
-            size_t& /* dataTypeACounter */,
-            size_t& dataTypeBCounter,
-            T& layer,
-            DataTypeB& /* unused */)
-  {
-    activationsB.push_back(new DataTypeB(layer.OutputParameter()));
-    dataTypeBCounter++;
-  }
-
-  /**
-   * Load the network layer activations.
-   */
-  template<size_t I = 0, typename DataType, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  LoadActivations(DataType& input,
-                  boost::ptr_vector<MatType>& /* activations */,
-                  size_t& /* activationCounter */,
-                  std::tuple<Tp...>& network)
-  {
-    std::get<0>(network).InputParameter() = input;
-    LinkParameter(network);
-  }
-
-  template<size_t I = 0, typename DataType, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  LoadActivations(DataType& input,
-                  boost::ptr_vector<MatType>& activations,
-                  size_t& activationCounter,
-                  std::tuple<Tp...>& network)
-  {
-    Load(--activationCounter, activations,
-        std::get<sizeof...(Tp) - I - 1>(network),
-        std::get<I>(network).InputParameter());
-
-    LoadActivations<I + 1, DataType, Tp...>(input, activations,
-        activationCounter, network);
-  }
-
-  /**
-   * Distinguish between recurrent and non-recurrent layers when restoring
-   * the activations.
-   */
-  template<typename T, typename P>
-  typename std::enable_if<
-      HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  Load(const size_t layerNumber,
-       boost::ptr_vector<MatType>& activations,
-       T& layer,
-       P& /* output */)
-  {
-    layer.RecurrentParameter() = activations[layerNumber];
-  }
-
-  template<typename T, typename P>
-  typename std::enable_if<
-      !HasRecurrentParameterCheck<T, P&(T::*)()>::value, void>::type
-  Load(const size_t layerNumber,
-       boost::ptr_vector<MatType>& activations,
-       T& layer,
-       P& /* output */)
-  {
-    layer.OutputParameter() = activations[layerNumber];
-  }
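
HasRecurrentParameterCheck, used above, is a member-function detector defined
elsewhere in mlpack; paired with std::enable_if it routes recurrent layers to
RecurrentParameter() and every other layer to OutputParameter(). A hand-rolled
sketch of the detection idiom, with illustrative names (HasRecurrent, Restore)
that are not mlpack's:

    #include <iostream>
    #include <type_traits>

    // Hand-rolled member-function detector; mlpack generates similar traits
    // with a macro.
    template<typename T, typename Sig>
    struct HasRecurrent
    {
      template<typename U, U> struct Check;
      template<typename U>
      static char Test(Check<Sig, &U::RecurrentParameter>*);
      template<typename U>
      static int Test(...);
      static const bool value = sizeof(Test<T>(0)) == sizeof(char);
    };

    struct PlainLayer { double output; };
    struct RecurrentLayer
    {
      double state;
      double& RecurrentParameter() { return state; }
    };

    // Selected when the layer exposes RecurrentParameter().
    template<typename T>
    typename std::enable_if<HasRecurrent<T, double&(T::*)()>::value, void>::type
    Restore(T& layer, double v) { layer.RecurrentParameter() = v; }

    // Fallback for all other layers.
    template<typename T>
    typename std::enable_if<!HasRecurrent<T, double&(T::*)()>::value, void>::type
    Restore(T& /* layer */, double /* v */) { }

    int main()
    {
      RecurrentLayer r{0.0};
      PlainLayer p{0.0};
      Restore(r, 1.5); // recurrent overload: writes r.state
      Restore(p, 1.5); // fallback: no-op
      std::cout << r.state << std::endl; // 1.5
    }
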
-
-  template<size_t I = 0,
-           typename DataType,
-           typename DataTypeA,
-           typename DataTypeB,
-           typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  LoadActivations(DataType& input,
-                  boost::ptr_vector<DataTypeA>& activationsA,
-                  boost::ptr_vector<DataTypeB>& activationsB,
-                  size_t& dataTypeACounter,
-                  size_t& dataTypeBCounter,
-                  std::tuple<Tp...>& network)
-  {
-    Load(activationsA,
-         activationsB,
-         dataTypeACounter,
-         dataTypeBCounter,
-         std::get<sizeof...(Tp) - I - 1>(network),
-         std::get<sizeof...(Tp) - I - 1>(network).OutputParameter());
-
-    LoadActivations<I + 1, DataType, DataTypeA, DataTypeB, Tp...>(
-        input, activationsA, activationsB, dataTypeACounter, dataTypeBCounter,
-        network);
-  }
-
-  template<size_t I = 0,
-           typename DataType,
-           typename DataTypeA,
-           typename DataTypeB,
-           typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  LoadActivations(DataType& input,
-                  boost::ptr_vector<DataTypeA>& /* activationsA */,
-                  boost::ptr_vector<DataTypeB>& /* activationsB */,
-                  size_t& /* dataTypeACounter */,
-                  size_t& /* dataTypeBCounter */,
-                  std::tuple<Tp...>& network)
-  {
-    std::get<0>(network).InputParameter() = input;
-    LinkParameter(network);
-  }
-
-  template<typename T, typename DataTypeA, typename DataTypeB>
-  void Load(boost::ptr_vector<DataTypeA>& activationsA,
-            boost::ptr_vector<DataTypeB>& /* activationsB */,
-            size_t& dataTypeACounter,
-            size_t& /* dataTypeBCounter */,
-            T& layer,
-            DataTypeA& /* output */)
-  {
-    layer.OutputParameter() = activationsA[--dataTypeACounter];
-  }
-
-  template<typename T, typename DataTypeA, typename DataTypeB>
-  void Load(boost::ptr_vector<DataTypeA>& /* activationsA */,
-            boost::ptr_vector<DataTypeB>& activationsB,
-            size_t& /* dataTypeACounter */,
-            size_t& dataTypeBCounter,
-            T& layer,
-            DataTypeB& /* output */)
-  {
-    layer.OutputParameter() = activationsB[--dataTypeBCounter];
-  }
-
-  /**
-   * Run a single iteration of the feed forward algorithm, using the given
-   * input to compute the output activations of every layer in the network.
-   */
-  template<size_t I = 0, typename DataType, typename... Tp>
-  void Forward(const DataType& input, std::tuple<Tp...>& t)
-  {
-    std::get<I>(t).InputParameter() = input;
-    std::get<I>(t).Forward(std::get<I>(t).InputParameter(),
-        std::get<I>(t).OutputParameter());
-
-    ForwardTail<I + 1, Tp...>(t);
-  }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  ForwardTail(std::tuple<Tp...>& network)
-  {
-    LinkParameter(network);
-  }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  ForwardTail(std::tuple<Tp...>& t)
-  {
-    std::get<I>(t).Forward(std::get<I - 1>(t).OutputParameter(),
-        std::get<I>(t).OutputParameter());
-
-    ForwardTail<I + 1, Tp...>(t);
-  }
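
Forward and ForwardTail above follow the standard compile-time
tuple-iteration pattern: two overloads guarded by std::enable_if recurse on
the index I, and the I == sizeof...(Tp) overload terminates the recursion. A
stripped-down sketch of just the iteration machinery:

    #include <cstddef>
    #include <iostream>
    #include <tuple>
    #include <type_traits>

    // Terminating case: index is past the last element; nothing to do.
    template<std::size_t I = 0, typename... Tp>
    typename std::enable_if<I == sizeof...(Tp), void>::type
    ForEach(std::tuple<Tp...>& /* t */) { }

    // Recursive case: visit element I, then recurse on I + 1.
    template<std::size_t I = 0, typename... Tp>
    typename std::enable_if<I < sizeof...(Tp), void>::type
    ForEach(std::tuple<Tp...>& t)
    {
      std::cout << std::get<I>(t) << std::endl;
      ForEach<I + 1, Tp...>(t);
    }

    int main()
    {
      std::tuple<int, double, char> t(1, 2.5, 'x');
      ForEach(t); // prints 1, then 2.5, then x
    }
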
-
-  /**
-   * Run a single iteration of the backward algorithm, propagating the given
-   * error back through the network and storing each layer's delta.
-   */
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<sizeof...(Tp) == 1, void>::type
-  Backward(const DataType& error, std::tuple<Tp ...>& t)
-  {
-    std::get<sizeof...(Tp) - I>(t).Backward(
-      std::get<sizeof...(Tp) - I>(t).OutputParameter(), error,
-      std::get<sizeof...(Tp) - I>(t).Delta());
-  }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  Backward(const DataType& error, std::tuple<Tp ...>& t)
-  {
-    std::get<sizeof...(Tp) - I>(t).Backward(
-        std::get<sizeof...(Tp) - I>(t).OutputParameter(), error,
-        std::get<sizeof...(Tp) - I>(t).Delta());
-
-    BackwardTail<I + 1, DataType, Tp...>(error, t);
-  }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I == (sizeof...(Tp)), void>::type
-  BackwardTail(const DataType& /* error */, std::tuple<Tp...>& t)
-  {
-    std::get<sizeof...(Tp) - I>(t).Backward(
-        std::get<sizeof...(Tp) - I>(t).OutputParameter(),
-        std::get<sizeof...(Tp) - I + 1>(t).Delta(),
-        std::get<sizeof...(Tp) - I>(t).Delta());
-  }
-
-  template<size_t I = 1, typename DataType, typename... Tp>
-  typename std::enable_if<I < (sizeof...(Tp)), void>::type
-  BackwardTail(const DataType& error, std::tuple<Tp...>& t)
-  {
-    std::get<sizeof...(Tp) - I>(t).Backward(
-        std::get<sizeof...(Tp) - I>(t).OutputParameter(),
-        std::get<sizeof...(Tp) - I + 1>(t).Delta(),
-        std::get<sizeof...(Tp) - I>(t).Delta());
-
-    BackwardTail<I + 1, DataType, Tp...>(error, t);
-  }
-
-  /**
-   * Link the calculated activation with the correct layer.
-   */
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I == sizeof...(Tp), void>::type
-  LinkParameter(std::tuple<Tp ...>& /* network */) { /* Nothing to do here */ }
-
-  template<size_t I = 1, typename... Tp>
-  typename std::enable_if<I < sizeof...(Tp), void>::type
-  LinkParameter(std::tuple<Tp...>& network)
-  {
-    if (!LayerTraits<typename std::remove_reference<
-        decltype(std::get<I>(network))>::type>::IsBiasLayer)
-    {
-      std::get<I>(network).InputParameter() = std::get<I - 1>(
-          network).OutputParameter();
-    }
-
-    LinkParameter<I + 1, Tp...>(network);
-  }
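
LinkParameter consults LayerTraits<...>::IsBiasLayer, a compile-time flag
exposed by a traits template, to avoid wiring bias layers to the previous
layer's output. A minimal sketch of that traits mechanism (the layer names are
illustrative; mlpack's LayerTraits exposes several such flags):

    #include <iostream>

    // Primary template: default flags for any layer.
    template<typename LayerType>
    struct LayerTraits
    {
      static const bool IsBiasLayer = false;
    };

    struct LinearLayer { };
    struct BiasLayer { };

    // Specialization flips the flag for bias layers.
    template<>
    struct LayerTraits<BiasLayer>
    {
      static const bool IsBiasLayer = true;
    };

    int main()
    {
      std::cout << LayerTraits<LinearLayer>::IsBiasLayer << std::endl; // 0
      std::cout << LayerTraits<BiasLayer>::IsBiasLayer << std::endl;   // 1
    }
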
-
-  /**
-   * Iterate through all layer modules and update the gradient using the
-   * layer-defined optimizer.
-   */
-  template<typename InputType, typename ErrorType, typename... Tp>
-  void UpdateGradients(const InputType& input,
-                       const ErrorType& error,
-                       std::tuple<Tp...>& network)
-  {
-    Update(std::get<0>(network),
-           input,
-           std::get<1>(network).Delta(),
-           std::get<1>(network).OutputParameter());
-
-    UpdateGradients<1, ErrorType, Tp...>(error, network);
-  }
-
-  template<size_t I = 0, typename ErrorType, typename... Tp>
-  typename std::enable_if<I < (sizeof...(Tp) - 1), void>::type
-  UpdateGradients(const ErrorType& error, std::tuple<Tp...>& network)
-  {
-    Update(std::get<I>(network),
-           std::get<I>(network).InputParameter(),
-           std::get<I + 1>(network).Delta(),
-           std::get<I>(network).OutputParameter());
-
-    UpdateGradients<I + 1, ErrorType, Tp...>(error, network);
-  }
-
-  template<size_t I = 0, typename ErrorType, typename... Tp>
-  typename std::enable_if<I == (sizeof...(Tp) - 1), void>::type
-  UpdateGradients(const ErrorType& error, std::tuple<Tp...>& network)
-  {
-    Update(std::get<I>(network),
-           std::get<I>(network).InputParameter(),
-           error,
-           std::get<I>(network).OutputParameter());
-  }
-
-  template<typename LayerType,
-           typename InputType,
-           typename ErrorType,
-           typename GradientType>
-  typename std::enable_if<
-      HasGradientCheck<LayerType,
-          void(LayerType::*)(const InputType&,
-                             const ErrorType&,
-                             GradientType&)>::value, void>::type
-  Update(LayerType& layer,
-         const InputType& input,
-         const ErrorType& error,
-         GradientType& /* gradient */)
-  {
-    layer.Gradient(input, error, layer.Gradient());
-  }
-
-  template<typename LayerType,
-           typename InputType,
-           typename ErrorType,
-           typename GradientType>
-  typename std::enable_if<
-      !HasGradientCheck<LayerType,
-          void(LayerType::*)(const InputType&,
-                             const ErrorType&,
-                             GradientType&)>::value, void>::type
-  Update(LayerType& /* layer */,
-         const InputType& /* input */,
-         const ErrorType& /* error */,
-         GradientType& /* gradient */)
-  {
-    // Nothing to do here.
-  }
-
-  //! The locator network.
-  LocatorType locator;
-
-  //! The location sensor network.
-  LocationSensorType locationSensor;
-
-  //! The glimpse sensor network.
-  GlimpseSensorType glimpseSensor;
-
-  //! The glimpse network.
-  GlimpseType glimpse;
-
-  //! The start network.
-  StartType start;
-
-  //! The feedback network.
-  FeedbackType feedback;
-
-  //! The transfer network.
-  TransferType transfer;
-
-  //! The classifier network.
-  ClassifierType classifier;
-
-  //! The reward predictor network.
-  RewardPredictorType rewardPredictor;
-
-  //! The number of steps for backpropagation through time.
-  size_t nStep;
-
-  //! Locally stored network input size.
-  size_t inputSize;
-
-  //! The current evaluation mode (training or testing).
-  bool deterministic;
-
-  //! The index of the current step.
-  size_t step;
-
-  //! The activation storage we are using to perform the feed backward pass for
-  //! the glimpse network.
-  boost::ptr_vector<arma::mat> glimpseActivations;
-
-  //! The activation storage we are using to perform the feed backward pass for
-  //! the locator network.
-  boost::ptr_vector<arma::mat> locatorActivations;
-
-  //! The activation storage we are using to perform the feed backward pass for
-  //! the feedback network.
-  boost::ptr_vector<arma::mat> feedbackActivations;
-
-  //! The activation storage we are using to save the feedback network input.
-  boost::ptr_vector<arma::mat> feedbackActivationsInput;
-
-  //! The activation storage we are using to perform the feed backward pass for
-  //! the transfer network.
-  boost::ptr_vector<arma::mat> transferActivations;
-
-  //! The activation storage we are using to perform the feed backward pass for
-  //! the location sensor network.
-  boost::ptr_vector<arma::mat> locationSensorActivations;
-
-  //! The activation storage we are using to perform the feed backward pass for
-  //! the glimpse sensor network.
-  boost::ptr_vector<arma::mat> glimpseSensorMatActivations;
-  boost::ptr_vector<arma::cube> glimpseSensorCubeActivations;
-
-  //! The activation storage we are using to perform the feed backward pass for
-  //! the locator input.
-  boost::ptr_vector<arma::cube> locatorInput;
-
-  //! The storage we are using to save the location.
-  boost::ptr_vector<arma::mat> location;
-
-  //! The current number of activations in the glimpse sensor network.
-  size_t glimpseSensorMatCounter;
-  size_t glimpseSensorCubeCounter;
-
-  //! The current number of activations in the glimpse network.
-  size_t glimpseActivationsCounter;
-
-  //! The current number of activations in the glimpse start network.
-  size_t startActivationsCounter;
-
-  //! The current number of activations in the feedback network.
-  size_t feedbackActivationsCounter;
-
-  //! The current number of activations in the transfer network.
-  size_t transferActivationsCounter;
-
-  //! The current number of activations in the locator network.
-  size_t locatorActivationsCounter;
-
-  //! The current number of activations in the location sensor network.
-  size_t locationSensorActivationsCounter;
-
-  //! The current number of activations in the glimpse sensor network.
-  size_t glimpseSensorMatActivationsCounter;
-  size_t glimpseSensorCubeActivationsCounter;
-
-  //! The current number of locations in the location storage.
-  size_t locationCounter;
-
-  //! Matrix of (trained) parameters.
-  arma::mat parameter;
-
-  //! The matrix of data points (predictors).
-  arma::mat predictors;
-
-  //! The matrix of responses to the input data points.
-  arma::mat responses;
-
-  //! The number of separable functions (the number of predictor points).
-  size_t numFunctions;
-
-  //! Storage used to merge the reward input.
-  arma::field<arma::mat> rewardInput;
-
-  //! The current input.
-  arma::cube input;
-
-  //! The current target.
-  arma::mat target;
-
-  //! Locally stored performance functions.
-  NegativeLogLikelihoodLayer<> negativeLogLikelihoodFunction;
-  VRClassRewardLayer<> vRClassRewardFunction;
-
-  //! Locally stored size of the locator network.
-  size_t locatorSize;
-
-  //! Locally stored size of the location sensor network.
-  size_t locationSensorSize;
-
-  //! Locally stored size of the glimpse sensor network.
-  size_t glimpseSensorSize;
-
-  //! Locally stored size of the glimpse network.
-  size_t glimpseSize;
-
-  //! Locally stored size of the start network.
-  size_t startSize;
-
-  //! Locally stored size of the feedback network.
-  size_t feedbackSize;
-
-  //! Locally stored size of the transfer network.
-  size_t transferSize;
-
-  //! Locally stored size of the classifier network.
-  size_t classifierSize;
-
-  //! Locally stored size of the reward predictor network.
-  size_t rewardPredictorSize;
-
-  //! Locally stored recurrent gradient.
-  arma::mat recurrentGradient;
-
-  //! Locally stored action error.
-  arma::mat actionError;
-
-  //! Locally stored current location.
-  arma::mat evaluationLocation;
-}; // class RecurrentNeuralAttention
-
-} // namespace ann
-} // namespace mlpack
-
-// Include implementation.
-#include "rmva_impl.hpp"
-
-#endif
diff --git a/src/mlpack/methods/rmva/rmva_main.cpp b/src/mlpack/methods/rmva/rmva_main.cpp
deleted file mode 100644
index fafac26..0000000
--- a/src/mlpack/methods/rmva/rmva_main.cpp
+++ /dev/null
@@ -1,286 +0,0 @@
-/**
- * @file rmva_main.cpp
- * @author Marcus Edel
- *
- * Main executable for the Recurrent Model for Visual Attention.
- *
- * mlpack is free software; you may redistribute it and/or modify it under the
- * terms of the 3-clause BSD license.  You should have received a copy of the
- * 3-clause BSD license along with mlpack.  If not, see
- * http://www.opensource.org/licenses/BSD-3-Clause for more information.
- */
-#include <mlpack/prereqs.hpp>
-#include <mlpack/core/util/param.hpp>
-
-#include "rmva.hpp"
-
-#include <mlpack/methods/ann/layer/glimpse_layer.hpp>
-#include <mlpack/methods/ann/layer/linear_layer.hpp>
-#include <mlpack/methods/ann/layer/bias_layer.hpp>
-#include <mlpack/methods/ann/layer/base_layer.hpp>
-#include <mlpack/methods/ann/layer/reinforce_normal_layer.hpp>
-#include <mlpack/methods/ann/layer/multiply_constant_layer.hpp>
-#include <mlpack/methods/ann/layer/constant_layer.hpp>
-#include <mlpack/methods/ann/layer/log_softmax_layer.hpp>
-#include <mlpack/methods/ann/layer/hard_tanh_layer.hpp>
-
-#include <mlpack/core/optimizers/minibatch_sgd/minibatch_sgd.hpp>
-#include <mlpack/core/optimizers/sgd/sgd.hpp>
-
-using namespace mlpack;
-using namespace mlpack::ann;
-using namespace mlpack::optimization;
-using namespace std;
-
-PROGRAM_INFO("Recurrent Model for Visual Attention",
-    "This program trains the Recurrent Model for Visual Attention on the given "
-    "labeled training set, or loads a model from the given model file, and then"
-    " may use that trained model to classify the points in a given test set."
-    "\n\n"
-    "Labels are expected to be passed in separately as their own file "
-    "(--labels_file).  If training is not desired, a pre-existing model can be "
-    "loaded with the --input_model_file (-m) option."
-    "\n\n"
-    "If classifying a test set is desired, the test set should be in the file "
-    "specified with the --test_file (-T) option, and the classifications will "
-    "be saved to the file specified with the --output_file (-o) option.  If "
-    "saving a trained model is desired, the --output_model_file (-M) option "
-    "should be given.");
-
-// Model loading/saving.
-PARAM_STRING_IN("input_model_file", "File containing the Recurrent Model for "
-    "Visual Attention.", "m", "");
-PARAM_STRING_OUT("output_model_file", "File to save trained Recurrent Model for"
-    " Visual Attention to.", "M");
-
-// Training parameters.
-PARAM_MATRIX_IN("training", "Matrix containing the training set.", "t");
-PARAM_MATRIX_IN("labels", "Matrix containing labels for the training set.",
-    "l");
-
-PARAM_STRING_IN("optimizer", "Optimizer to use; 'sgd', 'minibatch-sgd', or "
-    "'lbfgs'.", "O", "minibatch-sgd");
-
-PARAM_INT_IN("max_iterations", "Maximum number of iterations for SGD or RMSProp"
-    " (0 indicates no limit).", "n", 500000);
-PARAM_DOUBLE_IN("tolerance", "Maximum tolerance for termination of SGD or "
-    "RMSProp.", "e", 1e-7);
-
-PARAM_DOUBLE_IN("step_size", "Step size for stochastic gradient descent "
-    "(alpha),", "a", 0.01);
-PARAM_FLAG("linear_scan", "Don't shuffle the order in which data points are "
-    "visited for SGD or mini-batch SGD.", "L");
-PARAM_INT_IN("batch_size", "Batch size for mini-batch SGD.", "b", 20);
-
-PARAM_INT_IN("rho", "Number of steps for the back-propagate through time.", "r",
-    7);
-
-PARAM_INT_IN("classes", "The number of classes.", "c", 10);
-
-PARAM_INT_IN("seed", "Random seed.  If 0, 'std::time(NULL)' is used.", "s", 0);
-
-// Test parameters.
-PARAM_MATRIX_IN("test", "Matrix containing the test set.", "T");
-PARAM_MATRIX_OUT("output", "The matrix in which the predicted labels for the "
-    "test set will be written.", "o");
-
-int main(int argc, char** argv)
-{
-  CLI::ParseCommandLine(argc, argv);
-
-  // Check input parameters.
-  if (CLI::HasParam("training") && CLI::HasParam("input_model_file"))
-    Log::Fatal << "Cannot specify both --training_file (-t) and "
-       << "--input_model_file (-m)!" << endl;
-
-  if (!CLI::HasParam("training") && !CLI::HasParam("input_model_file"))
-    Log::Fatal << "Neither --training_file (-t) nor --input_model_file (-m) are"
-        << " specified!" << endl;
-
-  if (!CLI::HasParam("training") && CLI::HasParam("labels"))
-    Log::Warn << "--labels_file (-l) ignored because --training_file (-t) is "
-        << "not specified." << endl;
-
-  if (!CLI::HasParam("output") && !CLI::HasParam("output_model_file"))
-    Log::Warn << "Neither --output_file (-o) nor --output_model_file (-M) "
-        << "specified; no output will be saved!" << endl;
-
-  if (CLI::HasParam("output") && !CLI::HasParam("test"))
-    Log::Warn << "--output_file (-o) ignored because no test file specified "
-        << "with --test_file (-T)." << endl;
-
-  if (!CLI::HasParam("output") && CLI::HasParam("test"))
-    Log::Warn << "--test_file (-T) specified, but classification results will "
-        << "not be saved because --output_file (-o) is not specified." << endl;
-
-  const string optimizerType = CLI::GetParam<string>("optimizer");
-
-  if ((optimizerType != "sgd") && (optimizerType != "lbfgs") &&
-      (optimizerType != "minibatch-sgd"))
-  {
-    Log::Fatal << "Optimizer type '" << optimizerType << "' unknown; must be "
-        << "'sgd', 'minibatch-sgd', or 'lbfgs'!" << endl;
-  }
-
-  const double stepSize = CLI::GetParam<double>("step_size");
-  const size_t maxIterations = (size_t) CLI::GetParam<int>("max_iterations");
-  const double tolerance = CLI::GetParam<double>("tolerance");
-  const bool shuffle = !CLI::HasParam("linear_scan");
-  const size_t batchSize = (size_t) CLI::GetParam<int>("batch_size");
-  const size_t rho = (size_t) CLI::GetParam<int>("rho");
-  const size_t numClasses = (size_t) CLI::GetParam<int>("classes");
-
-  const size_t hiddenSize = 256;
-  const double unitPixels = 13;
-  const double locatorStd = 0.11;
-  const size_t imageSize = 28;
-  const size_t locatorHiddenSize = 128;
-  const size_t glimpsePatchSize = 8;
-  const size_t glimpseDepth = 1;
-  const size_t glimpseScale = 2;
-  const size_t glimpseHiddenSize = 128;
-  const size_t imageHiddenSize = 256;
-
-  // Locator network.
-  LinearMappingLayer<> linearLayer0(hiddenSize, 2);
-  BiasLayer<> biasLayer0(2, 1);
-  HardTanHLayer<> hardTanhLayer0;
-  ReinforceNormalLayer<> reinforceNormalLayer0(2 * locatorStd);
-  HardTanHLayer<> hardTanhLayer1;
-  MultiplyConstantLayer<> multiplyConstantLayer0(2 * unitPixels / imageSize);
-  auto locator = std::tie(linearLayer0, biasLayer0, hardTanhLayer0,
-      reinforceNormalLayer0, hardTanhLayer1, multiplyConstantLayer0);
-
-  // Location sensor network.
-  LinearLayer<> linearLayer1(2, locatorHiddenSize);
-  BiasLayer<> biasLayer1(locatorHiddenSize, 1);
-  ReLULayer<> rectifierLayer0;
-  auto locationSensor = std::tie(linearLayer1, biasLayer1, rectifierLayer0);
-
-  // Glimpse sensor network.
-  GlimpseLayer<> glimpseLayer0(1, glimpsePatchSize, glimpseDepth, glimpseScale);
-  LinearMappingLayer<> linearLayer2(64, glimpseHiddenSize);
-  BiasLayer<> biasLayer2(glimpseHiddenSize, 1);
-  ReLULayer<> rectifierLayer1;
-  auto glimpseSensor = std::tie(glimpseLayer0, linearLayer2, biasLayer2,
-      rectifierLayer1);
-
-  // Glimpse network.
-  LinearLayer<> linearLayer3(glimpseHiddenSize + locatorHiddenSize,
-      imageHiddenSize);
-  BiasLayer<> biasLayer3(imageHiddenSize, 1);
-  ReLULayer<> rectifierLayer2;
-  LinearLayer<> linearLayer4(imageHiddenSize, hiddenSize);
-  BiasLayer<> biasLayer4(hiddenSize, 1);
-  auto glimpse = std::tie(linearLayer3, biasLayer3, rectifierLayer2,
-      linearLayer4, biasLayer4);
-
-  // Feedback network.
-  LinearLayer<> recurrentLayer0(imageHiddenSize, hiddenSize);
-  BiasLayer<> recurrentLayerBias0(hiddenSize, 1);
-  auto feedback = std::tie(recurrentLayer0, recurrentLayerBias0);
-
-  // Start network.
-  AdditionLayer<> startLayer0(hiddenSize, 1);
-  auto start = std::tie(startLayer0);
-
-  // Transfer network.
-  ReLULayer<> rectifierLayer3;
-  auto transfer = std::tie(rectifierLayer3);
-
-  // Classifier network.
-  LinearLayer<> linearLayer5(hiddenSize, numClasses);
-  BiasLayer<> biasLayer6(numClasses, 1);
-  LogSoftmaxLayer<> logSoftmaxLayer0;
-  auto classifier = std::tie(linearLayer5, biasLayer6, logSoftmaxLayer0);
-
-  // Reward predictor network.
-  ConstantLayer<> constantLayer0(1, 1);
-  AdditionLayer<> additionLayer0(1, 1);
-  auto rewardPredictor = std::tie(constantLayer0, additionLayer0);
-
-  // Recurrent Model for Visual Attention.
-  RecurrentNeuralAttention<decltype(locator),
-                           decltype(locationSensor),
-                           decltype(glimpseSensor),
-                           decltype(glimpse),
-                           decltype(start),
-                           decltype(feedback),
-                           decltype(transfer),
-                           decltype(classifier),
-                           decltype(rewardPredictor),
-                           RandomInitialization>
-    net(locator, locationSensor, glimpseSensor, glimpse, start, feedback,
-        transfer, classifier, rewardPredictor, rho);
-
-  // Either we have to train a model, or load a model.
-  if (CLI::HasParam("training"))
-  {
-    arma::mat trainingData = std::move(CLI::GetParam<arma::mat>("training"));
-
-    arma::mat labels;
-
-    // Did the user pass in labels?
-    if (CLI::HasParam("labels"))
-    {
-      // Load labels.
-      labels = std::move(CLI::GetParam<arma::mat>("labels"));
-
-      // Do the labels need to be transposed?
-      if (labels.n_cols == 1)
-        labels = labels.t();
-    }
-
-    // Now run the optimization.
-    if (optimizerType == "sgd")
-    {
-      SGD<decltype(net)> opt(net);
-      opt.StepSize() = stepSize;
-      opt.MaxIterations() = maxIterations;
-      opt.Tolerance() = tolerance;
-      opt.Shuffle() = shuffle;
-
-      Timer::Start("rmva_training");
-      net.Train(trainingData, labels, opt);
-      Timer::Stop("rmva_training");
-    }
-    else if (optimizerType == "minibatch-sgd")
-    {
-      MiniBatchSGD<decltype(net)> opt(net);
-      opt.StepSize() = stepSize;
-      opt.MaxIterations() = maxIterations;
-      opt.Tolerance() = tolerance;
-      opt.Shuffle() = shuffle;
-      opt.BatchSize() = batchSize;
-
-      Timer::Start("rmva_training");
-      net.Train(trainingData, labels, opt);
-      Timer::Stop("rmva_training");
-    }
-  }
-  else
-  {
-    // Load the model from file.
-    data::Load(CLI::GetParam<string>("input_model_file"), "rmva_model", net);
-  }
-
-  // Do we need to do testing?
-  if (CLI::HasParam("test"))
-  {
-    arma::mat testingData = std::move(CLI::GetParam<arma::mat>("test"));
-
-    // Time the prediction over the test set.
-    arma::mat results;
-    Timer::Start("rmva_testing");
-    net.Predict(testingData, results);
-    Timer::Stop("rmva_testing");
-
-    if (CLI::HasParam("output"))
-      CLI::GetParam<arma::mat>("output") = std::move(results);
-  }
-
-  // Save the model, if requested.
-  if (CLI::HasParam("output_model_file"))
-    data::Save(CLI::GetParam<string>("output_model_file"), "rmva_model", net);
-}
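
One detail worth noting in the removed main() above: each sub-network is
built with std::tie, which produces a tuple of lvalue references. The layer
objects are never copied into the RecurrentNeuralAttention instance, so they
must outlive it. A minimal sketch of that semantics:

    #include <iostream>
    #include <tuple>
    #include <type_traits>

    int main()
    {
      int a = 1;
      double b = 2.0;

      // std::tie builds a tuple of lvalue references; nothing is copied.
      auto layers = std::tie(a, b);
      static_assert(std::is_same<decltype(layers),
          std::tuple<int&, double&>>::value, "tuple of references");

      // Writing through the tuple mutates the original object.
      std::get<0>(layers) = 5;
      std::cout << a << std::endl; // 5
    }
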

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mlpack.git


