[mlpack] 25/37: Remove stuff that patches did not (fake commit during svn to git transition).

Barak A. Pearlmutter barak+git at pearlmutter.net
Mon Feb 15 19:35:48 UTC 2016


This is an automated email from the git hooks/post-receive script.

bap pushed a commit to tag mlpack-1.0.10
in repository mlpack.

commit da7f3bd8ced7384ee59f4d2fe80dcf17328d7231
Author: Ryan Curtin <ryan at ratml.org>
Date:   Sat Dec 20 20:52:29 2014 -0500

    Remove stuff that patches did not (fake commit during svn to git transition).
---
 src/mlpack/bindings/CMakeLists.txt                 |   4 -
 src/mlpack/bindings/matlab/CMakeLists.txt          | 154 ---------
 src/mlpack/bindings/matlab/allkfn/CMakeLists.txt   |  19 --
 src/mlpack/bindings/matlab/allkfn/allkfn.cpp       | 194 -----------
 src/mlpack/bindings/matlab/allkfn/allkfn.m         |  58 ----
 src/mlpack/bindings/matlab/allknn/CMakeLists.txt   |  19 --
 src/mlpack/bindings/matlab/allknn/allknn.cpp       | 279 ---------------
 src/mlpack/bindings/matlab/allknn/allknn.m         |  60 ----
 src/mlpack/bindings/matlab/emst/CMakeLists.txt     |  19 --
 src/mlpack/bindings/matlab/emst/emst.cpp           |  72 ----
 src/mlpack/bindings/matlab/emst/emst.m             |  52 ---
 src/mlpack/bindings/matlab/gmm/CMakeLists.txt      |  19 --
 src/mlpack/bindings/matlab/gmm/gmm.cpp             | 129 -------
 src/mlpack/bindings/matlab/gmm/gmm.m               |  28 --
 src/mlpack/bindings/matlab/hmm/hmm_generate.cpp    | 373 ---------------------
 src/mlpack/bindings/matlab/hmm/hmm_generate.m      |  28 --
 .../bindings/matlab/kernel_pca/CMakeLists.txt      |  19 --
 .../bindings/matlab/kernel_pca/kernel_pca.cpp      | 136 --------
 src/mlpack/bindings/matlab/kernel_pca/kernel_pca.m |  71 ----
 src/mlpack/bindings/matlab/kmeans/CMakeLists.txt   |  19 --
 src/mlpack/bindings/matlab/kmeans/kmeans.cpp       | 175 ----------
 src/mlpack/bindings/matlab/kmeans/kmeans.m         |  28 --
 src/mlpack/bindings/matlab/lars/CMakeLists.txt     |  19 --
 src/mlpack/bindings/matlab/lars/lars.cpp           |  58 ----
 src/mlpack/bindings/matlab/lars/lars.m             |  48 ---
 src/mlpack/bindings/matlab/nca/CMakeLists.txt      |  19 --
 src/mlpack/bindings/matlab/nca/nca.cpp             |  55 ---
 src/mlpack/bindings/matlab/nca/nca.m               |  24 --
 src/mlpack/bindings/matlab/nmf/CMakeLists.txt      |  19 --
 src/mlpack/bindings/matlab/nmf/nmf.cpp             | 106 ------
 src/mlpack/bindings/matlab/nmf/nmf.m               |  58 ----
 src/mlpack/bindings/matlab/pca/CMakeLists.txt      |  19 --
 src/mlpack/bindings/matlab/pca/pca.cpp             |  62 ----
 src/mlpack/bindings/matlab/pca/pca.m               |  33 --
 .../bindings/matlab/range_search/CMakeLists.txt    |  19 --
 .../bindings/matlab/range_search/range_search.cpp  | 325 ------------------
 .../bindings/matlab/range_search/range_search.m    |  47 ---
 src/mlpack/methods/adaboost/CMakeLists.txt         |  27 --
 src/mlpack/methods/adaboost/adaboost.hpp           |  69 ----
 src/mlpack/methods/adaboost/adaboost_impl.hpp      | 215 ------------
 src/mlpack/methods/adaboost/adaboost_main.cpp      |  95 ------
 src/mlpack/tests/adaboost_test.cpp                 |  57 ----
 42 files changed, 3329 deletions(-)

diff --git a/src/mlpack/bindings/CMakeLists.txt b/src/mlpack/bindings/CMakeLists.txt
deleted file mode 100644
index 040456d..0000000
--- a/src/mlpack/bindings/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-# Recurse into individual binding subdirectories, if we are supposed to.
-if(MATLAB_BINDINGS)
-  add_subdirectory(matlab)
-endif(MATLAB_BINDINGS)
diff --git a/src/mlpack/bindings/matlab/CMakeLists.txt b/src/mlpack/bindings/matlab/CMakeLists.txt
deleted file mode 100644
index 6941c94..0000000
--- a/src/mlpack/bindings/matlab/CMakeLists.txt
+++ /dev/null
@@ -1,154 +0,0 @@
-# Build rules for the MATLAB bindings for MLPACK.  These may not work well on
-# non-Linux systems.
-
-# We need the mex compiler for this to work.
-find_package(MatlabMex REQUIRED)
-
-# If the mex compiler is wrapping an "unsupported" version, warn the user that
-# they may have issues with the produced bindings for a multitude of reasons.
-# We can only reasonably check this on a UNIX-like system.
-if(UNIX)
-  # The file test.cpp does not exist, but mex will still print a warning if it's
-  # using a weird version.
-  execute_process(COMMAND "${MATLAB_MEX}" test.cpp
-                  RESULT_VARIABLE MEX_RESULT_TRASH
-                  OUTPUT_VARIABLE MEX_OUTPUT
-                  ERROR_VARIABLE MEX_ERROR_TRASH)
-
-  string(REGEX MATCH "Warning: You are using" MEX_WARNING "${MEX_OUTPUT}")
-
-  if(MEX_WARNING)
-    # We have to find the old compiler version and the new compiler version; if
-    # the MATLAB version is newer, then we don't need to worry.  If this step
-    # fails somehow, we will just issue the warning anyway (just in case).
-    string(REGEX REPLACE
-        ".*using [a-zA-Z]* version \"([0-9.]*)[^\"]*\".*"
-        "\\1" OTHER_COMPILER_VERSION "${MEX_OUTPUT}")
-    string(REGEX REPLACE
-        ".*currently supported with MEX is \"([0-9.]*)[^\"]*\".*"
-        "\\1" MEX_COMPILER_VERSION "${MEX_OUTPUT}")
-
-    # If MEX_COMPILER_VERSION is greater than OTHER_COMPILER_VERSION, we don't
-    # need to issue a warning.
-    set(NEED_TO_WARN 1)
-    if(MEX_COMPILER_VERSION AND OTHER_COMPILER_VERSION)
-      # We seem to have read two valid version strings.  So we can compare
-      # them, and maybe we don't need to issue the warning.
-      if(NOT ("${MEX_COMPILER_VERSION}" VERSION_LESS
-          "${OTHER_COMPILER_VERSION}"))
-        # The mex compiler is newer than our version.  So no warning is
-        # needed.
-        set(NEED_TO_WARN 0)
-      endif(NOT ("${MEX_COMPILER_VERSION}" VERSION_LESS
-          "${OTHER_COMPILER_VERSION}"))
-    endif(MEX_COMPILER_VERSION AND OTHER_COMPILER_VERSION)
-
-    if(NEED_TO_WARN EQUAL 1)
-      message(WARNING "The MATLAB runtime glibc is different from the system "
-          "glibc.  This can (and probably will) cause the MLPACK bindings "
-          "generated by this build script to fail with odd GLIBCXX_a_b_c "
-          "version complaints when they are run.  Assuming that the system "
-          "glibc is newer than the MATLAB-provided version, the MATLAB version "
-          "can probably be deleted (always save a copy in case this is wrong!)."
-          "\nFor more information on this confusing issue, see\n"
-          "http://dovgalecs.com/blog/matlab-glibcxx_3-4-11-not-found/\nand for "
-          "an overly-detailed dissertation/rant on why it is not possible to "
-          "work around this issue in any way, see\n"
-          "http://www.mlpack.org/trac/ticket/253.")
-    endif(NEED_TO_WARN EQUAL 1)
-  endif(MEX_WARNING)
-endif(UNIX)
-
-# Ignore the fact that we are setting CMAKE_SHARED_LIBRARY_CXX_FLAGS on CMake
-# 2.8.9 and newer.  Because we are requiring at least CMake 2.8.5, we only have
-# to check the patch version.
-if(${CMAKE_PATCH_VERSION} GREATER 8)
-  cmake_policy(SET CMP0018 OLD)
-endif(${CMAKE_PATCH_VERSION} GREATER 8)
-
-# Use the mex compiler to compile.
-set(CMAKE_CXX_COMPILER "${MATLAB_MEX}")
-
-# Set flags for the mex compiler, because a lot of the default CMake flags
-# aren't accepted by mex.  The user who wants to customize these things should
-# probably modify their mexopts.sh so that mex uses those flags by default.
-# There is no easy way to tell mex to compile with profiling symbols, so that is
-# not done even if PROFILE is set.
-if(DEBUG)
-  set(CMAKE_CXX_FLAGS "-g")
-  set(CMAKE_C_FLAGS "-g")
-else(DEBUG)
-  set(CMAKE_CXX_FLAGS "-O")
-  set(CMAKE_C_FLAGS "-O")
-endif(DEBUG)
-
-# Don't give -fPIC; mex will do that for us.
-set(CMAKE_SHARED_LIBRARY_C_FLAGS "")
-set(CMAKE_SHARED_LIBRARY_CXX_FLAGS "")
-
-# Don't make 'lib<method>.mexglx'.
-set(CMAKE_SHARED_LIBRARY_PREFIX "")
-set(CMAKE_SHARED_MODULE_PREFIX "")
-
-# Set custom commands for mex compilation, because the flags are (in general)
-# odd and different.
-set(CMAKE_CXX_COMPILE_OBJECT "<CMAKE_CXX_COMPILER> -outdir <OBJECT_DIR> <FLAGS> -c <SOURCE>")
-set(CMAKE_CXX_CREATE_SHARED_MODULE "<CMAKE_CXX_COMPILER> -cxx <LINK_FLAGS> -output <TARGET> <OBJECTS> <LINK_LIBRARIES>")
-set(CMAKE_CXX_CREATE_SHARED_LIBRARY "${CMAKE_CXX_CREATE_SHARED_MODULE}")
-
-# mex is weird because it doesn't respect the -o option, but in general it
-# appears to turn <source>.cpp into <source>.o, so CMake needs to know to
-# replace the extension.
-set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)
-
-if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86_64")
-  set(CMAKE_SHARED_LIBRARY_SUFFIX ".mexa64")
-  set(CMAKE_SHARED_MODULE_SUFFIX  ".mexa64")
-elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86" OR ${CMAKE_SYSTEM_PROCESSOR}
-    STREQUAL "i686")
-  set(CMAKE_SHARED_LIBRARY_SUFFIX ".mexglx")
-  set(CMAKE_SHARED_MODULE_SUFFIX  ".mexglx")
-endif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "x86_64")
-
-# Place MATLAB bindings in matlab/.
-set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/matlab/)
-
-include_directories(${CMAKE_SOURCE_DIR}/src/) # So we can include <mlpack/...>.
-
-# Set MATLAB toolbox install directory.
-set(MATLAB_TOOLBOX_DIR "${MATLAB_ROOT}/toolbox")
-
-# CHANGE HERE FOR NEW BINDINGS!!!!
-add_subdirectory(allkfn)
-add_subdirectory(allknn)
-add_subdirectory(emst)
-add_subdirectory(kmeans)
-add_subdirectory(range_search)
-add_subdirectory(gmm)
-add_subdirectory(pca)
-add_subdirectory(kernel_pca)
-add_subdirectory(lars)
-add_subdirectory(nca)
-add_subdirectory(nmf)
-
-# Create a target whose sole purpose is to modify the pathdef.m MATLAB file so
-# that the MLPACK toolbox is added to the MATLAB default path.
-add_custom_target(matlab ALL
-    # Modify pathdef.m.
-    COMMAND ${CMAKE_COMMAND} -D MATLAB_ROOT="${MATLAB_ROOT}" -D
-        PATHDEF_OUTPUT_FILE="${CMAKE_BINARY_DIR}/matlab/pathdef.m" -P
-        ${CMAKE_SOURCE_DIR}/CMake/ModifyMatlabPathdef.cmake
-    # Due to the dependencies, 'make matlab' makes all the bindings.
-    DEPENDS
-    allknn_mex
-    allkfn_mex
-    emst_mex
-    gmm_mex
-    kmeans_mex
-    range_search_mex
-)
-
-install(FILES "${CMAKE_BINARY_DIR}/matlab/pathdef.m"
-    DESTINATION "${MATLAB_ROOT}/toolbox/local/"
-)
-
diff --git a/src/mlpack/bindings/matlab/allkfn/CMakeLists.txt b/src/mlpack/bindings/matlab/allkfn/CMakeLists.txt
deleted file mode 100644
index 42152b5..0000000
--- a/src/mlpack/bindings/matlab/allkfn/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(allkfn_mex SHARED
-  allkfn.cpp
-)
-target_link_libraries(allkfn_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS allkfn_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  allkfn.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/allkfn/allkfn.cpp b/src/mlpack/bindings/matlab/allkfn/allkfn.cpp
deleted file mode 100644
index 1924d91..0000000
--- a/src/mlpack/bindings/matlab/allkfn/allkfn.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-/**
- * @file allkfn.cpp
- * @author Patrick Mason
- *
- * MEX function for MATLAB All-kFN binding.
- */
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-#include <mlpack/methods/neighbor_search/neighbor_search.hpp>
-
-using namespace std;
-using namespace mlpack;
-using namespace mlpack::neighbor;
-using namespace mlpack::tree;
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // Check the inputs.
-  if (nrhs != 6)
-  {
-    mexErrMsgTxt("Expecting seven arguments.");
-  }
-
-  if (nlhs != 2)
-  {
-    mexErrMsgTxt("Two outputs required.");
-  }
-
-  size_t numPoints = mxGetN(prhs[0]);
-  size_t numDimensions = mxGetM(prhs[0]);
-
-  // Create the reference matrix.
-  arma::mat referenceData(numDimensions, numPoints);
-  // setting the values.
-  double * mexDataPoints = mxGetPr(prhs[0]);
-  for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-  {
-    referenceData(i) = mexDataPoints[i];
-  }
-
-  // getting the leafsize
-  int lsInt = (int) mxGetScalar(prhs[3]);
-
-  // getting k
-  size_t k = (int) mxGetScalar(prhs[1]);
-
-  // naive algorithm?
-  bool naive = (mxGetScalar(prhs[4]) == 1.0);
-
-  // single mode?
-  bool singleMode = (mxGetScalar(prhs[5]) == 1.0);
-
-  // the query matrix
-  double * mexQueryPoints = mxGetPr(prhs[2]);
-  arma::mat queryData;
-  bool hasQueryData = ((mxGetM(prhs[2]) != 0) && (mxGetN(prhs[2]) != 0));
-
-  // Sanity check on k value: must be greater than 0, must be less than or
-  // equal to the number of reference points.
-  if (k > referenceData.n_cols)
-  {
-    stringstream os;
-    os << "Invalid k: " << k << "; must be greater than 0 and less ";
-    os << "than or equal to the number of reference points (";
-    os << referenceData.n_cols << ")." << endl;
-    mexErrMsgTxt(os.str().c_str());
-  }
-
-  // Sanity check on leaf size.
-  if (lsInt < 0)
-  {
-    stringstream os;
-    os << "Invalid leaf size: " << lsInt << ".  Must be greater ";
-    os << "than or equal to 0." << endl;
-    mexErrMsgTxt(os.str().c_str());
-  }
-  size_t leafSize = lsInt;
-
-  // Naive mode overrides single mode.
-  if (singleMode && naive)
-  {
-    mexWarnMsgTxt("single_mode ignored because naive is present.");
-  }
-
-  if (naive)
-    leafSize = referenceData.n_cols;
-
-  arma::Mat<size_t> neighbors;
-  arma::mat distances;
-
-  AllkFN* allkfn = NULL;
-
-  std::vector<size_t> oldFromNewRefs;
-
-  // Build trees by hand, so we can save memory: if we pass a tree to
-  // NeighborSearch, it does not copy the matrix.
-  BinarySpaceTree<bound::HRectBound<2>, QueryStat<FurthestNeighborSort> >
-      refTree(referenceData, oldFromNewRefs, leafSize);
-  BinarySpaceTree<bound::HRectBound<2>, QueryStat<FurthestNeighborSort> >*
-      queryTree = NULL; // Empty for now.
-
-  std::vector<size_t> oldFromNewQueries;
-
-  if (hasQueryData)
-  {
-    // setting the values.
-    mexDataPoints = mxGetPr(prhs[2]);
-    numPoints = mxGetN(prhs[2]);
-    numDimensions = mxGetM(prhs[2]);
-    queryData = arma::mat(numDimensions, numPoints);
-    for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-    {
-      queryData(i) = mexDataPoints[i];
-    }
-
-    if (naive && leafSize < queryData.n_cols)
-      leafSize = queryData.n_cols;
-
-    // Build trees by hand, so we can save memory: if we pass a tree to
-    // NeighborSearch, it does not copy the matrix.
-    queryTree = new BinarySpaceTree<bound::HRectBound<2>,
-        QueryStat<FurthestNeighborSort> >(queryData, oldFromNewQueries,
-        leafSize);
-
-    allkfn = new AllkFN(&refTree, queryTree, referenceData, queryData,
-        singleMode);
-  }
-  else
-  {
-    allkfn = new AllkFN(&refTree, referenceData, singleMode);
-  }
-
-  allkfn->Search(k, neighbors, distances);
-
-  // We have to map back to the original indices from before the tree
-  // construction.
-  arma::mat distancesOut(distances.n_rows, distances.n_cols);
-  arma::Mat<size_t> neighborsOut(neighbors.n_rows, neighbors.n_cols);
-
-  // Do the actual remapping.
-  if (hasQueryData)
-  {
-    for (size_t i = 0; i < distances.n_cols; ++i)
-    {
-      // Map distances (copy a column).
-      distancesOut.col(oldFromNewQueries[i]) = distances.col(i);
-
-      // Map indices of neighbors.
-      for (size_t j = 0; j < distances.n_rows; ++j)
-      {
-        neighborsOut(j, oldFromNewQueries[i]) = oldFromNewRefs[neighbors(j, i)];
-      }
-    }
-  }
-  else
-  {
-    for (size_t i = 0; i < distances.n_cols; ++i)
-    {
-      // Map distances (copy a column).
-      distancesOut.col(oldFromNewRefs[i]) = distances.col(i);
-
-      // Map indices of neighbors.
-      for (size_t j = 0; j < distances.n_rows; ++j)
-      {
-        neighborsOut(j, oldFromNewRefs[i]) = oldFromNewRefs[neighbors(j, i)];
-      }
-    }
-  }
-
-  // Clean up.
-  if (queryTree)
-    delete queryTree;
-
-  // constructing matrix to return to matlab
-  plhs[0] = mxCreateDoubleMatrix(distances.n_rows, distances.n_cols, mxREAL);
-  plhs[1] = mxCreateDoubleMatrix(neighbors.n_rows, neighbors.n_cols, mxREAL);
-
-  // setting the values
-  double * out = mxGetPr(plhs[0]);
-  for (int i = 0, n = distances.n_rows * distances.n_cols; i < n; ++i)
-  {
-    out[i] = distances(i);
-  }
-  out = mxGetPr(plhs[1]);
-  for (int i = 0, n = neighbors.n_rows * neighbors.n_cols; i < n; ++i)
-  {
-    out[i] = neighbors(i);
-  }
-
-  // More clean up.
-  delete allkfn;
-}
diff --git a/src/mlpack/bindings/matlab/allkfn/allkfn.m b/src/mlpack/bindings/matlab/allkfn/allkfn.m
deleted file mode 100644
index b1cd5ba..0000000
--- a/src/mlpack/bindings/matlab/allkfn/allkfn.m
+++ /dev/null
@@ -1,58 +0,0 @@
-function [distances, neighbors] = allkfn(dataPoints, k, varargin)
-% [distances, neighbors] = allkfn(dataPoints, k, varargin)
-%
-% Calculate the all k-furthest-neighbors of a set of points.  You may specify a
-% separate set of reference points and query points, or just a reference set
-% which will be used as both the reference and query set.
-%
-% The output matrices are organized such that row i and column j in the
-% neighbors matrix corresponds to the index of the point in the reference set
-% which is the i'th furthest neighbor from the point in the query set with index
-% j.  Row i and column j in the distances output matrix corresponds to the
-% distance between those two points.
-%
-% Parameters:
-%
-% dataPoints - The reference set of data points.  Columns are assumed to
-%              represent dimensions, with rows representing separate points.
-% k          - The number of furthest neighbors to find.
-%
-% Optional parameters (i.e. allkfn(..., 'parameter', value, ...)):
-%
-% 'queryPoints' - An optional set of query points, if the reference and query
-%                 sets are different.  Columns are assumed to represent
-%                 dimensions, with rows representing separate points.
-% 'leafSize'    - Leaf size in the kd-tree.  Defaults to 20.
-% 'naive'       - If true, use naive O(n^2) computation instead of tree-based
-%                 search.  Defaults to false.
-% 'singleMode'  - If true, use single-tree traversal.  Defaults to false.
-%
-% Examples:
-%
-% [distances, neighbors] = allkfn(dataPoints, 5);
-% [distances, neighbors] = allkfn(dataPoints, 5, 'singleMode', true);
-% [distances, neighbors] = allkfn(dataPoints, 5, 'queryPoints', queryPoints);
-
-% A parser for the inputs.
-p = inputParser;
-p.addParamValue('queryPoints', zeros(0), @ismatrix);
-p.addParamValue('leafSize', 20, @isscalar);
-p.addParamValue('naive', false, @(x) (x == true) || (x == false));
-p.addParamValue('singleMode', false, @(x) (x == true) || (x == false));
-
-% parsing the varargin options
-varargin{:}
-p.parse(varargin{:});
-parsed = p.Results;
-parsed
-
-% interfacing with mlpack
-[distances neighbors] = mex_allkfn(dataPoints', k, parsed.queryPoints', ...
-    parsed.leafSize, parsed.naive, parsed.singleMode);
-
-% transposing results
-distances = distances';
-neighbors = neighbors' + 1; % MATLAB indices begin at 1, not 0
-
-return;
-
diff --git a/src/mlpack/bindings/matlab/allknn/CMakeLists.txt b/src/mlpack/bindings/matlab/allknn/CMakeLists.txt
deleted file mode 100644
index f7df5b8..0000000
--- a/src/mlpack/bindings/matlab/allknn/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(allknn_mex SHARED
-  allknn.cpp
-)
-target_link_libraries(allknn_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS allknn_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  allknn.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/allknn/allknn.cpp b/src/mlpack/bindings/matlab/allknn/allknn.cpp
deleted file mode 100644
index a13b114..0000000
--- a/src/mlpack/bindings/matlab/allknn/allknn.cpp
+++ /dev/null
@@ -1,279 +0,0 @@
-/**
- * @file allknn.cpp
- * @author Patrick Mason
- *
- * MEX function for MATLAB All-kNN binding.
- */
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-#include <mlpack/core/tree/cover_tree.hpp>
-#include <mlpack/methods/neighbor_search/neighbor_search.hpp>
-
-using namespace std;
-using namespace mlpack;
-using namespace mlpack::neighbor;
-using namespace mlpack::tree;
-
-// the gateway, required by all mex functions
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // checking inputs
-  if (nrhs != 7)
-  {
-    mexErrMsgTxt("Expecting seven arguments.");
-  }
-
-  if (nlhs != 2)
-  {
-    mexErrMsgTxt("Two outputs required.");
-  }
-
-  // getting the dimensions of the reference matrix
-  size_t numPoints = mxGetN(prhs[0]);
-  size_t numDimensions = mxGetM(prhs[0]);
-
-  // feeding the referenceData matrix
-  arma::mat referenceData(numDimensions, numPoints);
-  // setting the values.
-  double * mexDataPoints = mxGetPr(prhs[0]);
-  for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-  {
-    referenceData(i) = mexDataPoints[i];
-  }
-
-  // getting the leafsize
-  int lsInt = (int) mxGetScalar(prhs[3]);
-
-  // getting k
-  size_t k = (int) mxGetScalar(prhs[1]);
-
-  // naive algorithm?
-  bool naive = (mxGetScalar(prhs[4]) == 1.0);
-
-  // single mode?
-  bool singleMode = (mxGetScalar(prhs[5]) == 1.0);
-
-  // the query matrix
-  double * mexQueryPoints = mxGetPr(prhs[2]);
-  arma::mat queryData;
-  bool hasQueryData = ((mxGetM(prhs[2]) != 0) && (mxGetN(prhs[2]) != 0));
-
-  // cover-tree?
-  bool usesCoverTree = (mxGetScalar(prhs[6]) == 1.0);
-
-  // Sanity check on k value: must be greater than 0, must be less than or
-  // equal to the number of reference points.
-  if (k > referenceData.n_cols)
-  {
-    stringstream os;
-    os << "Invalid k: " << k << "; must be greater than 0 and less ";
-    os << "than or equal to the number of reference points (";
-    os << referenceData.n_cols << ")." << endl;
-    mexErrMsgTxt(os.str().c_str());
-  }
-
-  // Sanity check on leaf size.
-  if (lsInt < 0)
-  {
-    stringstream os;
-    os << "Invalid leaf size: " << lsInt << ".  Must be greater "
-        "than or equal to 0." << endl;
-    mexErrMsgTxt(os.str().c_str());
-  }
-  size_t leafSize = lsInt;
-
-  // Naive mode overrides single mode.
-  if (singleMode && naive)
-  {
-     mexWarnMsgTxt("single_mode ignored because naive is present.");
-  }
-
-  if (naive)
-    leafSize = referenceData.n_cols;
-
-  arma::Mat<size_t> neighbors;
-  arma::mat distances;
-
-  // Use kd-trees unless the user requested cover trees.
-  if (!usesCoverTree)
-  {
-    // Because we may construct it differently, we need a pointer.
-    AllkNN* allknn = NULL;
-
-    // Mappings for when we build the tree.
-    std::vector<size_t> oldFromNewRefs;
-
-    // Build trees by hand, so we can save memory: if we pass a tree to
-    // NeighborSearch, it does not copy the matrix.
-
-    BinarySpaceTree<bound::HRectBound<2>, QueryStat<NearestNeighborSort> >
-      refTree(referenceData, oldFromNewRefs, leafSize);
-    BinarySpaceTree<bound::HRectBound<2>, QueryStat<NearestNeighborSort> >*
-      queryTree = NULL; // Empty for now.
-
-    std::vector<size_t> oldFromNewQueries;
-
-    if (hasQueryData)
-    {
-      // setting the values.
-      mexDataPoints = mxGetPr(prhs[2]);
-      numPoints = mxGetN(prhs[2]);
-      numDimensions = mxGetM(prhs[2]);
-      queryData = arma::mat(numDimensions, numPoints);
-      for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-      {
-        queryData(i) = mexDataPoints[i];
-      }
-
-      if (naive && leafSize < queryData.n_cols)
-        leafSize = queryData.n_cols;
-
-      // Build trees by hand, so we can save memory: if we pass a tree to
-      // NeighborSearch, it does not copy the matrix.
-      if (!singleMode)
-      {
-        queryTree = new BinarySpaceTree<bound::HRectBound<2>,
-            QueryStat<NearestNeighborSort> >(queryData, oldFromNewQueries,
-            leafSize);
-      }
-
-      allknn = new AllkNN(&refTree, queryTree, referenceData, queryData,
-          singleMode);
-    }
-    else
-    {
-      allknn = new AllkNN(&refTree, referenceData, singleMode);
-    }
-
-    arma::mat distancesOut;
-    arma::Mat<size_t> neighborsOut;
-
-    allknn->Search(k, neighborsOut, distancesOut);
-
-    // We have to map back to the original indices from before the tree
-    // construction.
-    neighbors.set_size(neighborsOut.n_rows, neighborsOut.n_cols);
-    distances.set_size(distancesOut.n_rows, distancesOut.n_cols);
-
-    // Do the actual remapping.
-    if ((hasQueryData) && !singleMode)
-    {
-      for (size_t i = 0; i < distancesOut.n_cols; ++i)
-      {
-        // Map distances (copy a column) and square root.
-        distances.col(oldFromNewQueries[i]) = sqrt(distancesOut.col(i));
-
-        // Map indices of neighbors.
-        for (size_t j = 0; j < distancesOut.n_rows; ++j)
-        {
-          neighbors(j, oldFromNewQueries[i]) =
-              oldFromNewRefs[neighborsOut(j, i)];
-        }
-      }
-    }
-    else if ((hasQueryData) && singleMode)
-    {
-      // No remapping of queries is necessary.  So distances are the same.
-      distances = sqrt(distancesOut);
-
-      // The neighbor indices must be mapped.
-      for (size_t j = 0; j < neighborsOut.n_elem; ++j)
-      {
-        neighbors[j] = oldFromNewRefs[neighborsOut[j]];
-      }
-    }
-    else
-    {
-      for (size_t i = 0; i < distancesOut.n_cols; ++i)
-      {
-        // Map distances (copy a column).
-        distances.col(oldFromNewRefs[i]) = sqrt(distancesOut.col(i));
-
-        // Map indices of neighbors.
-        for (size_t j = 0; j < distancesOut.n_rows; ++j)
-        {
-          neighbors(j, oldFromNewRefs[i]) = oldFromNewRefs[neighborsOut(j, i)];
-        }
-      }
-    }
-
-    // Clean up.
-    if (queryTree)
-      delete queryTree;
-
-    delete allknn;
-  }
-  else // Cover trees.
-  {
-    // Build our reference tree.
-    CoverTree<metric::LMetric<2, true>, tree::FirstPointIsRoot,
-        QueryStat<NearestNeighborSort> > referenceTree(referenceData, 1.3);
-    CoverTree<metric::LMetric<2, true>, tree::FirstPointIsRoot,
-        QueryStat<NearestNeighborSort> >* queryTree = NULL;
-
-    NeighborSearch<NearestNeighborSort, metric::LMetric<2, true>,
-        CoverTree<metric::LMetric<2, true>, tree::FirstPointIsRoot,
-        QueryStat<NearestNeighborSort> > >* allknn = NULL;
-
-    // See if we have query data.
-    if (hasQueryData)
-    {
-      // setting the values.
-      mexDataPoints = mxGetPr(prhs[2]);
-      numPoints = mxGetN(prhs[2]);
-      numDimensions = mxGetM(prhs[2]);
-      queryData = arma::mat(numDimensions, numPoints);
-      for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-      {
-        queryData(i) = mexDataPoints[i];
-      }
-
-      // Build query tree.
-      if (!singleMode)
-      {
-        queryTree = new CoverTree<metric::LMetric<2, true>,
-            tree::FirstPointIsRoot, QueryStat<NearestNeighborSort> >(queryData,
-            1.3);
-      }
-
-      allknn = new NeighborSearch<NearestNeighborSort, metric::LMetric<2, true>,
-          CoverTree<metric::LMetric<2, true>, tree::FirstPointIsRoot,
-          QueryStat<NearestNeighborSort> > >(&referenceTree, queryTree,
-          referenceData, queryData, singleMode);
-    }
-    else
-    {
-      allknn = new NeighborSearch<NearestNeighborSort, metric::LMetric<2, true>,
-          CoverTree<metric::LMetric<2, true>, tree::FirstPointIsRoot,
-          QueryStat<NearestNeighborSort> > >(&referenceTree, referenceData,
-          singleMode);
-    }
-
-    allknn->Search(k, neighbors, distances);
-
-    delete allknn;
-
-    if (queryTree)
-      delete queryTree;
-  }
-
-  // writing back to matlab
-  // constructing matrix to return to matlab
-  plhs[0] = mxCreateDoubleMatrix(distances.n_rows, distances.n_cols, mxREAL);
-  plhs[1] = mxCreateDoubleMatrix(neighbors.n_rows, neighbors.n_cols, mxREAL);
-
-  // setting the values
-  double * out = mxGetPr(plhs[0]);
-  for (int i = 0, n = distances.n_rows * distances.n_cols; i < n; ++i)
-  {
-    out[i] = distances(i);
-  }
-  out = mxGetPr(plhs[1]);
-  for (int i = 0, n = neighbors.n_rows * neighbors.n_cols; i < n; ++i)
-  {
-    out[i] = neighbors(i);
-  }
-
-}
diff --git a/src/mlpack/bindings/matlab/allknn/allknn.m b/src/mlpack/bindings/matlab/allknn/allknn.m
deleted file mode 100644
index c5910dc..0000000
--- a/src/mlpack/bindings/matlab/allknn/allknn.m
+++ /dev/null
@@ -1,60 +0,0 @@
-function [distances neighbors] = allknn(dataPoints, k, varargin)
-%All K-Nearest-Neighbors
-%
-%  This program will calculate the all k-nearest-neighbors of a set of points
-%  using kd-trees or cover trees (cover tree support is experimental and may not
-%  be optimally fast). You may specify a separate set of reference points and
-%  query points, or just a reference set which will be used as both the reference
-%  and query set.
-%  
-%  For example, the following will calculate the 5 nearest neighbors of each
-%  point in dataPoints, storing the resulting distances in 'distances' and the
-%  indices of the neighbors in 'neighbors':
-%
-%    [distances, neighbors] = allknn(dataPoints, 5);
-%
-%  The output matrices are organized such that row i and column j in the
-%  neighbors matrix corresponds to the index of the point in the reference set
-%  which is the i'th nearest neighbor of the point in the query set with index
-%  j.  Row i and column j in the distances matrix corresponds to the distance
-%  between those two points.  If no query set is specified, the reference set
-%  is also used as the query set.
-%
-% Parameters:
-% dataPoints    - The matrix of data points.  Columns are assumed to represent
-%                 dimensions, with rows representing separate points.
-% k             - The number of nearest neighbors to find.
-%
-% Optional parameters (i.e. allknn(..., 'parameter', value, ...)):
-%
-% 'queryPoints' - An optional separate set of query points.
-% 'leafSize'    - Leaf size in the kd-tree.  Defaults to 20.
-% 'naive'       - If true, use naive O(n^2) computation.  Defaults to false.
-% 'singleMode'  - If true, use single-tree traversal.  Defaults to false.
-% 'coverTree'   - If true, use cover trees instead of kd-trees (experimental).
-%                 Defaults to false.
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('queryPoints', zeros(0), @ismatrix);
-p.addParamValue('leafSize', 20, @isscalar);
-p.addParamValue('naive', false, @(x) (x == true) || (x == false));
-p.addParamValue('singleMode', false, @(x) (x == true) || (x == false));
-p.addParamValue('coverTree', false, @(x) (x == true) || (x == false));
-
-% parsing the varargin options
-varargin{:}
-p.parse(varargin{:});
-parsed = p.Results;
-parsed
-
-% interfacing with mlpack
-[distances neighbors] = mex_allknn(dataPoints', k, parsed.queryPoints', ...
-	parsed.leafSize, parsed.naive, parsed.singleMode, parsed.coverTree);
-
-% transposing results
-distances = distances';
-neighbors = neighbors' + 1; % MATLAB indices begin at 1, not 0
-
-return;
-
diff --git a/src/mlpack/bindings/matlab/emst/CMakeLists.txt b/src/mlpack/bindings/matlab/emst/CMakeLists.txt
deleted file mode 100644
index 3b79cdf..0000000
--- a/src/mlpack/bindings/matlab/emst/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(emst_mex SHARED
-  emst.cpp
-)
-target_link_libraries(emst_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS emst_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  emst.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/emst/emst.cpp b/src/mlpack/bindings/matlab/emst/emst.cpp
deleted file mode 100644
index 24e6c8a..0000000
--- a/src/mlpack/bindings/matlab/emst/emst.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * @file emst.cpp
- * @author Patrick Mason
- *
- * MEX function for MATLAB EMST binding.
- */
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-#include <mlpack/methods/emst/dtb.hpp>
-
-#include <iostream>
-
-using namespace mlpack;
-using namespace mlpack::emst;
-using namespace mlpack::tree;
-
-// The gateway, required by all mex functions.
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // Argument checks.
-  if (nrhs != 3)
-  {
-    mexErrMsgTxt("Expecting an datapoints matrix, isBoruvka, and leafSize.");
-  }
-
-  if (nlhs != 1)
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-  const size_t numPoints = mxGetN(prhs[0]);
-  const size_t numDimensions = mxGetM(prhs[0]);
-
-  // Converting from mxArray to armadillo matrix.
-  arma::mat dataPoints(numDimensions, numPoints);
-
-  // Set the values.
-  double* mexDataPoints = mxGetPr(prhs[0]);
-  for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-  {
-    dataPoints(i) = mexDataPoints[i];
-  }
-
-  const bool isBoruvka = (mxGetScalar(prhs[1]) == 1.0);
-
-  // Run the computation.
-  arma::mat result;
-  if (isBoruvka)
-  {
-    // Get the number of leaves.
-    const size_t leafSize = (size_t) mxGetScalar(prhs[2]);
-
-    DualTreeBoruvka<> dtb(dataPoints, false, leafSize);
-    dtb.ComputeMST(result);
-  }
-  else
-  {
-    DualTreeBoruvka<> naive(dataPoints, true);
-    naive.ComputeMST(result);
-  }
-
-  // Construct matrix to return to MATLAB.
-  plhs[0] = mxCreateDoubleMatrix(3, numPoints - 1, mxREAL);
-
-  double* out = mxGetPr(plhs[0]);
-  for (int i = 0, n = (numPoints - 1) * 3; i < n; ++i)
-  {
-    out[i] = result(i);
-  }
-}
diff --git a/src/mlpack/bindings/matlab/emst/emst.m b/src/mlpack/bindings/matlab/emst/emst.m
deleted file mode 100644
index ce84fa7..0000000
--- a/src/mlpack/bindings/matlab/emst/emst.m
+++ /dev/null
@@ -1,52 +0,0 @@
-function result = emst(dataPoints, varargin)
-% result = emst(dataPoints, varargin)
-%
-% Compute the Euclidean minimum spanning tree of a set of input points using the
-% dual-tree Boruvka algorithm.
-%
-% The output is saved in a three-column matrix, where each row indicates an
-% edge.  The first column corresponds to the lesser index of the edge; the
-% second column corresponds to the greater index of the edge; and the third
-% column corresponds to the distance between the two points.
-%
-% Required parameters:
-%
-% dataPoints - The matrix of data points. Columns are assumed to represent
-%              dimensions, with rows representing separate points.
-%
-% Optional parameters (i.e. emst(..., 'parameter', value, ...)):
-%
-% 'method'   - The algorithm for computing the tree. 'naive' or 'boruvka', with
-%              'boruvka' being the default dual-tree Boruvka algorithm.
-% 'leafSize' - Leaf size in the kd-tree.  One-element leaves give the
-%              empirically best performance, but at the cost of greater memory
-%              requirements.  Defaults to 1.
-%
-% Examples:
-%
-% result = emst(dataPoints);
-% result = emst(dataPoints, 'method', 'naive');
-% result = emst(dataPoints, 'method', 'naive', 'leafSize', 5);
-
-% A parser for the inputs.
-p = inputParser;
-p.addParamValue('method', 'boruvka', ...
-    @(x) strcmpi(x, 'naive') || strcmpi(x, 'boruvka'));
-p.addParamValue('leafSize', 1, @isscalar);
-
-% Parse the varargin options.
-p.parse(varargin{:});
-parsed = p.Results;
-
-% Interface with mlpack. Transpose to machine learning standards.  MLPACK
-% expects column-major matrices; the user has passed in a row-major matrix.
-if strcmpi(parsed.method, 'boruvka')
-  result = emst_mex(dataPoints', 1, parsed.leafSize);
-  result = result';
-  return;
-else
-  result = emst_mex(dataPoints', 0, 1);
-  result = result';
-  return;
-end
-
diff --git a/src/mlpack/bindings/matlab/gmm/CMakeLists.txt b/src/mlpack/bindings/matlab/gmm/CMakeLists.txt
deleted file mode 100644
index dacb527..0000000
--- a/src/mlpack/bindings/matlab/gmm/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(gmm_mex SHARED
-  gmm.cpp
-)
-target_link_libraries(gmm_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS gmm_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  gmm.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/gmm/gmm.cpp b/src/mlpack/bindings/matlab/gmm/gmm.cpp
deleted file mode 100644
index 63a366e..0000000
--- a/src/mlpack/bindings/matlab/gmm/gmm.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
- * @file gmm.cpp
- * @author Patrick Mason
- *
- * MEX function for MATLAB GMM binding.
- */
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-#include <mlpack/methods/gmm/gmm.hpp>
-
-using namespace mlpack;
-using namespace mlpack::gmm;
-using namespace mlpack::util;
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // argument checks
-  if (nrhs != 3)
-  {
-    mexErrMsgTxt("Expecting three inputs.");
-  }
-
-  if (nlhs != 1)
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-  size_t seed = (size_t) mxGetScalar(prhs[2]);
-  // Check parameters and load data.
-  if (seed != 0)
-    math::RandomSeed(seed);
-  else
-    math::RandomSeed((size_t) std::time(NULL));
-
-  // loading the data
-  double * mexDataPoints = mxGetPr(prhs[0]);
-  size_t numPoints = mxGetN(prhs[0]);
-  size_t numDimensions = mxGetM(prhs[0]);
-  arma::mat dataPoints(numDimensions, numPoints);
-  for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-  {
-    dataPoints(i) = mexDataPoints[i];
-  }
-
-  int gaussians = (int) mxGetScalar(prhs[1]);
-  if (gaussians <= 0)
-  {
-    std::stringstream ss;
-    ss << "Invalid number of Gaussians (" << gaussians << "); must "
-        "be greater than or equal to 1." << std::endl;
-    mexErrMsgTxt(ss.str().c_str());
-  }
-
-  // Calculate mixture of Gaussians.
-  GMM<> gmm(size_t(gaussians), dataPoints.n_rows);
-
-  ////// Computing the parameters of the model using the EM algorithm //////
-  gmm.Estimate(dataPoints);
-
-  // setting up the matlab structure to be returned
-  mwSize ndim = 1;
-  mwSize dims[1] = {
-    1
-  };
-  const char * fieldNames[3] = {
-    "dimensionality"
-    , "weights"
-    , "gaussians"
-  };
-
-  plhs[0] =  mxCreateStructArray(ndim, dims, 3, fieldNames);
-
-  // dimensionality
-  mxArray * field_value;
-  field_value = mxCreateDoubleMatrix(1, 1, mxREAL);
-  *mxGetPr(field_value) = numDimensions;
-  mxSetFieldByNumber(plhs[0], 0, 0, field_value);
-
-  // mixture weights
-  field_value = mxCreateDoubleMatrix(gmm.Weights().size(), 1, mxREAL);
-  double * values = mxGetPr(field_value);
-  for (int i=0; i<gmm.Weights().size(); ++i)
-  {
-    values[i] = gmm.Weights()[i];
-  }
-  mxSetFieldByNumber(plhs[0], 0, 1, field_value);
-
-  // gaussian mean/variances
-  const char * gaussianNames[2] = {
-    "mean"
-    , "covariance"
-  };
-  ndim = 1;
-  dims[0] = gmm.Gaussians();
-
-  field_value = mxCreateStructArray(ndim, dims, 2, gaussianNames);
-  for (int i=0; i<gmm.Gaussians(); ++i)
-  {
-    mxArray * tmp;
-    double * values;
-
-    // setting the mean
-    arma::mat mean = gmm.Means()[i];
-    tmp = mxCreateDoubleMatrix(numDimensions, 1, mxREAL);
-    values = mxGetPr(tmp);
-    for (int j = 0; j < numDimensions; ++j)
-    {
-      values[j] = mean(j);
-    }
-    // note: SetField does not copy the data structure.
-    // mxDuplicateArray does the necessary copying.
-    mxSetFieldByNumber(field_value, i, 0, mxDuplicateArray(tmp));
-    mxDestroyArray(tmp);
-
-    // setting the covariance matrix
-    arma::mat covariance = gmm.Covariances()[i];
-    tmp = mxCreateDoubleMatrix(numDimensions, numDimensions, mxREAL);
-    values = mxGetPr(tmp);
-    for (int j = 0; j < numDimensions * numDimensions; ++j)
-    {
-      values[j] = covariance(j);
-    }
-    mxSetFieldByNumber(field_value, i, 1, mxDuplicateArray(tmp));
-    mxDestroyArray(tmp);
-  }
-  mxSetFieldByNumber(plhs[0], 0, 2, field_value);
-}
diff --git a/src/mlpack/bindings/matlab/gmm/gmm.m b/src/mlpack/bindings/matlab/gmm/gmm.m
deleted file mode 100644
index 470513c..0000000
--- a/src/mlpack/bindings/matlab/gmm/gmm.m
+++ /dev/null
@@ -1,28 +0,0 @@
-function result = gmm(dataPoints, varargin)
-%Gaussian Mixture Model (GMM) Training
-%
-%  This program computes a parametric estimate of a Gaussian mixture model
-%  (GMM), using the EM algorithm to find the maximum likelihood estimate.  The
-%  fitted model is returned as a struct with information about each Gaussian.
-%
-% Parameters:
-% dataPoints - (required) Matrix containing the data on which the model is fit.
-% gaussians  - (optional) Number of Gaussians in the GMM.  Default value is 1.
-% seed       - (optional) Random seed.  If 0, 'std::time(NULL)' is used.
-%              Default value is 0.
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('gaussians', 1, @isscalar);
-p.addParamValue('seed', 0, @isscalar);
-
-% parsing the varargin options
-p.parse(varargin{:});
-parsed = p.Results;
-
-% interfacing with mlpack
-result = mex_gmm(dataPoints', parsed.gaussians, parsed.seed);
-
-
-
-
diff --git a/src/mlpack/bindings/matlab/hmm/hmm_generate.cpp b/src/mlpack/bindings/matlab/hmm/hmm_generate.cpp
deleted file mode 100644
index 13fcbf4..0000000
--- a/src/mlpack/bindings/matlab/hmm/hmm_generate.cpp
+++ /dev/null
@@ -1,373 +0,0 @@
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-
-#include "hmm.hpp"
-#include "hmm_util.hpp"
-#include <mlpack/methods/gmm/gmm.hpp>
-
-/*
-PROGRAM_INFO("Hidden Markov Model (HMM) Sequence Generator", "This "
-    "utility takes an already-trained HMM (--model_file) and generates a "
-    "random observation sequence and hidden state sequence based on its "
-    "parameters, saving them to the specified files (--output_file and "
-    "--state_file)");
-
-PARAM_STRING_REQ("model_file", "File containing HMM (XML).", "m");
-PARAM_INT_REQ("length", "Length of sequence to generate.", "l");
-
-PARAM_INT("start_state", "Starting state of sequence.", "t", 0);
-PARAM_STRING("output_file", "File to save observation sequence to.", "o",
-    "output.csv");
-PARAM_STRING("state_file", "File to save hidden state sequence to (may be left "
-    "unspecified.", "S", "");
-PARAM_INT("seed", "Random seed.  If 0, 'std::time(NULL)' is used.", "s", 0);
-*/
-
-
-using namespace mlpack;
-using namespace mlpack::hmm;
-using namespace mlpack::distribution;
-using namespace mlpack::utilities;
-using namespace mlpack::gmm;
-using namespace mlpack::math;
-using namespace arma;
-using namespace std;
-
-namespace {
-	// gets the transition matrix from the struct
-	void getTransition(mat & transition, const mxArray * mxarray)
-	{
-		mxArray * mxTransitions = mxGetField(mxarray, 0, "transition");
-		if (NULL == mxTransitions)
-		{
-			mexErrMsgTxt("Model struct did not have transition matrix 'transition'.");
-		}
-		if (mxDOUBLE_CLASS != mxGetClassID(mxTransitions))
-		{
-			mexErrMsgTxt("Transition matrix 'transition' must have type mxDOUBLE_CLASS.");
-		}
-		const size_t m = mxGetM(mxTransitions);
-		const size_t n = mxGetN(mxTransitions);
-		transition.resize(m,n);
-
-		double * values = mxGetPr(mxTransitions);
-		for (int i = 0; i < m*n; ++i)
-			transition(i) = values[i];
-	}
-
-	// writes the matlab transition matrix to the model
-	template <class T>
-	void writeTransition(HMM<T> & hmm, const mxArray * mxarray)
-	{
-		mxArray * mxTransitions = mxGetField(mxarray, 0, "transition");
-		if (NULL == mxTransitions)
-		{
-			mexErrMsgTxt("Model struct did not have transition matrix 'transition'.");
-		}
-		if (mxDOUBLE_CLASS != mxGetClassID(mxTransitions))
-		{
-			mexErrMsgTxt("Transition matrix 'transition' must have type mxDOUBLE_CLASS.");
-		}
-
-		arma::mat transition(mxGetM(mxTransitions), mxGetN(mxTransitions));
-		double * values = mxGetPr(mxTransitions);
-		for (int i = 0; i < mxGetM(mxTransitions) * mxGetN(mxTransitions); ++i)
-			transition(i) = values[i];
-
-		hmm.Transition() = transition;
-	}
-
-	// argument check on the emission field
-	void checkEmission(const mat & transition, const mxArray * mxarray)
-	{
-		if (NULL == mxarray)
-		{
-			mexErrMsgTxt("Model struct did not have 'emission' struct.");
-		}
-		if ((int) mxGetN(mxarray) != (int) transition.n_rows)
-		{
-			stringstream ss;
-			ss << "'emissions' struct array must have dimensions 1 x "
-				<<  transition.n_rows << ".";
-			mexErrMsgTxt(ss.str().c_str());
-		}
-	}
-
-} // closing anonymous namespace
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // argument checks
-  if (nrhs != 4) 
-  {
-    mexErrMsgTxt("Expecting four arguments.");
-  }
-
-  if (nlhs != 1) 
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-	// seed argument
-	size_t seed = (size_t) mxGetScalar(prhs[3]);
-
-  // Set random seed.
-	if (seed != 0)
-    mlpack::math::RandomSeed(seed);
-  else
-    mlpack::math::RandomSeed((size_t) std::time(NULL));
-
-	// length of observations
-	const int length =  (int) mxGetScalar(prhs[1]);
-
-	// start state
-	const int startState = (int) mxGetScalar(prhs[2]);
-
-  if (length <= 0)
-  {
-		stringstream ss;
-    ss << "Invalid sequence length (" << length << "); must be greater "
-        << "than or equal to 0!";
-		mexErrMsgTxt(ss.str().c_str());
-  }
-
-	// getting the model type
-	if (mxIsStruct(prhs[0]) == 0)
-	{
-		mexErrMsgTxt("Model argument is not a struct.");
-	}
-
-	mxArray * mxHmmType = mxGetField(prhs[0], 0, "hmm_type");
-	if (mxHmmType == NULL)
-	{
-		mexErrMsgTxt("Model struct did not have 'hmm_type'.");
-	}
-	if (mxCHAR_CLASS != mxGetClassID(mxHmmType))
-	{
-		mexErrMsgTxt("'hmm_type' must have type mxCHAR_CLASS.");
-	}
-	
-	// getting the model type string
-	int bufLength = mxGetNumberOfElements(mxHmmType) + 1;
-	char * buf;
-	buf = (char *) mxCalloc(bufLength, sizeof(char));
-  mxGetString(mxHmmType, buf, bufLength);
-	string type(buf);
-	mxFree(buf);
-
-	cout << type << endl;
-
-	// to be filled by the generator
-	mat observations;
-  Col<size_t> sequence;
-
-	// to be removed!
-	SaveRestoreUtility sr;
-
-  if (type == "discrete")
-  {
-    HMM<DiscreteDistribution> hmm(1, DiscreteDistribution(1));
-
-		// writing transition matrix to the hmm
-		writeTransition(hmm, prhs[0]);
-
-		// writing emission matrix to the hmm
-		mxArray * mxEmission = mxGetField(prhs[0], 0, "emission");
-		//checkEmission(hmm, mxEmission);
-
-		vector<DiscreteDistribution> emission(hmm.Transition().n_rows);
-		for (int i=0; i<hmm.Transition().n_rows; ++i) 
-		{
-			mxArray * mxProbabilities = mxGetField(mxEmission, i, "probabilities");
-			if (NULL == mxProbabilities)
-			{
-				mexErrMsgTxt("'probabilities' field could not be found in 'emission' struct.");
-			}			
-	
-			arma::vec probabilities(mxGetN(mxProbabilities));
-			double * values = mxGetPr(mxProbabilities);
-			for (int j=0; j<mxGetN(mxProbabilities); ++j)
-				probabilities(j) = values[j];
-	 
-			emission[i] = DiscreteDistribution(probabilities);
-		}
-
-		hmm.Emission() = emission;
-
-		// At this point, the HMM model should be fully formed.
-    if (startState < 0 || startState >= (int) hmm.Transition().n_rows)
-    {
-			stringstream ss;
-      ss << "Invalid start state (" << startState << "); must be "
-          << "between 0 and number of states (" << hmm.Transition().n_rows
-          << ")!";
-			mexErrMsgTxt(ss.str().c_str());
-    }
-
-    hmm.Generate(size_t(length), observations, sequence, size_t(startState));
-  }
-  else if (type == "gaussian")
-  {
-		/*
-    //HMM<GaussianDistribution> hmm(1, GaussianDistribution(1));
-
-		// get transition matrix
-		//mat transition;
-		//getTransition(transition, prhs[0]);
-
-		//hmm.Transition() = transition;
-		//cout << transition << endl;
-		arma::mat transition("0.75 0.25; 0.25 0.75");
-
-		// get emission
-		//vector<GaussianDistribution> emission(transition.n_rows);
-		vector<GaussianDistribution> emission;
-  	GaussianDistribution g1("5.0 5.0", "1.0 0.0; 0.0 1.0");
-  	GaussianDistribution g2("-5.0 -5.0", "1.0 0.0; 0.0 1.0");
-  	emission.push_back(g1);
-  	emission.push_back(g2);
-
-
-		//HMM<GaussianDistribution> hmm(transition, emission);
-		//hmm.Emission() = emission;
-		HMM<GaussianDistribution> hmm(transition, emission);
-		*/
-  
-		// Our distribution will have three two-dimensional output Gaussians.
-		cout << "following the test" << endl;
-  	HMM<GaussianDistribution> hmm(3, GaussianDistribution(2));
-  	hmm.Transition() = arma::mat("0.4 0.6 0.8; 0.2 0.2 0.1; 0.4 0.2 0.1");
-  	hmm.Emission()[0] = GaussianDistribution("0.0 0.0", "1.0 0.0; 0.0 1.0");
-  	hmm.Emission()[1] = GaussianDistribution("2.0 2.0", "1.0 0.5; 0.5 1.2");
-  	hmm.Emission()[2] = GaussianDistribution("-2.0 1.0", "2.0 0.1; 0.1 1.0");
-
-  	// Now we will generate a long sequence.
-  	std::vector<arma::mat> observations2(1);
-  	std::vector<arma::Col<size_t> > states2(1);
-
-		// testing
-  	SaveHMM(hmm, sr);
-  	sr.WriteFile("testMexGaussian.xml");
-
-  	// Start in state 1 (no reason).
-  	cout << "test generation" << endl;
-		hmm.Generate(10000, observations2[0], states2[0], 1);
-		cout << "test complete" << endl;
-
-    if (startState < 0 || startState >= (int) hmm.Transition().n_rows)
-    {
-			stringstream ss;
-			ss << "Invalid start state (" << startState << "); must be "
-          << "between 0 and number of states (" << hmm.Transition().n_rows
-          << ")!";
-			mexErrMsgTxt(ss.str().c_str());
-    }
-		cout << "generating!" << endl;
-    hmm.Generate(size_t(length), observations, sequence, size_t(startState));
-		cout << "done!" << endl;
-  }
-  else if (type == "gmm")
-  {
-    HMM<GMM<> > hmm(1, GMM<>(1, 1));
-
-    LoadHMM(hmm, sr);
-
-    if (startState < 0 || startState >= (int) hmm.Transition().n_rows)
-    {
-      Log::Fatal << "Invalid start state (" << startState << "); must be "
-          << "between 0 and number of states (" << hmm.Transition().n_rows
-          << ")!" << endl;
-    }
-
-    hmm.Generate(size_t(length), observations, sequence, size_t(startState));
-  }
-  else
-  {
-    Log::Fatal << "Unknown HMM type '" << type << "'" << "'!" << endl;
-  }
-
-	cout << "returning to matlab" << endl;
-
-	// Setting values to be returned to matlab
-	mwSize ndim = 1;
-  mwSize dims[1] = {1};
-  const char * fieldNames[2] = {
-    "observations"
-    , "states"
-  };
-
-	plhs[0] = mxCreateStructArray(ndim, dims, 2, fieldNames);
-
-	mxArray * tmp;
-	double * values;
-
-	cout << observations.n_rows << "," << observations.n_cols << endl;
-	cout << sequence.n_rows << "," << sequence.n_cols << endl;
-	cout << observations << endl;
-	cout << sequence << endl;
-
-	// settings the observations
-	tmp = mxCreateDoubleMatrix(observations.n_rows, observations.n_cols, mxREAL);
-	values = mxGetPr(tmp);
-	for (int i=0; i<observations.n_rows * observations.n_cols; ++i)
-		values[i] = observations(i);
-
-	// note: SetField does not copy the data structure.
-	// mxDuplicateArray does the necessary copying.
-	mxSetFieldByNumber(plhs[0], 0, 0, mxDuplicateArray(tmp));
-	mxDestroyArray(tmp);
-
-	// settings the observations
-	tmp = mxCreateDoubleMatrix(sequence.n_rows, sequence.n_cols, mxREAL);
-	values = mxGetPr(tmp);
-	for (int i=0; i<length; ++i)
-		values[i] = sequence(i);
-
-	// note: SetField does not copy the data structure.
-	// mxDuplicateArray does the necessary copying.
-	mxSetFieldByNumber(plhs[0], 0, 1, mxDuplicateArray(tmp));
-	mxDestroyArray(tmp);
-}
-
-		/*
-		mxArray * mxEmission = mxGetField(prhs[0], 0, "emission");
-		checkEmission(transition, mxEmission);
-
-		vector<GaussianDistribution> emission(transition.n_rows);
-		for (int i=0; i<transition.n_rows; ++i) 
-		{
-			// mean
-			mxArray * mxMean = mxGetField(mxEmission, i, "mean");
-			if (NULL == mxMean)
-			{
-				mexErrMsgTxt("'mean' field could not be found in 'emission' struct.");
-			}			
-	
-			arma::vec mean(mxGetN(mxMean));
-			double * values = mxGetPr(mxMean);
-			for (int j=0; j<mxGetN(mxMean); ++j)
-				mean(j) = values[j];
-
-			cout << mean << endl;
-
-			// covariance
-			mxArray * mxCovariance = mxGetField(mxEmission, i, "covariance");
-			if (NULL == mxCovariance)
-			{
-				mexErrMsgTxt("'covariance' field could not be found in 'emission' struct.");
-			}			
-	
-			const size_t m = (size_t) mxGetM(mxCovariance);
-			const size_t n = (size_t) mxGetN(mxCovariance);
-			mat covariance(m, n);
-			values = mxGetPr(mxCovariance);
-			for (int j=0; j < m * n; ++j)
-				covariance(j) = values[j];
-
-			cout << covariance << endl;
-	 
-			emission[i] = GaussianDistribution(mean, covariance);
-		}
-		*/
diff --git a/src/mlpack/bindings/matlab/hmm/hmm_generate.m b/src/mlpack/bindings/matlab/hmm/hmm_generate.m
deleted file mode 100644
index 4777795..0000000
--- a/src/mlpack/bindings/matlab/hmm/hmm_generate.m
+++ /dev/null
@@ -1,28 +0,0 @@
-function sequence = hmm_generate(model, sequence_length, varargin)
-%Hidden Markov Model (HMM) Sequence Generator
-%
-%  This utility takes an already-trained HMM (model) and generates a random
-%  observation sequence and hidden state sequence based on its parameters,
-%  returning them in a struct with 'observations' and 'states' fields.
-%
-%Parameters:
-% model           - (required) HMM model struct.
-% sequence_length - (required) Length of the sequence to produce.
-% start_state	    - (optional) Starting state of sequence.  Default value 0.
-% seed            - (optional) Random seed.  If 0, 'std::time(NULL)' is used. 
-%                   Default value 0.
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('start_state', 0, @isscalar);
-p.addParamValue('seed', 0, @isscalar);
-
-% parsing the varargin options
-p.parse(varargin{:});
-parsed = p.Results;
-
-% interfacing with mlpack. 
-sequence = mex_hmm_generate(model, sequence_length, ...
-	parsed.start_state, parsed.seed);
-
-
diff --git a/src/mlpack/bindings/matlab/kernel_pca/CMakeLists.txt b/src/mlpack/bindings/matlab/kernel_pca/CMakeLists.txt
deleted file mode 100644
index b2f8933..0000000
--- a/src/mlpack/bindings/matlab/kernel_pca/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(kernel_pca_mex SHARED
-  kernel_pca.cpp
-)
-target_link_libraries(kernel_pca_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS kernel_pca_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  kernel_pca.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/kernel_pca/kernel_pca.cpp b/src/mlpack/bindings/matlab/kernel_pca/kernel_pca.cpp
deleted file mode 100644
index 3257b71..0000000
--- a/src/mlpack/bindings/matlab/kernel_pca/kernel_pca.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-#include <mlpack/core/kernels/linear_kernel.hpp>
-#include <mlpack/core/kernels/gaussian_kernel.hpp>
-#include <mlpack/core/kernels/hyperbolic_tangent_kernel.hpp>
-#include <mlpack/core/kernels/laplacian_kernel.hpp>
-#include <mlpack/core/kernels/polynomial_kernel.hpp>
-#include <mlpack/core/kernels/cosine_distance.hpp>
-
-#include <mlpack/methods/kernel_pca/kernel_pca.hpp>
-
-using namespace mlpack;
-using namespace mlpack::kpca;
-using namespace mlpack::kernel;
-using namespace std;
-using namespace arma;
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // argument checks
-  if (nrhs != 8)
-  {
-    mexErrMsgTxt("Expecting eight arguments.");
-  }
-
-  if (nlhs != 1)
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-  // Load input dataset.
-  if (mxDOUBLE_CLASS != mxGetClassID(prhs[0]))
-    mexErrMsgTxt("Input dataset must have type mxDOUBLE_CLASS.");
-
-  mat dataset(mxGetM(prhs[0]), mxGetN(prhs[0]));
-  double * values = mxGetPr(prhs[0]);
-  for (int i=0, num=mxGetNumberOfElements(prhs[0]); i<num; ++i)
-    dataset(i) = values[i];
-
-  // Get the new dimensionality, if it is necessary.
-  size_t newDim = dataset.n_rows;
-  const int argNewDim = (int) mxGetScalar(prhs[2]);
-  if (argNewDim != 0)
-  {
-    newDim = argNewDim;
-
-    if (newDim > dataset.n_rows)
-    {
-      stringstream ss;
-      ss << "New dimensionality (" << newDim
-          << ") cannot be greater than existing dimensionality ("
-          << dataset.n_rows << ")!";
-      mexErrMsgTxt(ss.str().c_str());
-    }
-  }
-
-  // Get the kernel type and make sure it is valid.
-  if (mxCHAR_CLASS != mxGetClassID(prhs[1]))
-  {
-    mexErrMsgTxt("Kernel input must have type mxCHAR_CLASS.");
-  }
-  int bufLength = mxGetNumberOfElements(prhs[1]) + 1;
-  char * buf;
-  buf = (char *) mxCalloc(bufLength, sizeof(char));
-  mxGetString(prhs[1], buf, bufLength);
-  string kernelType(buf);
-  mxFree(buf);
-
-  // scale parameter
-  const bool scaleData = (mxGetScalar(prhs[3]) == 1.0);
-
-  if (kernelType == "linear")
-  {
-    KernelPCA<LinearKernel> kpca(LinearKernel(), scaleData);
-    kpca.Apply(dataset, newDim);
-  }
-  else if (kernelType == "gaussian")
-  {
-    const double bandwidth = mxGetScalar(prhs[7]);
-
-    GaussianKernel kernel(bandwidth);
-    KernelPCA<GaussianKernel> kpca(kernel, scaleData);
-    kpca.Apply(dataset, newDim);
-  }
-  else if (kernelType == "polynomial")
-  {
-    const double degree = mxGetScalar(prhs[4]);
-    const double offset = mxGetScalar(prhs[5]);
-
-    PolynomialKernel kernel(offset, degree);
-    KernelPCA<PolynomialKernel> kpca(kernel, scaleData);
-    kpca.Apply(dataset, newDim);
-  }
-  else if (kernelType == "hyptan")
-  {
-    const double scale = mxGetScalar(prhs[6]);
-    const double offset = mxGetScalar(prhs[5]);
-
-    HyperbolicTangentKernel kernel(scale, offset);
-    KernelPCA<HyperbolicTangentKernel> kpca(kernel, scaleData);
-    kpca.Apply(dataset, newDim);
-  }
-  else if (kernelType == "laplacian")
-  {
-    const double bandwidth = mxGetScalar(prhs[7]);
-
-    LaplacianKernel kernel(bandwidth);
-    KernelPCA<LaplacianKernel> kpca(kernel, scaleData);
-    kpca.Apply(dataset, newDim);
-  }
-  else if (kernelType == "cosine")
-  {
-    KernelPCA<CosineDistance> kpca(CosineDistance(), scaleData);
-    kpca.Apply(dataset, newDim);
-  }
-  else
-  {
-    // Invalid kernel type.
-    stringstream ss;
-    ss << "Invalid kernel type ('" << kernelType << "'); valid choices "
-        << "are 'linear', 'gaussian', 'polynomial', 'hyptan', 'laplacian', and "
-        << "'cosine'.";
-    mexErrMsgTxt(ss.str().c_str());
-  }
-
-  // Now returning results to matlab
-  plhs[0] = mxCreateDoubleMatrix(dataset.n_rows, dataset.n_cols, mxREAL);
-  values = mxGetPr(plhs[0]);
-  for (int i = 0; i < dataset.n_rows * dataset.n_cols; ++i)
-  {
-    values[i] = dataset(i);
-  }
-
-}
diff --git a/src/mlpack/bindings/matlab/kernel_pca/kernel_pca.m b/src/mlpack/bindings/matlab/kernel_pca/kernel_pca.m
deleted file mode 100644
index 8a705b3..0000000
--- a/src/mlpack/bindings/matlab/kernel_pca/kernel_pca.m
+++ /dev/null
@@ -1,71 +0,0 @@
-function result = kernel_pca(dataPoints, kernel, varargin)
-%Kernel Principal Components Analysis
-%
-%  This program performs Kernel Principal Components Analysis (KPCA) on the
-%  specified dataset with the specified kernel.  This will transform the data
-%  onto the kernel principal components, and optionally reduce the dimensionality
-%  by ignoring the kernel principal components with the smallest eigenvalues.
-%  
-%  For the case where a linear kernel is used, this reduces to regular PCA.
-%  
-%  The kernels that are supported are listed below:
-%  
-%   * 'linear': the standard linear dot product (same as normal PCA):
-%      K(x, y) = x^T y
-%  
-%   * 'gaussian': a Gaussian kernel; requires bandwidth:
-%      K(x, y) = exp(-(|| x - y || ^ 2) / (2 * (bandwidth ^ 2)))
-%  
-%   * 'polynomial': polynomial kernel; requires offset and degree:
-%      K(x, y) = (x^T y + offset) ^ degree
-%  
-%   * 'hyptan': hyperbolic tangent kernel; requires scale and offset:
-%      K(x, y) = tanh(scale * (x^T y) + offset)
-%  
-%   * 'laplacian': Laplacian kernel; requires bandwidth:
-%      K(x, y) = exp(-(|| x - y ||) / bandwidth)
-%  
-%   * 'cosine': cosine distance:
-%      K(x, y) = 1 - (x^T y) / (|| x || * || y ||)
-%  
-%  The parameters for each of the kernels should be specified with the options
-%  bandwidth, kernel_scale, offset, or degree (or a combination of those
-%  options).
-%
-%Parameters
-% dataPoints         - (required) Input dataset to perform KPCA on.
-% kernel             - (required) The kernel to use.
-% new_dimensionality - (optional) If not 0, reduce the dimensionality of the
-%                      dataset by ignoring the dimensions with the smallest
-%                      eigenvalues.
-% bandwidth          - (optional) Bandwidth, for 'gaussian' or 'laplacian' kernels.
-%                      Default value is 1.
-% degree             - (optional) Degree of the polynomial, for the 'polynomial'
-%                      kernel.  Default value 1.
-% kernel_scale       - (optional) Scale, for 'hyptan' kernel.  Default value 1.
-% offset             - (optional) Offset, for 'hyptan' and 'polynomial' kernels.
-%                      Default value is 1.
-% scale              - (optional) If true, the data will be scaled before performing
-%                      KPCA such that the variance of each feature is 1.
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('new_dimensionality', 0, @isscalar);
-p.addParamValue('offset', 1, @isscalar);
-p.addParamValue('kernel_scale', 1, @isscalar);
-p.addParamValue('bandwidth', 1, @isscalar);
-p.addParamValue('degree', 1, @isscalar);
-p.addParamValue('scale', false, @(x) (x == true) || (x == false));
-
-% parsing the varargin options
-p.parse(varargin{:});
-parsed = p.Results;
-
-% interfacing with mlpack. transposing to machine learning standards. 
-result = mex_kernel_pca(dataPoints', kernel, ...
-	parsed.new_dimensionality, parsed.scale, ...
-	parsed.degree, parsed.offset, ...
-	parsed.kernel_scale, parsed.bandwidth);
-
-result = result';
-
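As a usage sketch for the interface documented above (the data matrix X below is hypothetical; rows are points, since the function transposes internally):

    X = randn(100, 5);                    % 100 points in 5 dimensions
    % Gaussian-kernel KPCA, keeping the top 3 kernel principal components.
    Xk = kernel_pca(X, 'gaussian', 'new_dimensionality', 3, 'bandwidth', 2.0);
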
diff --git a/src/mlpack/bindings/matlab/kmeans/CMakeLists.txt b/src/mlpack/bindings/matlab/kmeans/CMakeLists.txt
deleted file mode 100644
index 4c0c06b..0000000
--- a/src/mlpack/bindings/matlab/kmeans/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(kmeans_mex SHARED
-  kmeans.cpp
-)
-target_link_libraries(kmeans_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS kmeans_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  kmeans.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/kmeans/kmeans.cpp b/src/mlpack/bindings/matlab/kmeans/kmeans.cpp
deleted file mode 100644
index bccd9cf..0000000
--- a/src/mlpack/bindings/matlab/kmeans/kmeans.cpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/**
- * @file kmeans.cpp
- * @author Patrick Mason
- *
- * MEX function for MATLAB k-means binding.
- */
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-#include <mlpack/methods/kmeans/kmeans.hpp>
-#include <mlpack/methods/kmeans/allow_empty_clusters.hpp>
-
-using namespace mlpack;
-using namespace mlpack::kmeans;
-using namespace std;
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // argument checks
-  if (nrhs != 7)
-  {
-    mexErrMsgTxt("Expecting seven arguments.");
-  }
-
-  if (nlhs != 1)
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-  size_t seed = (size_t) mxGetScalar(prhs[6]);
-
-  // Initialize random seed.
-  //if (CLI::GetParam<int>("seed") != 0)
-    //math::RandomSeed((size_t) CLI::GetParam<int>("seed"));
-  if (seed != 0)
-    math::RandomSeed(seed);
-  else
-    math::RandomSeed((size_t) std::time(NULL));
-
-  // Now do validation of options.
-  //string inputFile = CLI::GetParam<string>("inputFile");
-  //int clusters = CLI::GetParam<int>("clusters");
-  int clusters = (int) mxGetScalar(prhs[1]);
-  if (clusters < 1)
-  {
-    stringstream ss;
-    ss << "Invalid number of clusters requested (" << clusters << ")! "
-        << "Must be greater than or equal to 1.";
-    mexErrMsgTxt(ss.str().c_str());
-  }
-
-  //int maxIterations = CLI::GetParam<int>("max_iterations");
-  int maxIterations = (int) mxGetScalar(prhs[2]);
-  if (maxIterations < 0)
-  {
-    stringstream ss;
-    ss << "Invalid value for maximum iterations (" << maxIterations <<
-        ")! Must be greater than or equal to 0.";
-    mexErrMsgTxt(ss.str().c_str());
-  }
-
-  //double overclustering = CLI::GetParam<double>("overclustering");
-  double overclustering = mxGetScalar(prhs[3]);
-  if (overclustering < 1)
-  {
-    stringstream ss;
-    ss << "Invalid value for overclustering (" << overclustering <<
-        ")! Must be greater than or equal to 1.";
-    mexErrMsgTxt(ss.str().c_str());
-  }
-
-  const bool allow_empty_clusters = (mxGetScalar(prhs[4]) == 1.0);
-  const bool fast_kmeans = (mxGetScalar(prhs[5]) == 1.0);
-
-  /*
-  // Make sure we have an output file if we're not doing the work in-place.
-  if (!CLI::HasParam("in_place") && !CLI::HasParam("outputFile"))
-  {
-    Log::Fatal << "--outputFile not specified (and --in_place not set)."
-        << std::endl;
-  }
-  */
-
-  // Load our dataset.
-  const size_t numPoints = mxGetN(prhs[0]);
-  const size_t numDimensions = mxGetM(prhs[0]);
-  arma::mat dataset(numDimensions, numPoints);
-
-  // setting the values.
-  double * mexDataPoints = mxGetPr(prhs[0]);
-  for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-  {
-    dataset(i) = mexDataPoints[i];
-  }
-
-  // Now create the KMeans object.  Because we could be using different types,
-  // it gets a little weird...
-  arma::Col<size_t> assignments;
-
-  //if (CLI::HasParam("allow_empty_clusters"))
-  if (allow_empty_clusters)
-  {
-    KMeans<metric::SquaredEuclideanDistance, RandomPartition,
-        AllowEmptyClusters> k(maxIterations, overclustering);
-
-    //if (CLI::HasParam("fast_kmeans"))
-    if (fast_kmeans)
-      k.FastCluster(dataset, clusters, assignments);
-    else
-      k.Cluster(dataset, clusters, assignments);
-  }
-  else
-  {
-    KMeans<> k(maxIterations, overclustering);
-
-    //if (CLI::HasParam("fast_kmeans"))
-    if (fast_kmeans)
-      k.FastCluster(dataset, clusters, assignments);
-    else
-      k.Cluster(dataset, clusters, assignments);
-  }
-
-  /*
-  // Now figure out what to do with our results.
-  if (CLI::HasParam("in_place"))
-  {
-    // Add the column of assignments to the dataset; but we have to convert them
-    // to type double first.
-    arma::vec converted(assignments.n_elem);
-    for (size_t i = 0; i < assignments.n_elem; i++)
-      converted(i) = (double) assignments(i);
-
-    dataset.insert_rows(dataset.n_rows, trans(converted));
-
-    // Save the dataset.
-    data::Save(inputFile.c_str(), dataset);
-  }
-  else
-  {
-    if (CLI::HasParam("labels_only"))
-    {
-      // Save only the labels.
-      string outputFile = CLI::GetParam<string>("outputFile");
-      arma::Mat<size_t> output = trans(assignments);
-      data::Save(outputFile.c_str(), output);
-    }
-    else
-    {
-      // Convert the assignments to doubles.
-      arma::vec converted(assignments.n_elem);
-      for (size_t i = 0; i < assignments.n_elem; i++)
-        converted(i) = (double) assignments(i);
-
-      dataset.insert_rows(dataset.n_rows, trans(converted));
-
-      // Now save, in the different file.
-      string outputFile = CLI::GetParam<string>("outputFile");
-      data::Save(outputFile.c_str(), dataset);
-    }
-  }
-  */
-
-  // constructing matrix to return to matlab
-  plhs[0] = mxCreateDoubleMatrix(assignments.n_elem, 1, mxREAL);
-
-  // setting the values
-  double * out = mxGetPr(plhs[0]);
-  for (int i = 0, n = assignments.n_elem; i < n; ++i)
-  {
-    out[i] = assignments(i);
-  }
-
-}
-
diff --git a/src/mlpack/bindings/matlab/kmeans/kmeans.m b/src/mlpack/bindings/matlab/kmeans/kmeans.m
deleted file mode 100644
index 38599de..0000000
--- a/src/mlpack/bindings/matlab/kmeans/kmeans.m
+++ /dev/null
@@ -1,28 +0,0 @@
-function assignments = kmeans(dataPoints, clusters, varargin)
-%K-Means Clustering
-%
-%  This program performs K-Means clustering on the given dataset, returning the
-%  learned cluster assignments (using 1-based indexing).  Empty clusters are not
-%  allowed by default; when a cluster becomes empty, the point furthest from the
-%  centroid of the cluster with maximum variance is taken to fill that cluster.
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('allow_empty_clusters', false, @(x) (x == true) || (x == false));
-p.addParamValue('fast_kmeans', false, @(x) (x == true) || (x == false));
-p.addParamValue('max_iterations', 1000, @isscalar);
-p.addParamValue('overclustering', 1, @isscalar);
-p.addParamValue('seed', 0, @isscalar);
-
-% parsing the varargin options
-p.parse(varargin{:});
-parsed = p.Results;
-
-% interfacing with mlpack. transposing to machine learning standards. 
-assignments = mex_kmeans(dataPoints', clusters, parsed.max_iterations, ...
-	parsed.overclustering, parsed.allow_empty_clusters, ...
-	parsed.fast_kmeans, parsed.seed);
-
-assignments = assignments + 1; % changing to matlab indexing
-
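A usage sketch for the k-means binding above (the data below is hypothetical; assignments are returned with MATLAB's 1-based indexing):

    X = [randn(50, 2); randn(50, 2) + 5];   % two well-separated blobs
    assignments = kmeans(X, 2, 'max_iterations', 500, 'seed', 1);
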
diff --git a/src/mlpack/bindings/matlab/lars/CMakeLists.txt b/src/mlpack/bindings/matlab/lars/CMakeLists.txt
deleted file mode 100644
index ad7ad3a..0000000
--- a/src/mlpack/bindings/matlab/lars/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(lars_mex SHARED
-  lars.cpp
-)
-target_link_libraries(lars_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS lars_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  lars.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/lars/lars.cpp b/src/mlpack/bindings/matlab/lars/lars.cpp
deleted file mode 100644
index 4908a16..0000000
--- a/src/mlpack/bindings/matlab/lars/lars.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-
-#include <mlpack/methods/lars/lars.hpp>
-
-using namespace arma;
-using namespace std;
-using namespace mlpack;
-using namespace mlpack::regression;
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // argument checks
-  if (nrhs != 5)
-  {
-    mexErrMsgTxt("Expecting five inputs.");
-  }
-
-  if (nlhs != 1)
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-  double lambda1 = mxGetScalar(prhs[2]);
-  double lambda2 = mxGetScalar(prhs[3]);
-  bool useCholesky = (mxGetScalar(prhs[4]) == 1.0);
-
-  // loading covariates
-  mat matX(mxGetM(prhs[0]), mxGetN(prhs[0]));
-  double * values = mxGetPr(prhs[0]);
-  for (int i=0, num=mxGetNumberOfElements(prhs[0]); i<num; ++i)
-    matX(i) = values[i];
-
-  // loading responses
-  mat matY(mxGetM(prhs[1]), mxGetN(prhs[1]));
-  values = mxGetPr(prhs[1]);
-  for (int i=0, num=mxGetNumberOfElements(prhs[1]); i<num; ++i)
-    matY(i) = values[i];
-
-  if (matY.n_cols > 1)
-    mexErrMsgTxt("Only one column or row allowed in responses file!");
-
-  if (matY.n_elem != matX.n_rows)
-    mexErrMsgTxt("Number of responses must be equal to number of rows of X!");
-
-  // Do LARS.
-  LARS lars(useCholesky, lambda1, lambda2);
-  vec beta;
-  lars.Regress(matX, matY.unsafe_col(0), beta, false /* do not transpose */);
-
-  // return to matlab
-  plhs[0] = mxCreateDoubleMatrix(beta.n_elem, 1, mxREAL);
-  values = mxGetPr(plhs[0]);
-  for (int i = 0; i < beta.n_elem; ++i)
-    values[i] = beta(i);
-}
diff --git a/src/mlpack/bindings/matlab/lars/lars.m b/src/mlpack/bindings/matlab/lars/lars.m
deleted file mode 100644
index 6789c2d..0000000
--- a/src/mlpack/bindings/matlab/lars/lars.m
+++ /dev/null
@@ -1,48 +0,0 @@
-function beta = lars(X, Y, varargin)
-%LARS
-%
-%  An implementation of LARS: Least Angle Regression (Stagewise/laSso).  This is
-%  a stage-wise homotopy-based algorithm for L1-regularized linear regression
-%  (LASSO) and L1+L2-regularized linear regression (Elastic Net).
-%  
-%  Let X be a matrix where each row is a point and each column is a dimension,
-%  and let y be a vector of targets.
-%  
-%  The Elastic Net problem is to solve
-%  
-%    min_beta 0.5 || X * beta - y ||_2^2 + lambda_1 ||beta||_1 +
-%        0.5 lambda_2 ||beta||_2^2
-%  
-%  If lambda_1 > 0 and lambda_2 = 0, the problem is the LASSO.
-%  If lambda_1 > 0 and lambda_2 > 0, the problem is the Elastic Net.
-%  If lambda_1 = 0 and lambda_2 > 0, the problem is Ridge Regression.
-%  If lambda_1 = 0 and lambda_2 = 0, the problem is unregularized linear
-%  regression.
-%  
-%  For efficiency reasons, it is not recommended to use this algorithm with
-%  lambda_1 = 0.
-%
-%Parameters
-% X              - (required) Matrix containing covariates (one row per point).
-% Y              - (required) Vector of responses/targets y.
-% lambda1        - (optional) l1-penalty regularization parameter.  Default value 0.
-% lambda2        - (optional) l2-penalty regularization parameter.  Default value 0.
-% useCholesky    - (optional) Use Cholesky decomposition during computation
-%                             rather than explicitly computing the full Gram
-%                             matrix.
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('lambda1', 0, @isscalar);
-p.addParamValue('lambda2', 0, @isscalar);
-p.addParamValue('useCholesky', false, @(x) (x == true) || (x == false));
-
-% parsing the varargin options
-p.parse(varargin{:});
-parsed = p.Results;
-
-% interfacing with mlpack. Does not require transposing.
-beta = mex_lars(X, Y, ...
-	parsed.lambda1, parsed.lambda2, parsed.useCholesky);
-
-
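A usage sketch for the LARS binding above, fitting a LASSO model (the data is hypothetical; X has one row per point, as described above):

    X = randn(200, 10);
    y = X(:, 1) - 2 * X(:, 3) + 0.1 * randn(200, 1);  % sparse ground truth
    beta = lars(X, y, 'lambda1', 0.5, 'useCholesky', true);
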
diff --git a/src/mlpack/bindings/matlab/nca/CMakeLists.txt b/src/mlpack/bindings/matlab/nca/CMakeLists.txt
deleted file mode 100644
index da5a327..0000000
--- a/src/mlpack/bindings/matlab/nca/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(nca_mex SHARED
-  nca.cpp
-)
-target_link_libraries(nca_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS nca_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  nca.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/nca/nca.cpp b/src/mlpack/bindings/matlab/nca/nca.cpp
deleted file mode 100644
index 3edd26b..0000000
--- a/src/mlpack/bindings/matlab/nca/nca.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-#include <mlpack/core/metrics/lmetric.hpp>
-
-#include <mlpack/methods/nca/nca.hpp>
-
-using namespace mlpack;
-using namespace mlpack::nca;
-using namespace mlpack::metric;
-using namespace std;
-using namespace arma;
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // argument checks
-  if (nrhs != 2)
-  {
-    mexErrMsgTxt("Expecting two inputs.");
-  }
-
-  if (nlhs != 1)
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-  // Load data.
-  mat data(mxGetM(prhs[0]), mxGetN(prhs[0]));
-  double * values = mxGetPr(prhs[0]);
-  for (int i=0, num=mxGetNumberOfElements(prhs[0]); i<num; ++i)
-    data(i) = values[i];
-
-  // load labels
-  umat labels(mxGetNumberOfElements(prhs[1]), 1);
-  values = mxGetPr(prhs[1]);
-  for (int i=0, num=mxGetNumberOfElements(prhs[1]); i<num; ++i)
-    labels(i) = (int) values[i];
-
-  // dimension checks
-  if (labels.n_elem != data.n_cols)
-    mexErrMsgTxt("Labels vector and data have unmatching dimensions.");
-
-  // Now create the NCA object and run the optimization.
-  NCA<LMetric<2> > nca(data, labels.unsafe_col(0));
-
-  mat distance;
-  nca.LearnDistance(distance);
-
-  // return to matlab
-  plhs[0] = mxCreateDoubleMatrix(distance.n_rows, distance.n_cols, mxREAL);
-  values = mxGetPr(plhs[0]);
-  for (int i = 0; i < distance.n_elem; ++i)
-    values[i] = distance(i);
-}
diff --git a/src/mlpack/bindings/matlab/nca/nca.m b/src/mlpack/bindings/matlab/nca/nca.m
deleted file mode 100644
index 9eb9602..0000000
--- a/src/mlpack/bindings/matlab/nca/nca.m
+++ /dev/null
@@ -1,24 +0,0 @@
-function result = nca(dataPoints, labels)
-%Neighborhood Components Analysis (NCA)
-%
-%  This program implements Neighborhood Components Analysis, both a linear
-%  dimensionality reduction technique and a distance learning technique.  The
-%  method seeks to improve k-nearest-neighbor classification on a dataset by
-%  scaling the dimensions.  The method is nonparametric, and does not require a
-%  value of k.  It works by using stochastic ("soft") neighbor assignments and
-%  using optimization techniques over the gradient of the accuracy of the
-%  neighbor assignments.
-%  
-%  To work, this algorithm needs labeled data; the labels are passed as the
-%  second argument.
-%
-%Parameters:
-% dataPoints - Input dataset to run NCA on.
-% labels     - Labels for input dataset.
-
-% interfacing with mlpack. transposing to machine learning standards. 
-result = mex_nca(dataPoints', labels);
-result = result';
-
-
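A usage sketch for the NCA binding above (hypothetical two-class data; labels are passed as the second argument, one label per point):

    X = [randn(30, 2); randn(30, 2) + 3];
    labels = [zeros(30, 1); ones(30, 1)];
    A = nca(X, labels);   % learned linear transformation (distance metric)
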
diff --git a/src/mlpack/bindings/matlab/nmf/CMakeLists.txt b/src/mlpack/bindings/matlab/nmf/CMakeLists.txt
deleted file mode 100644
index 255de6f..0000000
--- a/src/mlpack/bindings/matlab/nmf/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(nmf_mex SHARED
-  nmf.cpp
-)
-target_link_libraries(nmf_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS nmf_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  nmf.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/nmf/nmf.cpp b/src/mlpack/bindings/matlab/nmf/nmf.cpp
deleted file mode 100644
index 373abab..0000000
--- a/src/mlpack/bindings/matlab/nmf/nmf.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-
-#include <mlpack/methods/nmf/nmf.hpp>
-
-#include <mlpack/methods/nmf/random_init.hpp>
-#include <mlpack/methods/nmf/mult_dist_update_rules.hpp>
-#include <mlpack/methods/nmf/mult_div_update_rules.hpp>
-#include <mlpack/methods/nmf/als_update_rules.hpp>
-
-using namespace mlpack;
-using namespace mlpack::nmf;
-using namespace std;
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // argument checks
-  if (nrhs != 6)
-  {
-    mexErrMsgTxt("Expecting six inputs.");
-  }
-
-  if (nlhs != 2)
-  {
-    mexErrMsgTxt("Two outputs required.");
-  }
-
-  const size_t seed = (size_t) mxGetScalar(prhs[5]);
-
-  // Initialize random seed.
-  if (seed != 0)
-    math::RandomSeed(seed);
-  else
-    math::RandomSeed((size_t) std::time(NULL));
-
-  // Gather parameters.
-  const size_t r = (size_t) mxGetScalar(prhs[1]);
-  const size_t maxIterations = (size_t) mxGetScalar(prhs[2]);
-  const double minResidue = mxGetScalar(prhs[3]);
-
-  // update rule
-  int bufLength = mxGetNumberOfElements(prhs[4]) + 1;
-  char * buf = (char *) mxCalloc(bufLength, sizeof(char));
-  mxGetString(prhs[4], buf, bufLength);
-  string updateRules(buf);
-  mxFree(buf);
-
-  // Validate rank.
-  if (r < 1)
-  {
-    mexErrMsgTxt("The rank of the factorization cannot be less than 1.");
-  }
-
-  if ((updateRules != "multdist") &&
-      (updateRules != "multdiv") &&
-      (updateRules != "als"))
-  {
-    stringstream ss;
-    ss << "Invalid update rules ('" << updateRules << "'); must be '"
-        << "multdist', 'multdiv', or 'als'.";
-    mexErrMsgTxt(ss.str().c_str());
-  }
-
-  // Load input dataset.
-  arma::mat V(mxGetM(prhs[0]), mxGetN(prhs[0]));
-  double * values = mxGetPr(prhs[0]);
-  for (int i=0, num=mxGetNumberOfElements(prhs[0]); i<num; ++i)
-    V(i) = values[i];
-
-  arma::mat W;
-  arma::mat H;
-
-  // Perform NMF with the specified update rules.
-  if (updateRules == "multdist")
-  {
-    NMF<> nmf(maxIterations, minResidue);
-    nmf.Apply(V, r, W, H);
-  }
-  else if (updateRules == "multdiv")
-  {
-    NMF<RandomInitialization,
-        WMultiplicativeDivergenceRule,
-        HMultiplicativeDivergenceRule> nmf(maxIterations, minResidue);
-    nmf.Apply(V, r, W, H);
-  }
-  else if (updateRules == "als")
-  {
-    NMF<RandomInitialization,
-        WAlternatingLeastSquaresRule,
-        HAlternatingLeastSquaresRule> nmf(maxIterations, minResidue);
-    nmf.Apply(V, r, W, H);
-  }
-
-  // return to matlab
-  plhs[0] = mxCreateDoubleMatrix(W.n_rows, W.n_cols, mxREAL);
-  values = mxGetPr(plhs[0]);
-  for (int i = 0; i < W.n_elem; ++i)
-    values[i] = W(i);
-
-  plhs[1] = mxCreateDoubleMatrix(H.n_rows, H.n_cols, mxREAL);
-  values = mxGetPr(plhs[1]);
-  for (int i = 0; i < H.n_elem; ++i)
-    values[i] = H(i);
-}
diff --git a/src/mlpack/bindings/matlab/nmf/nmf.m b/src/mlpack/bindings/matlab/nmf/nmf.m
deleted file mode 100644
index 0d0642a..0000000
--- a/src/mlpack/bindings/matlab/nmf/nmf.m
+++ /dev/null
@@ -1,58 +0,0 @@
-function [W H] = nmf(dataPoints, rank, varargin)
-%Non-negative Matrix Factorization
-%
-%  This program performs non-negative matrix factorization on the given dataset,
-%  storing the resulting decomposed matrices in the specified files.  For an
-%  input dataset V, NMF decomposes V into two matrices W and H such that 
-%  
-%  V = W * H
-%  
-%  where all elements in W and H are non-negative.  If V is of size (n x m), then
-%  W will be of size (n x r) and H will be of size (r x m), where r is the rank
-%  of the factorization (specified by the 'rank' argument).
-%  
-%  Optionally, the desired update rules for each NMF iteration can be chosen from
-%  the following list:
-%  
-%   - multdist: multiplicative distance-based update rules (Lee and Seung 1999)
-%   - multdiv: multiplicative divergence-based update rules (Lee and Seung 1999)
-%   - als: alternating least squares update rules (Paatero and Tapper 1994)
-%  
-%  The maximum number of iterations is specified with 'max_iterations', and the
-%  minimum residue required for algorithm termination is specified with
-%  'min_residue'.
-%
-%Parameters:
-% dataPoints        - (required) Input dataset to perform NMF on.
-% rank              - (required) Rank of the factorization.
-% max_iterations    - (optional) Maximum number of iterations before NMF
-%                                terminates.  Default value 10000.
-% min_residue       - (optional) The minimum root mean square residue allowed for
-%                                each iteration, below which the program
-%                                terminates.  Default value 1e-05.
-% seed              - (optional) Random seed.  If 0, 'std::time(NULL)' is used.
-%                                Default value 0.
-% update_rules      - (optional) Update rules for each iteration; ( multdist |
-%                                multdiv | als ).  Default value 'multdist'.
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('max_iterations', 10000, @isscalar);
-p.addParamValue('min_residue', 1e-05, @isscalar);
-p.addParamValue('update_rules', 'multdist', @ischar);
-p.addParamValue('seed', 0, @isscalar);
-
-% parsing the varargin options
-p.parse(varargin{:});
-parsed = p.Results;
-
-% interfacing with mlpack. transposing for machine learning standards. 
-[W H] = mex_nmf(dataPoints', rank, ...
-	parsed.max_iterations, parsed.min_residue, ...
-	parsed.update_rules, parsed.seed);
-W = W';
-H = H';
-
-
-
-
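A usage sketch for the NMF binding above (the non-negative matrix V below is hypothetical):

    V = abs(randn(50, 40));               % 50 x 40 non-negative matrix
    [W, H] = nmf(V, 5, 'update_rules', 'multdist', 'max_iterations', 500);
    % Per the documentation above, W * H approximates V.
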
diff --git a/src/mlpack/bindings/matlab/pca/CMakeLists.txt b/src/mlpack/bindings/matlab/pca/CMakeLists.txt
deleted file mode 100644
index fd03c8d..0000000
--- a/src/mlpack/bindings/matlab/pca/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(pca_mex SHARED
-  pca.cpp
-)
-target_link_libraries(pca_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS pca_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  pca.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/pca/pca.cpp b/src/mlpack/bindings/matlab/pca/pca.cpp
deleted file mode 100644
index ba9fe31..0000000
--- a/src/mlpack/bindings/matlab/pca/pca.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-
-#include <mlpack/methods/pca/pca.hpp>
-
-using namespace mlpack;
-using namespace mlpack::pca;
-using namespace std;
-
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // argument checks
-  if (nrhs != 3)
-  {
-    mexErrMsgTxt("Expecting three inputs.");
-  }
-
-  if (nlhs != 1)
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-  // loading the data
-  double * mexDataPoints = mxGetPr(prhs[0]);
-  size_t numPoints = mxGetN(prhs[0]);
-  size_t numDimensions = mxGetM(prhs[0]);
-  arma::mat dataset(numDimensions, numPoints);
-  for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-    dataset(i) = mexDataPoints[i];
-
-  // Find out what dimension we want.
-  size_t newDimension = dataset.n_rows; // No reduction, by default.
-
-  if (mxGetScalar(prhs[1]) != 0.0)
-  {
-    // Validate the parameter.
-    newDimension = (size_t) mxGetScalar(prhs[1]);
-    if (newDimension > dataset.n_rows)
-    {
-      std::stringstream ss;
-      ss << "New dimensionality (" << newDimension
-          << ") cannot be greater than existing dimensionality ("
-          << dataset.n_rows << ")!";
-      mexErrMsgTxt(ss.str().c_str());
-    }
-  }
-
-  // Get the options for running PCA.
-  const bool scale = (mxGetScalar(prhs[2]) == 1.0);
-
-  // Perform PCA.
-  PCA p(scale);
-  p.Apply(dataset, newDimension);
-
-  // Now returning results to matlab
-  plhs[0] = mxCreateDoubleMatrix(dataset.n_rows, dataset.n_cols, mxREAL);
-  double * values = mxGetPr(plhs[0]);
-  for (int i = 0; i < dataset.n_rows * dataset.n_cols; ++i)
-    values[i] = dataset(i);
-}
diff --git a/src/mlpack/bindings/matlab/pca/pca.m b/src/mlpack/bindings/matlab/pca/pca.m
deleted file mode 100644
index 2063ae7..0000000
--- a/src/mlpack/bindings/matlab/pca/pca.m
+++ /dev/null
@@ -1,33 +0,0 @@
-function result = pca(dataPoints, varargin)
-%Principal Components Analysis
-%
-%  This program performs principal components analysis on the given dataset.  It
-%  will transform the data onto its principal components, optionally performing
-%  dimensionality reduction by ignoring the principal components with the
-%  smallest eigenvalues.
-%
-%Parameters:
-% dataPoints        - (required) Matrix to perform PCA on.
-% newDimensionality - (optional) Desired dimensionality of output dataset.  If 0,
-%                                no dimensionality reduction is performed. 
-%                                Default value 0.
-% scale             - (optional) If set, the data will be scaled before running
-%                                PCA, such that the variance of each feature is
-%                                1. Default value is false.
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('newDimensionality', 0, @isscalar);
-p.addParamValue('scale', false, @(x) (x == true) || (x == false));
-
-% parsing the varargin options
-p.parse(varargin{:});
-parsed = p.Results;
-
-% interfacing with mlpack
-result = mex_pca(dataPoints', parsed.newDimensionality, parsed.scale);
-result = result';
-
-
-
-
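A usage sketch for the PCA binding above (hypothetical data; one row per point):

    X = randn(200, 10);
    Xr = pca(X, 'newDimensionality', 2, 'scale', true);  % reduce to 2 dimensions
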
diff --git a/src/mlpack/bindings/matlab/range_search/CMakeLists.txt b/src/mlpack/bindings/matlab/range_search/CMakeLists.txt
deleted file mode 100644
index e12ea30..0000000
--- a/src/mlpack/bindings/matlab/range_search/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-# Simple rules for building mex file.  The _mex suffix is necessary to avoid
-# target name conflicts, and the mex file must have a different name than the .m
-# file.
-add_library(range_search_mex SHARED
-  range_search.cpp
-)
-target_link_libraries(range_search_mex
-  mlpack
-  ${LIBXML2_LIBRARIES}
-)
-
-# Installation rule.  Install both the mex and the MATLAB file.
-install(TARGETS range_search_mex
-  LIBRARY DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
-install(FILES
-  range_search.m
-  DESTINATION "${MATLAB_TOOLBOX_DIR}/mlpack/"
-)
diff --git a/src/mlpack/bindings/matlab/range_search/range_search.cpp b/src/mlpack/bindings/matlab/range_search/range_search.cpp
deleted file mode 100644
index e66fdd6..0000000
--- a/src/mlpack/bindings/matlab/range_search/range_search.cpp
+++ /dev/null
@@ -1,325 +0,0 @@
-/**
- * @file range_search.cpp
- * @author Patrick Mason
- *
- * MEX function for MATLAB range search binding.
- */
-#include "mex.h"
-
-#include <mlpack/core.hpp>
-#include <mlpack/core/metrics/lmetric.hpp>
-#include <mlpack/methods/range_search/range_search.hpp>
-
-using namespace std;
-using namespace mlpack;
-using namespace mlpack::range;
-using namespace mlpack::tree;
-
-typedef RangeSearch<metric::SquaredEuclideanDistance,
-    BinarySpaceTree<bound::HRectBound<2>, EmptyStatistic> > RSType;
-
-// the gateway, required by all mex functions
-void mexFunction(int nlhs, mxArray *plhs[],
-                 int nrhs, const mxArray *prhs[])
-{
-  // Give CLI the command line parameters the user passed in.
-  //CLI::ParseCommandLine(argc, argv);
-
-  // Get all the parameters.
-  //string referenceFile = CLI::GetParam<string>("reference_file");
-  //string distancesFile = CLI::GetParam<string>("distances_file");
-  //string neighborsFile = CLI::GetParam<string>("neighbors_file");
-
-  //int lsInt = CLI::GetParam<int>("leaf_size");
-  //double max = CLI::GetParam<double>("max");
-  //double min = CLI::GetParam<double>("min");
-  //bool naive = CLI::HasParam("naive");
-  //bool singleMode = CLI::HasParam("single_mode");
-
-  // argument checks
-  if (nrhs != 7)
-  {
-    mexErrMsgTxt("Expecting an datapoints matrix, isBoruvka, and leafSize.");
-  }
-
-  if (nlhs != 1)
-  {
-    mexErrMsgTxt("Output required.");
-  }
-
-  double max = mxGetScalar(prhs[1]);
-  double min = mxGetScalar(prhs[2]);
-  int lsInt = (int) mxGetScalar(prhs[4]);
-  bool naive = (mxGetScalar(prhs[5]) == 1.0);
-  bool singleMode = (mxGetScalar(prhs[6]) == 1.0);
-
-  // checking for query data
-  bool hasQueryData = ((mxGetM(prhs[3]) != 0) && (mxGetN(prhs[3]) != 0));
-  arma::mat queryData;
-
-  // setting the dataset values.
-  double * mexDataPoints = mxGetPr(prhs[0]);
-  size_t numPoints = mxGetN(prhs[0]);
-  size_t numDimensions = mxGetM(prhs[0]);
-  arma::mat referenceData(numDimensions, numPoints);
-  for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-  {
-    referenceData(i) = mexDataPoints[i];
-  }
-
-  //if (!data::Load(referenceFile.c_str(), referenceData))
-  //  Log::Fatal << "Reference file " << referenceFile << "not found." << endl;
-
-  //Log::Info << "Loaded reference data from '" << referenceFile << "'." << endl;
-
-  // Sanity check on range value: max must be greater than min.
-  if (max <= min)
-  {
-    stringstream ss;
-    ss << "Invalid range: maximum (" << max << ") must be greater than "
-        << "minimum (" << min << ").";
-    mexErrMsgTxt(ss.str().c_str());
-  }
-
-  // Sanity check on leaf size.
-  if (lsInt < 0)
-  {
-    stringstream ss;
-    ss << "Invalid leaf size: " << lsInt << ".  Must be greater "
-        "than or equal to 0.";
-    mexErrMsgTxt(ss.str().c_str());
-  }
-
-  size_t leafSize = lsInt;
-
-  // Naive mode overrides single mode.
-  if (singleMode && naive)
-  {
-    mexWarnMsgTxt("single_mode ignored because naive is present.");
-  }
-
-  if (naive)
-    leafSize = referenceData.n_cols;
-
-  vector<vector<size_t> > neighbors;
-  vector<vector<double> > distances;
-
-  // Because we may construct it differently, we need a pointer.
-  RSType* rangeSearch = NULL;
-
-  // Mappings for when we build the tree.
-  vector<size_t> oldFromNewRefs;
-
-  // Build trees by hand, so we can save memory: if we pass a tree to
-  // NeighborSearch, it does not copy the matrix.
-  //Log::Info << "Building reference tree..." << endl;
-  //Timer::Start("tree_building");
-
-  BinarySpaceTree<bound::HRectBound<2>, tree::EmptyStatistic>
-      refTree(referenceData, oldFromNewRefs, leafSize);
-  BinarySpaceTree<bound::HRectBound<2>, tree::EmptyStatistic>*
-      queryTree = NULL; // Empty for now.
-
-  //Timer::Stop("tree_building");
-
-  std::vector<size_t> oldFromNewQueries;
-
-  //if (CLI::GetParam<string>("query_file") != "")
-  if (hasQueryData)
-  {
-    //string queryFile = CLI::GetParam<string>("query_file");
-    //if (!data::Load(queryFile.c_str(), queryData))
-    //  Log::Fatal << "Query file " << queryFile << " not found" << endl;
-
-    // setting the values.
-    mexDataPoints = mxGetPr(prhs[3]);
-    numPoints = mxGetN(prhs[3]);
-    numDimensions = mxGetM(prhs[3]);
-    queryData = arma::mat(numDimensions, numPoints);
-    for (int i = 0, n = numPoints * numDimensions; i < n; ++i)
-    {
-      queryData(i) = mexDataPoints[i];
-    }
-
-    if (naive && leafSize < queryData.n_cols)
-      leafSize = queryData.n_cols;
-
-    //Log::Info << "Loaded query data from '" << queryFile << "'." << endl;
-
-    //Log::Info << "Building query tree..." << endl;
-
-    // Build trees by hand, so we can save memory: if we pass a tree to
-    // NeighborSearch, it does not copy the matrix.
-    //Timer::Start("tree_building");
-
-    queryTree = new BinarySpaceTree<bound::HRectBound<2>,
-        tree::EmptyStatistic >(queryData, oldFromNewQueries,
-        leafSize);
-
-    //Timer::Stop("tree_building");
-
-    rangeSearch = new RSType(&refTree, queryTree, referenceData, queryData,
-        singleMode);
-
-    //Log::Info << "Tree built." << endl;
-  }
-  else
-  {
-    rangeSearch = new RSType(&refTree, referenceData, singleMode);
-
-    //Log::Info << "Trees built." << endl;
-  }
-
-  //Log::Info << "Computing neighbors within range [" << min << ", " << max
-  //    << "]." << endl;
-
-  math::Range r = math::Range(min, max);
-  rangeSearch->Search(r, neighbors, distances);
-
-  //Log::Info << "Neighbors computed." << endl;
-
-  // We have to map back to the original indices from before the tree
-  // construction.
-  //Log::Info << "Re-mapping indices..." << endl;
-
-  vector<vector<double> > distancesOut;
-  distancesOut.resize(distances.size());
-  vector<vector<size_t> > neighborsOut;
-  neighborsOut.resize(neighbors.size());
-
-  // Do the actual remapping.
-  //if (CLI::GetParam<string>("query_file") != "")
-  if (hasQueryData)
-  {
-    for (size_t i = 0; i < distances.size(); ++i)
-    {
-      // Map distances (copy a column).
-      distancesOut[oldFromNewQueries[i]] = distances[i];
-
-      // Map indices of neighbors.
-      neighborsOut[oldFromNewQueries[i]].resize(neighbors[i].size());
-      for (size_t j = 0; j < distances[i].size(); ++j)
-      {
-        neighborsOut[oldFromNewQueries[i]][j] = oldFromNewRefs[neighbors[i][j]];
-      }
-    }
-  }
-  else
-  {
-    for (size_t i = 0; i < distances.size(); ++i)
-    {
-      // Map distances (copy a column).
-      distancesOut[oldFromNewRefs[i]] = distances[i];
-
-      // Map indices of neighbors.
-      neighborsOut[oldFromNewRefs[i]].resize(neighbors[i].size());
-      for (size_t j = 0; j < distances[i].size(); ++j)
-      {
-        neighborsOut[oldFromNewRefs[i]][j] = oldFromNewRefs[neighbors[i][j]];
-      }
-    }
-  }
-
-  // Setting values to be returned to matlab
-  mwSize ndim = 1;
-  mwSize dims[1] = {distancesOut.size()};
-  const char * fieldNames[2] = {
-    "neighbors"
-    , "distances"
-  };
-
-  plhs[0] = mxCreateStructArray(ndim, dims, 2, fieldNames);
-
-  // setting the structure elements
-  for (int i=0; i<distancesOut.size(); ++i)
-  {
-    mxArray * tmp;
-    double * values;
-
-    // settings the neighbors
-    const size_t numElements = distancesOut[i].size();
-    tmp = mxCreateDoubleMatrix(1, numElements, mxREAL);
-    values = mxGetPr(tmp);
-    for (int j=0; j<numElements; ++j)
-    {
-      // converting to matlab's index offset
-      values[j] = neighborsOut[i][j] + 1;
-    }
-    // note: SetField does not copy the data structure.
-    // mxDuplicateArray does the necessary copying.
-    mxSetFieldByNumber(plhs[0], i, 0, mxDuplicateArray(tmp));
-    mxDestroyArray(tmp);
-
-    // setting the distances
-    tmp = mxCreateDoubleMatrix(1, numElements, mxREAL);
-    values = mxGetPr(tmp);
-    for (int j=0; j<numElements; ++j)
-    {
-      values[j] = distancesOut[i][j];
-    }
-    mxSetFieldByNumber(plhs[0], i, 1, mxDuplicateArray(tmp));
-    mxDestroyArray(tmp);
-  }
-
-  // Clean up.
-  if (queryTree)
-    delete queryTree;
-  delete rangeSearch;
-
-  /*
-  // Save output.  We have to do this by hand.
-  fstream distancesStr(distancesFile.c_str(), fstream::out);
-  if (!distancesStr.is_open())
-  {
-    Log::Warn << "Cannot open file '" << distancesFile << "' to save output "
-        << "distances to!" << endl;
-  }
-  else
-  {
-    // Loop over each point.
-    for (size_t i = 0; i < distancesOut.size(); ++i)
-    {
-      // Store the distances of each point.  We may have 0 points to store, so
-      // we must account for that possibility.
-      for (size_t j = 0; j + 1 < distancesOut[i].size(); ++j)
-      {
-        distancesStr << distancesOut[i][j] << ", ";
-      }
-
-      if (distancesOut[i].size() > 0)
-        distancesStr << distancesOut[i][distancesOut[i].size() - 1];
-
-      distancesStr << endl;
-    }
-
-    distancesStr.close();
-  }
-
-  fstream neighborsStr(neighborsFile.c_str(), fstream::out);
-  if (!neighborsStr.is_open())
-  {
-    Log::Warn << "Cannot open file '" << neighborsFile << "' to save output "
-        << "neighbor indices to!" << endl;
-  }
-  else
-  {
-    // Loop over each point.
-    for (size_t i = 0; i < neighborsOut.size(); ++i)
-    {
-      // Store the neighbors of each point.  We may have 0 points to store, so
-      // we must account for that possibility.
-      for (size_t j = 0; j + 1 < neighborsOut[i].size(); ++j)
-      {
-        neighborsStr << neighborsOut[i][j] << ", ";
-      }
-
-      if (neighborsOut[i].size() > 0)
-        neighborsStr << neighborsOut[i][neighborsOut[i].size() - 1];
-
-      neighborsStr << endl;
-    }
-
-    neighborsStr.close();
-  }
-  */
-}
diff --git a/src/mlpack/bindings/matlab/range_search/range_search.m b/src/mlpack/bindings/matlab/range_search/range_search.m
deleted file mode 100644
index 1a50ec0..0000000
--- a/src/mlpack/bindings/matlab/range_search/range_search.m
+++ /dev/null
@@ -1,47 +0,0 @@
-function result = range_search(dataPoints, maxDistance, varargin)
-%Range Search
-%
-%  This function implements range search with a Euclidean distance metric. For a
-%  given query point, a given range, and a given set of reference points, the
-%  program will return all of the reference points with distance to the query
-%  point in the given range.  This is performed for an entire set of query
-%  points. You may specify a separate set of reference and query points, or only
-%  a reference set -- which is then used as both the reference and query set. 
-%  The given range is taken to be inclusive (that is, points with a distance
-%  exactly equal to the minimum and maximum of the range are included in the
-%  results).
-%  
-%  The results are returned as a struct array with one element per query point;
-%  each element has the fields 'neighbors' (1-based indices into the reference
-%  set) and 'distances'.
-%
-%Parameters:
-% dataPoints  - (required) Matrix containing the reference dataset.
-% maxDistance - (required) The upper bound of the range.
-% minDistance - (optional) The lower bound of the range.  Default value 0.
-% queryPoints - (optional) Range search query points.
-% leafSize    - (optional) Leaf size for tree building.  Default value 20.
-% naive       - (optional) If true, O(n^2) naive mode is used for computation.
-% singleMode  - (optional) If true, single-tree search is used (as opposed to
-%               dual-tree search).
-
-% a parser for the inputs
-p = inputParser;
-p.addParamValue('minDistance', 0, @isscalar);
-p.addParamValue('queryPoints', zeros(0), @ismatrix);
-p.addParamValue('leafSize', 20, @isscalar);
-p.addParamValue('naive', false, @(x) (x == true) || (x == false));
-p.addParamValue('singleMode', false, @(x) (x == true) || (x == false));
-
-% parsing the varargin options
-p.parse(varargin{:});
-parsed = p.Results;
-
-% interfacing with mlpack
-result = mex_range_search(dataPoints', maxDistance, ...
-	parsed.minDistance, parsed.queryPoints', parsed.leafSize, ...
-	parsed.naive, parsed.singleMode);
-
-
-
-
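A usage sketch for the range search binding above (hypothetical data; the result is a struct array with 'neighbors' and 'distances' fields, one element per query point):

    X = randn(100, 3);
    result = range_search(X, 1.5, 'minDistance', 0.5, 'leafSize', 20);
    firstNeighbors = result(1).neighbors;   % 1-based reference indices
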
diff --git a/src/mlpack/methods/adaboost/CMakeLists.txt b/src/mlpack/methods/adaboost/CMakeLists.txt
deleted file mode 100644
index 1320b9d..0000000
--- a/src/mlpack/methods/adaboost/CMakeLists.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-cmake_minimum_required(VERSION 2.8)
-
-# Define the files we need to compile.
-# Anything not in this list will not be compiled into MLPACK.
-set(SOURCES
-  adaboost.hpp
-  adaboost_impl.hpp
-)
-
-# Add directory name to sources.
-set(DIR_SRCS)
-foreach(file ${SOURCES})
-  set(DIR_SRCS ${DIR_SRCS} ${CMAKE_CURRENT_SOURCE_DIR}/${file})
-endforeach()
-# Append sources (with directory name) to list of all MLPACK sources (used at
-# the parent scope).
-set(MLPACK_SRCS ${MLPACK_SRCS} ${DIR_SRCS} PARENT_SCOPE)
-
-
-add_executable(adaboost
-  adaboost_main.cpp
-)
-target_link_libraries(adaboost
-  mlpack
-)
-
-install(TARGETS adaboost RUNTIME DESTINATION bin)
diff --git a/src/mlpack/methods/adaboost/adaboost.hpp b/src/mlpack/methods/adaboost/adaboost.hpp
deleted file mode 100644
index cfca3bc..0000000
--- a/src/mlpack/methods/adaboost/adaboost.hpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * @file adaboost.hpp
- * @author Udit Saxena
- *
- * AdaBoost header file
- */
-
-#ifndef MLPACK_METHODS_ADABOOST_ADABOOST_HPP
-#define MLPACK_METHODS_ADABOOST_ADABOOST_HPP
-
-#include <mlpack/core.hpp>
-#include <mlpack/methods/perceptron/perceptron.hpp>
- 
-namespace mlpack {
-namespace adaboost {
-
-template <typename MatType = arma::mat, typename WeakLearner = 
-          mlpack::perceptron::Perceptron<> >
-class Adaboost 
-{
-public:
-  /**
-   *  Constructor. Currently runs the Adaboost.mh algorithm
-   *  
-   *  @param data Input data
-   *  @param labels Corresponding labels
-   *  @param iterations Number of boosting rounds 
-   *  @param other Weak Learner, which has been initialized already
-   */
-  Adaboost(const MatType& data, const arma::Row<size_t>& labels,
-           int iterations, const WeakLearner& other);
-
-  /**
-   *  This function helps in building a classification Matrix which is of 
-   *  form: 
-   *  -1 if l is not the correct label
-   *  1 if l is the correct label
-   *
-   *  @param t The classification matrix to be built
-   *  @param l The labels from which the classification matrix is to be built.
-   */
-  void buildClassificationMatrix(arma::mat& t, const arma::Row<size_t>& l);
-
-  /**
-   *  This function helps in building the Weight Distribution matrix
-   *  which is updated during every iteration. It calculates the 
-   *  "difficulty" in classifying a point by adding the weights for all 
-   *  instances, using D.
-   *  
-   *  @param D The 2 Dimensional weight matrix from which the weights are
-   *            to be calculated.
-   *  @param weights The output weight vector.
-   */
-  void buildWeightMatrix(const arma::mat& D, arma::rowvec& weights);
-
-  // Stores the final classification of the Labels.
-  arma::Row<size_t> finalHypothesis;
-
-  // To check for the bound for the hammingLoss.
-  double ztAccumulator;
-
-}; // class Adaboost
-
-} // namespace adaboost
-} // namespace mlpack
-
-#include "adaboost_impl.hpp"
-
-#endif
\ No newline at end of file
diff --git a/src/mlpack/methods/adaboost/adaboost_impl.hpp b/src/mlpack/methods/adaboost/adaboost_impl.hpp
deleted file mode 100644
index b39d229..0000000
--- a/src/mlpack/methods/adaboost/adaboost_impl.hpp
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * @file adaboost_impl.hpp
- * @author Udit Saxena
- *
- * Implementation of the Adaboost class
- *
- *  @code
- *  @article{Schapire:1999:IBA:337859.337870,
- *  author = {Schapire, Robert E. and Singer, Yoram},
- *  title = {Improved Boosting Algorithms Using Confidence-rated Predictions},
- *  journal = {Mach. Learn.},
- *  issue_date = {Dec. 1999},
- *  volume = {37},
- *  number = {3},
- *  month = dec,
- *  year = {1999},
- *  issn = {0885-6125},
- *  pages = {297--336},
- *  numpages = {40},
- *  url = {http://dx.doi.org/10.1023/A:1007614523901},
- *  doi = {10.1023/A:1007614523901},
- *  acmid = {337870},
- *  publisher = {Kluwer Academic Publishers},
- *  address = {Hingham, MA, USA},
- *  keywords = {boosting algorithms, decision trees, multiclass classification,
- *      output coding},
- *  }
- *  @endcode
- */
-
-#ifndef MLPACK_METHODS_ADABOOST_ADABOOST_IMPL_HPP
-#define MLPACK_METHODS_ADABOOST_ADABOOST_IMPL_HPP
-
-#include "adaboost.hpp"
-
-namespace mlpack {
-namespace adaboost {
-/**
- *  Constructor. Currently runs the Adaboost.mh algorithm
- *  
- *  @param data Input data
- *  @param labels Corresponding labels
- *  @param iterations Number of boosting rounds 
- *  @param other Weak Learner, which has been initialized already
- */
-template<typename MatType, typename WeakLearner>
-Adaboost<MatType, WeakLearner>::Adaboost(const MatType& data, 
-        const arma::Row<size_t>& labels, int iterations, 
-        const WeakLearner& other)
-{
-  // Counting the number of classes into numClasses.
-  size_t numClasses = (arma::max(labels) - arma::min(labels)) + 1;
-
-  int i, j, k;
-  double rt, alphat = 0.0, zt;
-  
-  ztAccumulator = 1.0; 
-  
-  // To be used for predictions by the weak learner.
-  arma::Row<size_t> predictedLabels(labels.n_cols);
-  
-  // Use tempData to modify input Data for incorporating weights.
-  MatType tempData(data);
-  
-  // Build the classification Matrix yt from labels
-  arma::mat yt(predictedLabels.n_cols, numClasses);
-  
-  // Build a classification matrix of the form D(i,l)
-  // where i is the ith instance
-  // l is the lth class.
-  buildClassificationMatrix(yt, labels);
-  
-  // ht(x), to be loaded after a round of prediction every time the weak
-  // learner is run, by using the buildClassificationMatrix function
-  arma::mat ht(predictedLabels.n_cols, numClasses);
-
-  // This matrix is a helper matrix used to calculate the final hypothesis.
-  arma::mat sumFinalH(predictedLabels.n_cols, numClasses);
-  sumFinalH.fill(0.0);
-  
-  // load the initial weights into a 2-D matrix
-  const double initWeight = (double) 1 / (data.n_cols * numClasses);
-  arma::mat D(data.n_cols, numClasses);
-  D.fill(initWeight);
-  
-  // Weights are to be compressed into this rowvector
-  // for focussing on the perceptron weights.
-  arma::rowvec weights(predictedLabels.n_cols);
-  
-  // This is the final hypothesis.
-  arma::Row<size_t> finalH(predictedLabels.n_cols);
-
-  // now start the boosting rounds
-  for (i = 0; i < iterations; i++)
-  {
-    // Initialized to zero in every round.
-    rt = 0.0; 
-    zt = 0.0;
-    
-    // Build the weight vectors
-    buildWeightMatrix(D, weights);
-    
-    // call the other weak learner and train the labels.
-    WeakLearner w(other, tempData, weights, labels);
-    w.Classify(tempData, predictedLabels);
-
-    //Now from predictedLabels, build ht, the weak hypothesis
-    buildClassificationMatrix(ht, predictedLabels);
-    
-    // Now, start calculation of alpha(t) using ht
-    
-    // begin calculation of rt
-
-    for (j = 0;j < ht.n_rows; j++)
-    {
-      for (k = 0;k < ht.n_cols; k++)
-        rt += (D(j,k) * yt(j,k) * ht(j,k));
-    }
-    // end calculation of rt
-
-    alphat = 0.5 * log((1 + rt) / (1 - rt));
-    // end calculation of alphat
-    
-    // now start modifying weights
-
-    for (j = 0;j < D.n_rows; j++)
-    {
-      for (k = 0;k < D.n_cols; k++)
-      {  
-        // we calculate zt, the normalization constant
-        zt += D(j,k) * exp(-1 * alphat * yt(j,k) * ht(j,k));
-        D(j,k) = D(j,k) * exp(-1 * alphat * yt(j,k) * ht(j,k));
-
-        // adding to the matrix of FinalHypothesis 
-        sumFinalH(j,k) += (alphat * ht(j,k));
-      }
-    }
-    
-    // normalization of D
-    D = D / zt;
-    
-    // Accumulating the value of zt for the Hamming Loss bound.
-    ztAccumulator *= zt;
-  }
-
-  // Iterations are over, now build a strong hypothesis
-  // from a weighted combination of these weak hypotheses.
-  
-  arma::rowvec tempSumFinalH;
-  arma::uword max_index;
-  for (i = 0;i < sumFinalH.n_rows; i++)
-  {
-    tempSumFinalH = sumFinalH.row(i);
-    tempSumFinalH.max(max_index);
-    finalH(i) = max_index;
-  }
-  finalHypothesis = finalH;
-}
-
-/**
- *  This function helps in building a classification Matrix which is of 
- *  form: 
- *  -1 if l is not the correct label
- *  1 if l is the correct label
- *
- *  @param t The classification matrix to be built
- *  @param l The labels from which the classification matrix is to be built.
- */
-template <typename MatType, typename WeakLearner>
-void Adaboost<MatType, WeakLearner>::buildClassificationMatrix(
-                                     arma::mat& t, const arma::Row<size_t>& l)
-{
-  int i, j;
-
-  for (i = 0;i < t.n_rows; i++)
-  {
-    for (j = 0;j < t.n_cols; j++)
-    {
-      if (j == l(i))
-        t(i,j) = 1.0;
-      else
-        t(i,j) = -1.0;
-    }
-  }
-}
-
-/**
- *  This function helps in building the Weight Distribution matrix
- *  which is updated during every iteration. It calculates the 
- *  "difficulty" in classifying a point by adding the weights for all 
- *  instances, using D.
- *  
- *  @param D The 2 Dimensional weight matrix from which the weights are
- *            to be calculated.
- *  @param weights The output weight vector.
- */
-template <typename MatType, typename WeakLearner>
-void Adaboost<MatType, WeakLearner>::buildWeightMatrix(
-                                     const arma::mat& D, arma::rowvec& weights)
-{
-  int i, j;
-  weights.fill(0.0);
-
-  for (i = 0;i < D.n_rows; i++)
-  {
-    for (j = 0;j < D.n_cols; j++)
-      weights(i) += D(i,j);
-  }
-}
-
-} // namespace adaboost
-} // namespace mlpack
-
-#endif
\ No newline at end of file
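For reference, the boosting round implemented above follows the AdaBoost.MH scheme of Schapire and Singer (1999); as a sketch in the notation of the code (D the instance/label weight matrix, y the +/-1 classification matrix, h_t the weak hypothesis):

    r_t = \sum_{i,\ell} D_t(i,\ell) \, y(i,\ell) \, h_t(i,\ell), \qquad
    \alpha_t = \tfrac{1}{2} \ln \frac{1 + r_t}{1 - r_t},
    Z_t = \sum_{i,\ell} D_t(i,\ell) \, e^{-\alpha_t y(i,\ell) h_t(i,\ell)}, \qquad
    D_{t+1}(i,\ell) = \frac{D_t(i,\ell) \, e^{-\alpha_t y(i,\ell) h_t(i,\ell)}}{Z_t}.

The running product of the Z_t values (ztAccumulator) upper-bounds the Hamming loss of the final hypothesis, which is what the test further below checks.
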
diff --git a/src/mlpack/methods/adaboost/adaboost_main.cpp b/src/mlpack/methods/adaboost/adaboost_main.cpp
deleted file mode 100644
index 3cb9028..0000000
--- a/src/mlpack/methods/adaboost/adaboost_main.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * @file: adaboost_main.cpp
- * @author: Udit Saxena
- *
- * 
- */
-
-#include <mlpack/core.hpp>
-#include "adaboost.hpp"
-
-using namespace mlpack;
-using namespace std;
-using namespace arma;
-using namespace mlpack::adaboost;
-
-PROGRAM_INFO("","");
-
-//necessary parameters
-PARAM_STRING_REQ("train_file", "A file containing the training set.", "t");
-PARAM_STRING_REQ("labels_file", "A file containing labels for the training set.",
-  "l");
-PARAM_STRING_REQ("test_file", "A file containing the test set.", "T");
-
-//optional parameters.
-PARAM_STRING("output", "The file in which the predicted labels for the test set"
-    " will be written.", "o", "output.csv");
-PARAM_INT("iterations","The maximum number of boosting iterations "
-  "to be run", "i", 1000);
-PARAM_INT_REQ("classes","The number of classes in the input label set.","c");
-
-int main(int argc, char *argv[])
-{
-  CLI::ParseCommandLine(argc, argv);
-  
-  const string trainingDataFilename = CLI::GetParam<string>("train_file");
-  mat trainingData;
-  data::Load(trainingDataFilename, trainingData, true);
-  
-  const string labelsFilename = CLI::GetParam<string>("labels_file");
-  // Load labels.
-  mat labelsIn;
-  // data::Load(labelsFilename, labelsIn, true);
-
-  if (CLI::HasParam("labels_file"))
-  {
-    const string labelsFilename = CLI::GetParam<string>("labels_file");
-    // Load labels.
-    data::Load(labelsFilename, labelsIn, true);
-
-    // Do the labels need to be transposed?
-    if (labelsIn.n_rows == 1)
-      labelsIn = labelsIn.t();
-  }
-  else
-  {
-    // Extract the labels as the last
-    Log::Info << "Using the last dimension of training set as labels." << endl;
-
-    labelsIn = trainingData.row(trainingData.n_rows - 1).t();
-    trainingData.shed_row(trainingData.n_rows - 1);
-  }
-  
-  // helpers for normalizing the labels
-  Col<size_t> labels;
-  vec mappings;
-
-  // Do the labels need to be transposed?
-  if (labelsIn.n_rows == 1)
-    labelsIn = labelsIn.t();
-
-  // normalize the labels
-  data::NormalizeLabels(labelsIn.unsafe_col(0), labels, mappings);
-  
-  const string testingDataFilename = CLI::GetParam<string>("test_file");
-  mat testingData;
-  data::Load(testingDataFilename, testingData, true);
-
-  if (testingData.n_rows != trainingData.n_rows)
-    Log::Fatal << "Test data dimensionality (" << testingData.n_rows << ") "
-        << "must be the same as training data (" << trainingData.n_rows - 1
-        << ")!" << std::endl;
-  int iterations = CLI::GetParam<int>("iterations");
-  
-  // define your own weak learner, perceptron in this case.
-  // defining the number of iterations of the perceptron.
-  int iter = 400;
-  
-  perceptron::Perceptron<> p(trainingData, labels.t(), iter);
-  
-  Timer::Start("Training");
-  Adaboost<> a(trainingData, labels.t(), iterations, p);
-  Timer::Stop("Training");
-
-  return 0;
-}
\ No newline at end of file
diff --git a/src/mlpack/tests/adaboost_test.cpp b/src/mlpack/tests/adaboost_test.cpp
deleted file mode 100644
index 880c678..0000000
--- a/src/mlpack/tests/adaboost_test.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * @file adaboost_test.cpp
- * @author Udit Saxena
- *
- * Tests for Adaboost class.
- */
-
-#include <mlpack/core.hpp>
-#include <mlpack/methods/adaboost/adaboost.hpp>
-
-#include <boost/test/unit_test.hpp>
-#include "old_boost_test_definitions.hpp"
-
-using namespace mlpack;
-using namespace arma;
-using namespace mlpack::adaboost;
-
-BOOST_AUTO_TEST_SUITE(AdaboostTest);
-
-/**
- *  This test case runs the AdaBoost.MH algorithm on the UCI Iris dataset.
- *  It checks that the Hamming loss does not exceed the upper bound given
- *  by ztAccumulator.
- */
-BOOST_AUTO_TEST_CASE(HammingLossBound)
-{
-  arma::mat inputData;
-
-  if (!data::Load("iris.txt", inputData))
-    BOOST_FAIL("Cannot load test dataset iris.txt!");
-
-  arma::Mat<size_t> labels;
-
-  if (!data::Load("iris_labels.txt",labels))
-    BOOST_FAIL("Cannot load labels for iris iris_labels.txt");
-  
-  // no need to map the labels here
-
-  // Define your own weak learner, perceptron in this case.
-  // Run the perceptron for perceptron_iter iterations.
-  int perceptron_iter = 4000;
-
-  perceptron::Perceptron<> p(inputData, labels.row(0), perceptron_iter);
-
-  // Define parameters for the adaboost
-  int iterations = 100;
-  Adaboost<> a(inputData, labels.row(0), iterations, p);
-  int countError = 0;
-  for (size_t i = 0; i < labels.n_cols; i++)
-    if(labels(i) != a.finalHypothesis(i))
-      countError++;
-  double hammingLoss = (double) countError / labels.n_cols;
-
-  BOOST_REQUIRE(hammingLoss <= a.ztAccumulator);
-}
-
-BOOST_AUTO_TEST_SUITE_END();
\ No newline at end of file

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mlpack.git


