[mlpack] 19/44: Merge r17388.
Barak A. Pearlmutter
barak+git at pearlmutter.net
Mon Feb 15 19:35:53 UTC 2016
This is an automated email from the git hooks/post-receive script.
bap pushed a commit to tag mlpack-1.0.11
in repository mlpack.
commit 970a8fa3c7a080bcf0ba3b22989d1e6932fe5a00
Author: Ryan Curtin <ryan at ratml.org>
Date: Sun Dec 7 19:37:51 2014 +0000
Merge r17388.
---
.../amf/update_rules/svd_batch_learning.hpp | 43 ++++++++++++++++------
1 file changed, 32 insertions(+), 11 deletions(-)
diff --git a/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp b/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp
index deaf704..ae14c71 100644
--- a/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp
+++ b/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp
@@ -22,13 +22,29 @@
#include <mlpack/core.hpp>
-namespace mlpack
-{
-namespace amf
-{
+namespace mlpack {
+namespace amf {
+
+/**
+ * This class implements SVD batch learning with momentum, following
+ * 'Algorithm 4' of the paper 'A Guide to Singular Value Decomposition' by
+ * Chih-Chao Ma.
+ *
+ * The factorizer decomposes the matrix V into two matrices W and H such that
+ * the sum of squared errors between V and W * H is minimized. The
+ * optimization is performed with gradient descent, accelerated with momentum.
+ */
class SVDBatchLearning
{
public:
+ /**
+ * SVD batch learning constructor.
+ *
+ * @param u Step size used in batch learning.
+ * @param kw Regularization constant for the W matrix.
+ * @param kh Regularization constant for the H matrix.
+ * @param momentum Momentum applied to the batch learning process.
+ */
SVDBatchLearning(double u = 0.0002,
double kw = 0,
double kh = 0,
@@ -78,7 +94,7 @@ class SVDBatchLearning
{
double val;
if((val = V(i, j)) != 0)
- deltaW.row(i) += (val - arma::dot(W.row(i), H.col(j))) *
+ deltaW.row(i) += (val - arma::dot(W.row(i), H.col(j))) *
arma::trans(H.col(j));
}
if(kw != 0) deltaW.row(i) -= kw * W.row(i);
@@ -118,7 +134,7 @@ class SVDBatchLearning
{
double val;
if((val = V(i, j)) != 0)
- deltaH.col(j) += (val - arma::dot(W.row(i), H.col(j))) *
+ deltaH.col(j) += (val - arma::dot(W.row(i), H.col(j))) *
arma::trans(W.row(i));
}
if(kh != 0) deltaH.col(j) -= kh * H.col(j);
@@ -127,7 +143,7 @@ class SVDBatchLearning
mH += u*deltaH;
H += mH;
}
-
+
private:
double u;
double kw;
@@ -140,7 +156,13 @@ class SVDBatchLearning
arma::mat mH;
};
-template<>
+//! TODO: Merge this template-specialized function for sparse matrices using
+//! the common row_col_iterator.
+
+/**
+ * WUpdate function specialization for sparse matrices.
+ */
+template<>
inline void SVDBatchLearning::WUpdate<arma::sp_mat>(const arma::sp_mat& V,
arma::mat& W,
const arma::mat& H)
@@ -158,7 +180,7 @@ inline void SVDBatchLearning::WUpdate<arma::sp_mat>(const arma::sp_mat& V,
{
size_t row = it.row();
size_t col = it.col();
- deltaW.row(it.row()) += (*it - arma::dot(W.row(row), H.col(col))) *
+ deltaW.row(it.row()) += (*it - arma::dot(W.row(row), H.col(col))) *
arma::trans(H.col(col));
}
@@ -189,7 +211,7 @@ inline void SVDBatchLearning::HUpdate<arma::sp_mat>(const arma::sp_mat& V,
{
size_t row = it.row();
size_t col = it.col();
- deltaH.col(col) += (*it - arma::dot(W.row(row), H.col(col))) *
+ deltaH.col(col) += (*it - arma::dot(W.row(row), H.col(col))) *
arma::trans(W.row(row));
}
@@ -205,7 +227,6 @@ inline void SVDBatchLearning::HUpdate<arma::sp_mat>(const arma::sp_mat& V,
} // namespace amf
} // namespace mlpack
-
#endif
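
For readers of this notice, the update described in the new class comment can be
sketched in plain Armadillo. The sketch below only illustrates the documented
rule and is not the library code itself: the function name svdBatchStep is
hypothetical, and it folds the class's separate WUpdate()/HUpdate() passes into
one step for brevity.

#include <armadillo>

// Illustrative momentum-based batch step for V ~ W * H: only nonzero entries
// of V contribute to the gradient, kw/kh act as L2 regularizers, and mW/mH
// carry the momentum between calls.
void svdBatchStep(const arma::mat& V,
                  arma::mat& W, arma::mat& H,
                  arma::mat& mW, arma::mat& mH,
                  const double u, const double kw, const double kh,
                  const double momentum)
{
  arma::mat deltaW(W.n_rows, W.n_cols, arma::fill::zeros);
  arma::mat deltaH(H.n_rows, H.n_cols, arma::fill::zeros);

  for (size_t i = 0; i < V.n_rows; ++i)
  {
    for (size_t j = 0; j < V.n_cols; ++j)
    {
      const double val = V(i, j);
      if (val != 0)
      {
        const double err = val - arma::dot(W.row(i), H.col(j));
        deltaW.row(i) += err * arma::trans(H.col(j));
        deltaH.col(j) += err * arma::trans(W.row(i));
      }
    }
  }

  if (kw != 0) deltaW -= kw * W; // regularization on W
  if (kh != 0) deltaH -= kh * H; // regularization on H

  // Momentum update: velocity = momentum * velocity + step size * gradient.
  mW = momentum * mW + u * deltaW;
  mH = momentum * mH + u * deltaH;
  W += mW;
  H += mH;
}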
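
As a usage sketch of the constructor parameters documented above (u, kw, kh,
momentum), the rule would normally be handed to mlpack's AMF driver class.
Everything below is an assumption-laden example, not code from this commit: the
AMF template-parameter order (termination policy, initialization rule, update
rule), the constructor argument order, and the Apply(V, rank, W, H) signature
are assumed from the 1.0.x AMF interface and may differ in this exact release;
the numeric values are arbitrary placeholders.

#include <mlpack/core.hpp>
#include <mlpack/methods/amf/amf.hpp>
#include <mlpack/methods/amf/update_rules/svd_batch_learning.hpp>

using namespace mlpack::amf;

int main()
{
  // Placeholder data: a dense 100 x 80 matrix to factorize at rank 5.
  arma::mat V(100, 80, arma::fill::randu);
  arma::mat W, H;
  const size_t rank = 5;

  // Step size u, regularizers kw/kh, and momentum as documented in the
  // SVDBatchLearning constructor; the values are arbitrary examples.
  SVDBatchLearning svdRule(0.0002, 0.01, 0.01, 0.9);

  // Assumed AMF interface: default termination and initialization policies,
  // with Apply() filling W and H so that V is approximated by W * H.
  AMF<SimpleResidueTermination, RandomInitialization, SVDBatchLearning>
      amf(SimpleResidueTermination(), RandomInitialization(), svdRule);
  amf.Apply(V, rank, W, H);

  return 0;
}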
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mlpack.git