[mlpack] 107/149: Minor code cleanups.
Barak A. Pearlmutter
barak+git at pearlmutter.net
Sat May 2 09:11:15 UTC 2015
This is an automated email from the git hooks/post-receive script.
bap pushed a commit to branch svn-trunk
in repository mlpack.
commit 556c4eb6698cb73d031f6cdac153c9f9f32601e7
Author: rcurtin <rcurtin at 9d5b8971-822b-0410-80eb-d18c1038ef23>
Date: Wed Nov 19 17:09:01 2014 +0000
Minor code cleanups.
git-svn-id: http://svn.cc.gatech.edu/fastlab/mlpack/trunk@17388 9d5b8971-822b-0410-80eb-d18c1038ef23
---
.../amf/update_rules/svd_batch_learning.hpp | 33 ++++++++++------------
1 file changed, 15 insertions(+), 18 deletions(-)
diff --git a/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp b/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp
index aca613e..3b9308c 100644
--- a/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp
+++ b/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp
@@ -9,25 +9,23 @@
#include <mlpack/core.hpp>
-namespace mlpack
-{
-namespace amf
-{
+namespace mlpack {
+namespace amf {
/**
- * This class implements SVD batch learning with momentum. This procedure is
- * described in the paper 'A Guide to singular Value Decomposition'
- * by Chih-Chao Ma. Class implements 'Algorithm 4' given in the paper.
+ * This class implements SVD batch learning with momentum. This procedure is
+ * described in the paper 'A Guide to singular Value Decomposition'
+ * by Chih-Chao Ma. Class implements 'Algorithm 4' given in the paper.
* This factorizer decomposes the matrix V into two matrices W and H such that
* sum of sum of squared error between V and W*H is minimum. This optimization is
- * performed with gradient descent. To make gradient descent faster momentum is
- * added.
+ * performed with gradient descent. To make gradient descent faster momentum is
+ * added.
*/
class SVDBatchLearning
{
public:
/**
- * SVD Batch learning constructor.
+ * SVD Batch learning constructor.
*
* @param u step value used in batch learning
* @param kw regularization constant for W matrix
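Spelled out, the update the class comment above is describing accumulates a gradient of the squared error over the observed (nonzero) entries of V, subtracts an L2 regularization term, and then takes a momentum step. A sketch in LaTeX, using the constructor parameters u and kw and the momentum term mW visible in the surrounding context; the momentum coefficient (written \alpha here) and its exact application are not shown in this diff and are an assumption:

    % Gradient over nonzero entries of V, with L2 regularization (assumed form):
    \Delta W_{i\cdot} = \sum_{j\,:\,V_{ij}\neq 0}
        \bigl(V_{ij} - W_{i\cdot} H_{\cdot j}\bigr)\, H_{\cdot j}^{\mathsf{T}}
        \;-\; k_w\, W_{i\cdot}

    % Momentum step with assumed coefficient \alpha and step size u:
    m_W \leftarrow \alpha\, m_W + u\, \Delta W,
    \qquad
    W \leftarrow W + m_W

The H update is symmetric, using kh, mH, and a gradient built from rows of W, matching the HUpdate hunk further down.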
@@ -91,7 +89,7 @@ class SVDBatchLearning
{
double val;
if((val = V(i, j)) != 0)
- deltaW.row(i) += (val - arma::dot(W.row(i), H.col(j))) *
+ deltaW.row(i) += (val - arma::dot(W.row(i), H.col(j))) *
arma::trans(H.col(j));
}
// add regularization
@@ -135,7 +133,7 @@ class SVDBatchLearning
{
double val;
if((val = V(i, j)) != 0)
- deltaH.col(j) += (val - arma::dot(W.row(i), H.col(j))) *
+ deltaH.col(j) += (val - arma::dot(W.row(i), H.col(j))) *
arma::trans(W.row(i));
}
// add regularization
@@ -147,7 +145,7 @@ class SVDBatchLearning
// add momentum to H
H += mH;
}
-
+
private:
//! step size of the algorithm
double u;
@@ -164,13 +162,13 @@ class SVDBatchLearning
arma::mat mH;
}; // class SBDBatchLearning
-//! TODO : Merge this template specialized function for sparse matrix using
+//! TODO : Merge this template specialized function for sparse matrix using
//! common row_col_iterator
/**
* WUpdate function specialization for sparse matrix
*/
-template<>
+template<>
inline void SVDBatchLearning::WUpdate<arma::sp_mat>(const arma::sp_mat& V,
arma::mat& W,
const arma::mat& H)
@@ -188,7 +186,7 @@ inline void SVDBatchLearning::WUpdate<arma::sp_mat>(const arma::sp_mat& V,
{
size_t row = it.row();
size_t col = it.col();
- deltaW.row(it.row()) += (*it - arma::dot(W.row(row), H.col(col))) *
+ deltaW.row(it.row()) += (*it - arma::dot(W.row(row), H.col(col))) *
arma::trans(H.col(col));
}
@@ -219,7 +217,7 @@ inline void SVDBatchLearning::HUpdate<arma::sp_mat>(const arma::sp_mat& V,
{
size_t row = it.row();
size_t col = it.col();
- deltaH.col(col) += (*it - arma::dot(W.row(row), H.col(col))) *
+ deltaH.col(col) += (*it - arma::dot(W.row(row), H.col(col))) *
arma::trans(W.row(row));
}
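The TODO above the sparse specializations asks for them to be merged over a common row/col iterator; both already lean on Armadillo's sparse iterator, which visits only the nonzero entries of an sp_mat. A minimal standalone sketch of that iteration pattern (plain Armadillo, not mlpack code):

    #include <armadillo>
    #include <iostream>

    int main()
    {
      arma::sp_mat V(4, 5);
      V(1, 2) = 3.0;
      V(3, 0) = 7.0;

      // The iterator skips zero entries; it.row()/it.col() give the position
      // and *it the value -- the same accessors used in the hunks above.
      const arma::sp_mat& cV = V;
      for (arma::sp_mat::const_iterator it = cV.begin(); it != cV.end(); ++it)
        std::cout << "V(" << it.row() << ", " << it.col() << ") = " << *it << "\n";

      return 0;
    }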
@@ -235,7 +233,6 @@ inline void SVDBatchLearning::HUpdate<arma::sp_mat>(const arma::sp_mat& V,
} // namespace amf
} // namespace mlpack
-
#endif // __MLPACK_METHODS_AMF_UPDATE_RULES_SVD_BATCHLEARNING_HPP
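For context, SVDBatchLearning is not called directly; it is plugged into mlpack's AMF driver as the update-rule template parameter, and AMF roughly alternates WUpdate()/HUpdate() calls until the termination policy fires. A hedged usage sketch, assuming the AMF template parameters and Apply() signature of the mlpack tree this commit lands in (worth double-checking against amf.hpp):

    #include <mlpack/core.hpp>
    #include <mlpack/methods/amf/amf.hpp>
    #include <mlpack/methods/amf/update_rules/svd_batch_learning.hpp>
    #include <iostream>

    using namespace mlpack::amf;

    int main()
    {
      arma::mat V = arma::randu<arma::mat>(100, 50);  // matrix to factorize
      arma::mat W, H;

      // AMF drives the alternating updates; SVDBatchLearning supplies the
      // batch gradient-descent-with-momentum rule from the header above.
      AMF<SimpleResidueTermination, RandomInitialization, SVDBatchLearning> amf;

      const double residue = amf.Apply(V, 10, W, H);  // rank-10 factorization
      std::cout << "Final residue: " << residue << std::endl;

      return 0;
    }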
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mlpack.git