[mlpack] 97/207: Invert loops to better use memory.

Barak A. Pearlmutter barak+git at pearlmutter.net
Thu Mar 23 17:53:44 UTC 2017


This is an automated email from the git hooks/post-receive script.

bap pushed a commit to branch master
in repository mlpack.

commit 0217359fcd64419dd1d5738785c4ff8e699fba35
Author: Ryan Curtin <ryan at ratml.org>
Date:   Sun Feb 26 23:01:31 2017 -0500

    Invert loops to better use memory.
---
 src/mlpack/core/dists/discrete_distribution.cpp | 44 +++++++++++++++----------
 1 file changed, 27 insertions(+), 17 deletions(-)

diff --git a/src/mlpack/core/dists/discrete_distribution.cpp b/src/mlpack/core/dists/discrete_distribution.cpp
index 0a76c67..9866a12 100644
--- a/src/mlpack/core/dists/discrete_distribution.cpp
+++ b/src/mlpack/core/dists/discrete_distribution.cpp
@@ -64,13 +64,15 @@ void DiscreteDistribution::Train(const arma::mat& observations)
   // Get the dimension size of the distribution.
   const size_t dimensions = probabilities.size();
 
+  // Clear the old probabilities.
+  for (size_t i = 0; i < dimensions; i++)
+    probabilities[i].zeros();
+
   // Iterate all the probabilities in each dimension
-  for (size_t i=0; i < dimensions; i++)
+  for (size_t r = 0; r < observations.n_cols; ++r)
   {
-    // Clear the old probabilities
-    probabilities[i].zeros();
-    for (size_t r=0; r < observations.n_cols; r++)
-      {
+    for (size_t i = 0; i < dimensions; ++i)
+    {
       // Add the probability of each observation.  The addition of 0.5 to the
       // observation is to turn the default flooring operation of the size_t
       // cast into a rounding observation.
@@ -86,14 +88,17 @@ void DiscreteDistribution::Train(const arma::mat& observations)
         throw std::invalid_argument(oss.str());
       }
       probabilities[i][obs]++;
-      }
+    }
+  }
 
-    // Now normailze the distribution.
+  // Now normalize the distributions.
+  for (size_t i = 0; i < dimensions; ++i)
+  {
     double sum = accu(probabilities[i]);
     if (sum > 0)
       probabilities[i] /= sum;
-    else
-      probabilities[i].fill(1.0 / probabilities[i].n_elem); // Force normalization.
+    else // Force normalization.
+      probabilities[i].fill(1.0 / probabilities[i].n_elem);
   }
 }
 
@@ -113,13 +118,15 @@ void DiscreteDistribution::Train(const arma::mat& observations,
 
   // Get the dimension size of the distribution.
   size_t dimensions = probabilities.size();
-  for (size_t i=0; i < dimensions; i++)
-  {
-    // Clear the old probabilities
+
+  // Clear the old probabilities.
+  for (size_t i = 0; i < dimensions; i++)
     probabilities[i].zeros();
 
-    // Ensure that the observation is within the bounds.
-    for (size_t r=0; r < observations.n_cols; r++)
+  // Ensure that the observation is within the bounds.
+  for (size_t r = 0; r < observations.n_cols; r++)
+  {
+    for (size_t i = 0; i < dimensions; i++)
     {
       // Add the probability of each observation.  The addition of 0.5 to the
       // observation is to turn the default flooring operation of the size_t cast
@@ -138,12 +145,15 @@ void DiscreteDistribution::Train(const arma::mat& observations,
 
       probabilities[i][obs] += probObs[r];
     }
+  }
 
-    // Now normailze the distribution.
+  // Now normalize the distributions.
+  for (size_t i = 0; i < dimensions; ++i)
+  {
     double sum = accu(probabilities[i]);
     if (sum > 0)
       probabilities[i] /= sum;
-    else
-      probabilities[i].fill(1.0 / probabilities[i].n_elem); // Force normalization.
+    else // Force normalization.
+      probabilities[i].fill(1.0 / probabilities[i].n_elem);
   }
 }
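
For context, a minimal standalone sketch (not mlpack source, and not part of the commit) of why the inverted loop order uses memory better: arma::mat is column-major, so for a fixed column r the elements observations(0, r), observations(1, r), ... are adjacent in memory. Putting the observation index r in the outer loop and the dimension index i in the inner loop therefore walks the matrix sequentially, and the per-dimension normalization moves to a separate pass, as in the patch. The data, bin count, and variable names below are placeholders chosen for the sketch.

// Standalone illustration of the access pattern, assuming Armadillo only.
#include <armadillo>
#include <vector>

int main()
{
  const size_t dimensions = 3;

  // Placeholder data: integer observations in [0, 9] for each dimension.
  arma::mat observations =
      arma::randi<arma::mat>(dimensions, 1000, arma::distr_param(0, 9));

  // One histogram per dimension (10 bins here, purely for the sketch).
  std::vector<arma::vec> probabilities(dimensions,
                                       arma::vec(10, arma::fill::zeros));

  // Count observations: outer loop over columns, inner loop over rows, so
  // each column is read as one contiguous block.  The +0.5 turns the
  // flooring size_t cast into rounding, as in the patched code.
  for (size_t r = 0; r < observations.n_cols; ++r)
    for (size_t i = 0; i < dimensions; ++i)
      probabilities[i][(size_t) (observations(i, r) + 0.5)]++;

  // Normalize each dimension's histogram in a separate pass.
  for (size_t i = 0; i < dimensions; ++i)
  {
    const double sum = arma::accu(probabilities[i]);
    if (sum > 0)
      probabilities[i] /= sum;
  }

  return 0;
}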

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mlpack.git
