[mathicgb] 35/393: Made F4MatrixReducer remove empty rows in each subcomputation and added tests for this. Also removed unused reduction code in F4MatrixReducer.

Doug Torrance dtorrance-guest at moszumanska.debian.org
Fri Apr 3 15:58:28 UTC 2015


This is an automated email from the git hooks/post-receive script.

dtorrance-guest pushed a commit to branch upstream
in repository mathicgb.

commit 14a7b33585016fffca1b5eb32f3cef6abfcdc05b
Author: Bjarke Hammersholt Roune <bjarkehr.code at gmail.com>
Date:   Fri Sep 28 11:13:48 2012 +0200

    Made F4MatrixReducer remove empty rows in each subcomputation and added tests for this. Also removed unused reduction code in F4MatrixReducer.
---
 src/mathicgb/F4MatrixReducer.cpp | 324 +++------------------------------------
 src/mathicgb/QuadMatrix.cpp      |   6 +-
 src/mathicgb/QuadMatrix.hpp      |   3 +
 src/mathicgb/SparseMatrix.cpp    |   2 +-
 src/mathicgb/SparseMatrix.hpp    |   2 +-
 src/test/F4MatrixReducer.cpp     |  52 +++++--
 6 files changed, 74 insertions(+), 315 deletions(-)

diff --git a/src/mathicgb/F4MatrixReducer.cpp b/src/mathicgb/F4MatrixReducer.cpp
index 5a635ca..b29c00b 100755
--- a/src/mathicgb/F4MatrixReducer.cpp
+++ b/src/mathicgb/F4MatrixReducer.cpp
@@ -17,12 +17,17 @@ public:
   DenseRow() {}
   DenseRow(size_t colCount): mEntries(colCount) {}
 
-  void takeModulus(SparseMatrix::Scalar modulus, size_t startCol = 0) {
+  /// returns false if all entries are zero
+  bool takeModulus(SparseMatrix::Scalar modulus, size_t startCol = 0) {
     typedef typename std::vector<T>::iterator Iter;
+    T bitwiseOr = 0;
     Iter end = mEntries.end();
-    for (Iter it = mEntries.begin() + startCol; it != end; ++it)
+    for (Iter it = mEntries.begin() + startCol; it != end; ++it) {
       if (*it >= modulus)
         *it %= modulus;
+      bitwiseOr |= *it;
+    }
+    return bitwiseOr != 0;
   }
 
   void clear() {
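
The reworked takeModulus above folds the emptiness check into the modulus pass: a running bitwise OR of the reduced entries is zero exactly when every entry is zero, so a row that reduced to nothing is detected without a second sweep. A standalone sketch of the same idea (illustrative names, not mathicgb's API):

    // Sketch: reduce each entry of a dense row modulo `modulus` and report
    // whether any nonzero entry remains from startCol onward.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    bool takeModulusAndCheckNonzero(std::vector<std::uint64_t>& entries,
                                    std::uint64_t modulus,
                                    std::size_t startCol = 0) {
      std::uint64_t bitwiseOr = 0;
      for (std::size_t col = startCol; col < entries.size(); ++col) {
        if (entries[col] >= modulus)
          entries[col] %= modulus;
        bitwiseOr |= entries[col];   // stays zero only if every entry is zero
      }
      return bitwiseOr != 0;         // false: the row is empty, caller can drop it
    }
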
@@ -301,6 +306,8 @@ void myReduce
 
 #pragma omp parallel for num_threads(threadCount) schedule(dynamic)
   for (size_t row = 0; row < rowCount; ++row) {
+    if (toReduce.emptyRow(row))
+      continue;
 #ifdef _OPENMP
     DenseRow<uint64>& denseRow = denseRowPerThread[omp_get_thread_num()];
 #endif
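
The emptyRow guard added above skips input rows with no entries before any reduction work is scheduled for them. How such a test might look on a row-major sparse store is sketched below; this is an assumption based on the rowBegin/rowEnd iterators used elsewhere in this file, not necessarily how SparseMatrix actually implements it:

    // Assumed sketch of an emptiness test for a row-major sparse store.
    #include <cstddef>
    #include <cstdint>
    #include <utility>
    #include <vector>

    struct ToySparseMatrix {
      // rows[r] holds the (column, scalar) pairs of row r in column order.
      std::vector<std::vector<std::pair<std::size_t, std::uint64_t>>> rows;

      bool emptyRow(std::size_t row) const {
        // A row is empty exactly when its begin and end iterators coincide.
        return rows[row].begin() == rows[row].end();
      }
    };
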
@@ -311,17 +318,18 @@ void myReduce
         continue;
       denseRow.rowReduceByUnitary(pivot, reduceBy, modulus);
     }
-    denseRow.takeModulus(modulus, pivotCount);
+    if (denseRow.takeModulus(modulus, pivotCount)) {
 #pragma omp critical
-    {
-      denseRow.appendTo(reduced, pivotCount);
+      {
+        denseRow.appendTo(reduced, pivotCount);
+      }
     }
   }
 }
 
 void myReduceToEchelonForm5
 (SparseMatrix& toReduce, SparseMatrix::Scalar modulus, size_t threadCount) {
-  // making no assumptions on toReduce.
+  // making no assumptions on toReduce except no zero rows
 
   SparseMatrix::RowIndex const rowCount = toReduce.rowCount();
   SparseMatrix::ColIndex const colCount = toReduce.colCount();
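
In the hunk above, the value returned by takeModulus now gates the critical section, so a row that reduced to zero neither takes the lock nor ends up in the output matrix. A minimal sketch of that pattern with stand-in types (the inner reduction is abbreviated to a plain modulus pass):

    // Pattern sketch: reduce rows in parallel, but only serialize (and append)
    // when the reduced row still has a nonzero entry.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    void reduceAndCollect(std::vector<std::vector<std::uint64_t>>& rows,
                          std::uint64_t modulus,
                          std::vector<std::vector<std::uint64_t>>& nonzeroOut) {
      const std::ptrdiff_t rowCount = static_cast<std::ptrdiff_t>(rows.size());
    #pragma omp parallel for schedule(dynamic)
      for (std::ptrdiff_t row = 0; row < rowCount; ++row) {
        std::uint64_t bitwiseOr = 0;
        for (std::uint64_t& entry : rows[row]) {
          entry %= modulus;          // stands in for the real row reduction
          bitwiseOr |= entry;
        }
        if (bitwiseOr != 0) {        // zero rows skip the critical section entirely
    #pragma omp critical
          nonzeroOut.push_back(rows[row]);
        }
      }
    }
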
@@ -330,6 +338,7 @@ void myReduceToEchelonForm5
   std::vector<DenseRow<uint64> > dense(rowCount);
 #pragma omp parallel for num_threads(threadCount) schedule(dynamic)
   for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+    MATHICGB_ASSERT(!toReduce.emptyRow(row));
     dense[row].reset(colCount);
     dense[row].addRow(toReduce, row);
   }
@@ -361,6 +370,8 @@ void myReduceToEchelonForm5
 #pragma omp parallel for num_threads(threadCount) schedule(dynamic)
     for (size_t row = 0; row < rowCount; ++row) {
       DenseRow<uint64>& denseRow = dense[row];
+      if (denseRow.empty())
+        continue;
 
       // reduce by each row of reduced.
       for (size_t reducerRow = 0; reducerRow < reducerCount; ++reducerRow) {
@@ -380,7 +391,10 @@ void myReduceToEchelonForm5
       leadCols[row] = col;
 
       // note if we have found a new pivot row
-      if (col < colCount) {
+      if (col == colCount)
+        denseRow.clear();
+      else {
+        MATHICGB_ASSERT(col < colCount);
         bool isNewReducer = false;
 #pragma omp critical
         {
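
The new branch above treats col == colCount as "nothing nonzero remains": such a row is cleared so later passes skip it, while any smaller col marks a candidate pivot row. A standalone sketch of that lead-column scan (illustrative names):

    // Sketch: return the first column holding a nonzero entry after taking
    // the modulus, or colCount when the row has reduced to zero.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    std::size_t findLeadColumn(std::vector<std::uint64_t>& row,
                               std::uint64_t modulus,
                               std::size_t startCol = 0) {
      const std::size_t colCount = row.size();
      for (std::size_t col = startCol; col < colCount; ++col) {
        if (row[col] == 0)
          continue;
        row[col] %= modulus;   // entries are reduced only when inspected
        if (row[col] != 0)
          return col;          // lead column found; the row can act as a pivot
      }
      return colCount;         // the row is (now known to be) zero
    }
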
@@ -424,300 +438,8 @@ void myReduceToEchelonForm5
 
   toReduce.clear(colCount);
   for (size_t row = 0; row < rowCount; ++row)
-    dense[row].appendTo(toReduce);
-}
-
-void myReduceToEchelonForm4
-(SparseMatrix& toReduce,
- SparseMatrix::Scalar modulus,
- SparseMatrix& reduced,
- size_t threadCount) {
-  // making no assumptions on toReduce.
-
-  size_t const colCount = toReduce.colCount();
-  reduced.clear(colCount);
-  MATHICGB_ASSERT(reduced.colCount() == colCount);
-
-  size_t const noReducer = static_cast<size_t>(-1);
-  std::vector<size_t> reducer(colCount);
-  std::fill(reducer.begin(), reducer.end(), noReducer);
-
-  std::vector<size_t> reduceThese;
-  std::vector<size_t> lookAtThese;
-
-  SparseMatrix::RowIndex rowCount = toReduce.rowCount();
-  std::vector<DenseRow<uint64> > dense(rowCount);
-  for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
-    dense[row].reset(colCount);
-    dense[row].addRow(toReduce, row);
-  }
-
-  std::vector<size_t> leadCols(colCount);
-
-  std::vector<size_t> newReducers;
-
-  for (size_t i = 0; i < rowCount; ++i)
-    lookAtThese.push_back(i);
-
-  while (true) {
-    size_t const lookAtTheseCount = lookAtThese.size();
-    //for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
-    for (size_t i = 0; i < lookAtTheseCount; ++i) {
-      size_t const row = lookAtThese[i];
-      MATHICGB_ASSERT(!dense[row].empty());
-      for (size_t col = leadCols[row]; col < colCount; ++col) {
-        if (dense[row][col] == 0)
-          continue;
-        dense[row][col] %= modulus;
-        if (dense[row][col] == 0)
-          continue;
-        if (reducer[col] == noReducer) {
-          reducer[col] = row; //reduced.rowCount();
-          newReducers.push_back(col);
-          MATHICGB_ASSERT(reduced.colCount() == colCount);
-          MATHICGB_ASSERT(dense[row].colCount() == colCount);
-          dense[row].normalize(modulus, col);
-          MATHICGB_ASSERT(dense[row][col] == 1);
-          dense[row].appendTo(reduced);
-          MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
-          MATHICGB_ASSERT(dense[reducer[col]][col] == 1);
-        } else {
-          leadCols[row] = col;
-          reduceThese.push_back(row);
-        }
-        break;
-      }
-    }
-    std::sort(newReducers.begin(), newReducers.end());
-    size_t const newReducerCount = newReducers.size();
-
-    size_t const reduceCount = reduceThese.size();
-    if (reduceCount == 0)
-      break;
-    //std::cout << "reducing " << reduceCount << " out of " << toReduce.rowCount() << std::;
-#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
-    for (size_t i = 0; i < reduceCount; ++i) {
-      //std::cout << omp_get_thread_num();
-      size_t row = reduceThese[i];
-      DenseRow<uint64>& denseRow = dense[row];
-      for (size_t i = 0; i < newReducerCount; ++i) {
-        size_t col = newReducers[i];
-        if (denseRow[col] == 0)
-          continue;
-        if (reducer[col] == noReducer)
-          continue;
-        MATHICGB_ASSERT(dense[reducer[col]][col] == 1);
-        denseRow.rowReduceByUnitary(dense[reducer[col]], col, modulus);
-      }
-    }
-    //std::cout << "done reducing that batch" << std::endl;
-    newReducers.clear();
-    lookAtThese.swap(reduceThese);
-    reduceThese.clear();
-  }
-}
-
-void myReduceToEchelonForm3
-(SparseMatrix& toReduce,
- SparseMatrix::Scalar modulus,
- SparseMatrix& reduced,
- size_t threadCount) {
-  // making no assumptions on toReduce.
-
-  size_t const colCount = toReduce.colCount();
-  reduced.clear(colCount);
-  MATHICGB_ASSERT(reduced.colCount() == colCount);
-
-  size_t const noReducer = static_cast<size_t>(-1);
-  std::vector<size_t> reducer(colCount);
-  std::fill(reducer.begin(), reducer.end(), noReducer);
-
-  std::vector<size_t> reduceThese;
-
-  SparseMatrix::RowIndex rowCount = toReduce.rowCount();
-  std::vector<DenseRow<uint64> > dense(rowCount);
-  for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
-    dense[row].reset(colCount);
-    dense[row].addRow(toReduce, row);
-  }
-
-  std::vector<size_t> leadCols(colCount);
-
-  std::vector<size_t> newReducers;
-
-  while (true) {
-    SparseMatrix::RowIndex rowCount = toReduce.rowCount();
-    for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
-      if (dense[row].empty())
-        continue;
-      for (size_t col = 0; col < colCount; ++col) {
-        if (dense[row][col] == 0)
-          continue;
-        dense[row][col] %= modulus;
-        if (dense[row][col] == 0)
-          continue;
-        if (reducer[col] == noReducer) {
-          reducer[col] = reduced.rowCount();
-          newReducers.push_back(col);
-          MATHICGB_ASSERT(reduced.colCount() == colCount);
-          MATHICGB_ASSERT(dense[row].colCount() == colCount);
-          reduced.appendRowWithModulusNormalized(dense[row].mEntries, modulus);
-          dense[row].clear();
-          MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
-        } else {
-          leadCols[row] = col;
-          reduceThese.push_back(row);
-        }
-        break;
-      }
-    }
-    std::sort(newReducers.begin(), newReducers.end());
-    size_t const newReducerCount = newReducers.size();
-
-    size_t const reduceCount = reduceThese.size();
-    if (reduceCount == 0)
-      break;
-    //std::cout << "reducing " << reduceCount << " out of " << toReduce.rowCount() << std::;
-#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
-    for (size_t i = 0; i < reduceCount; ++i) {
-      //std::cout << omp_get_thread_num();
-      size_t row = reduceThese[i];
-      DenseRow<uint64>& denseRow = dense[row];
-      //for (size_t col = leadCols[row]; col < colCount; ++col) {
-      for (size_t i = 0; i < newReducerCount; ++i) {
-        size_t col = newReducers[i];
-        if (denseRow[col] == 0)
-          continue;
-        if (reducer[col] == noReducer)
-          continue;
-        denseRow.rowReduceByUnitary(reducer[col], reduced, modulus);
-      }
-    }
-    //std::cout << "done reducing that batch" << std::endl;
-    reduceThese.clear();
-    newReducers.clear();
-  }
-}
-
-void myReduceToEchelonForm2
-(SparseMatrix& toReduce,
- SparseMatrix::Scalar modulus,
- SparseMatrix& reduced,
- size_t threadCount) {
-  // making no assumptions on toReduce.
-
-  size_t const colCount = toReduce.colCount();
-  reduced.clear(colCount);
-
-  SparseMatrix somewhatReduced;
-  somewhatReduced.ensureAtLeastThisManyColumns(colCount);
-
-  size_t const noReducer = static_cast<size_t>(-1);
-  std::vector<size_t> reducer(colCount);
-  std::fill(reducer.begin(), reducer.end(), noReducer);
-
-
-  std::vector<size_t> reduceThese;
-
-#ifdef _OPENMP
-  std::vector<DenseRow<uint64> > denseRowPerThread(threadCount);
-#else
-  DenseRow<uint64> denseRow;
-#endif
-
-  while (toReduce.rowCount() > 0) {
-    SparseMatrix::RowIndex rowCount = toReduce.rowCount();
-    for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
-      SparseMatrix::RowIterator rowBegin = toReduce.rowBegin(row);
-      SparseMatrix::RowIterator rowEnd = toReduce.rowEnd(row);
-      if (rowBegin == rowEnd)
-        continue;
-      if (reducer[rowBegin.index()] == noReducer) {
-        reducer[rowBegin.index()] = reduced.rowCount();
-        reduced.appendRowAndNormalize(toReduce, row, modulus);
-        MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
-      } else
-        reduceThese.push_back(row);
-    }
-
-    size_t const reduceCount = reduceThese.size();
-    //    std::cout << "reducing " << reduceCount << " out of " << toReduce.rowCount() << std::endl;
-#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
-    for (size_t i = 0; i < reduceCount; ++i) {
-#ifdef _OPENMP
-    DenseRow<uint64>& denseRow = denseRowPerThread[omp_get_thread_num()];
-#endif
-      size_t row = reduceThese[i];
-      denseRow.reset(colCount);
-      denseRow.addRow(toReduce, row);
-      for (size_t col = 0; col < colCount; ++col) {
-        if (denseRow[col] == 0)
-          continue;
-        if (reducer[col] == noReducer)
-          continue;
-        denseRow.rowReduceByUnitary(reducer[col], reduced, modulus);
-      }
-
-#pragma omp critical
-      {
-        denseRow.appendToWithModulus(somewhatReduced, modulus);
-        MATHICGB_ASSERT(reducer[somewhatReduced.rowBegin(somewhatReduced.rowCount() - 1).index()] == noReducer);
-      }
-    }
-    //    std::cout << "done reducing that batch" << std::endl;
-    reduceThese.clear();
-    toReduce.swap(somewhatReduced);
-    somewhatReduced.clear();
-  }
-}
-
-void myReduceToEchelonForm
-(SparseMatrix const& toReduce,
- SparseMatrix::Scalar modulus,
- SparseMatrix& reduced) {
-  // making no assumptions on toReduce.
-
-  size_t const colCount = toReduce.colCount();
-  reduced.clear(colCount);
-
-  size_t const noReducer = static_cast<size_t>(-1);
-  std::vector<size_t> reducer(colCount);
-  std::fill(reducer.begin(), reducer.end(), noReducer);
-
-  DenseRow<uint64> denseRow;
-  SparseMatrix::RowIndex rowCount = toReduce.rowCount();
-  for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
-    SparseMatrix::RowIterator rowEnd = toReduce.rowEnd(row);
-    SparseMatrix::RowIterator rowBegin = toReduce.rowBegin(row);
-    if (rowBegin == rowEnd) {
-      // I think we don't need to copy empty rows
-      //reduced.rowDone();
-      continue; // zero row
-    }
-    if (reducer[rowBegin.index()] == noReducer) {
-      reducer[rowBegin.index()] = reduced.rowCount();
-      reduced.appendRowAndNormalize(toReduce, row, modulus);
-      MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
-      continue;
-    }
-
-    denseRow.reset(colCount);
-    denseRow.addRow(toReduce, row);
-    for (size_t col = 0; col < colCount; ++col) {
-      //MATHICGB_ASSERT(reducer[col] != noReducer);
-      if (denseRow[col] == 0)
-        continue;
-      if (reducer[col] == noReducer)
-        continue;
-      denseRow.rowReduceByUnitary(reducer[col], reduced, modulus);
-    }
-    reduced.appendRowWithModulusNormalized(denseRow.mEntries, modulus);
-    size_t last = reduced.rowCount() - 1;
-    if (reduced.rowBegin(last) != reduced.rowEnd(last)) {
-      reducer[reduced.rowBegin(last).index()] = last;
-      MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
-    }
-  }
+    if (!dense[row].empty())
+      dense[row].appendTo(toReduce);
 }
 
 class IOException : public std::runtime_error {
diff --git a/src/mathicgb/QuadMatrix.cpp b/src/mathicgb/QuadMatrix.cpp
index 9395725..b3d9ff5 100755
--- a/src/mathicgb/QuadMatrix.cpp
+++ b/src/mathicgb/QuadMatrix.cpp
@@ -56,9 +56,13 @@ void QuadMatrix::print(std::ostream& out) const {
   out << printer;
 }
 
-// String representation intended for debugging.
 std::string QuadMatrix::toString() const {
   std::ostringstream out;
   print(out);
   return out.str();
 }
+
+std::ostream& operator<<(std::ostream& out, const QuadMatrix qm) {
+  qm.print(out);
+  return out;
+}
diff --git a/src/mathicgb/QuadMatrix.hpp b/src/mathicgb/QuadMatrix.hpp
index 6c7873b..345c234 100755
--- a/src/mathicgb/QuadMatrix.hpp
+++ b/src/mathicgb/QuadMatrix.hpp
@@ -5,6 +5,7 @@
 #include "SparseMatrix.hpp"
 #include <vector>
 #include <string>
+#include <ostream>
 class ostream;
 
 /** Represents a matrix composed of 4 sub-matrices that fit together
@@ -36,4 +37,6 @@ public:
 #endif
 };
 
+std::ostream& operator<<(std::ostream& out, const QuadMatrix qm);
+
 #endif
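
The declaration above pairs with the definition added in QuadMatrix.cpp: a free stream operator that forwards to the existing print()/toString() machinery, so a matrix can be written straight to any std::ostream. A compilable sketch of that pattern with a stand-in type (not mathicgb's classes):

    // Stand-in type illustrating the header/source split used for QuadMatrix.
    #include <iostream>
    #include <sstream>
    #include <string>

    class Grid {
    public:
      void print(std::ostream& out) const { out << "1 2\n3 4\n"; }
      std::string toString() const {
        std::ostringstream out;
        print(out);
        return out.str();
      }
    };

    // Free operator<< forwarding to print(); the declaration would sit in the
    // header next to the class, the definition in the .cpp file.
    std::ostream& operator<<(std::ostream& out, const Grid& grid) {
      grid.print(out);
      return out;
    }

    int main() {
      Grid g;
      std::cout << g;   // prints the same text that g.toString() returns
      return 0;
    }
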
diff --git a/src/mathicgb/SparseMatrix.cpp b/src/mathicgb/SparseMatrix.cpp
index d60b8aa..451019a 100755
--- a/src/mathicgb/SparseMatrix.cpp
+++ b/src/mathicgb/SparseMatrix.cpp
@@ -1,7 +1,7 @@
 #include "stdinc.h"
 #include "SparseMatrix.hpp"
 
-std::ostream& operator<<(std::ostream& out, SparseMatrix& matrix) {
+std::ostream& operator<<(std::ostream& out, const SparseMatrix& matrix) {
   matrix.print(out);
   return out;
 }
diff --git a/src/mathicgb/SparseMatrix.hpp b/src/mathicgb/SparseMatrix.hpp
index 69f1963..8ab3b83 100755
--- a/src/mathicgb/SparseMatrix.hpp
+++ b/src/mathicgb/SparseMatrix.hpp
@@ -369,6 +369,6 @@ private:
   ColIndex mColCount;
 };
 
-std::ostream& operator<<(std::ostream& out, SparseMatrix& matrix);
+std::ostream& operator<<(std::ostream& out, const SparseMatrix& matrix);
 
 #endif
diff --git a/src/test/F4MatrixReducer.cpp b/src/test/F4MatrixReducer.cpp
index 47251ad..5fbf6c5 100755
--- a/src/test/F4MatrixReducer.cpp
+++ b/src/test/F4MatrixReducer.cpp
@@ -54,36 +54,66 @@ TEST(F4MatrixReducer, Reduce) {
 
   // bottom left
   m.bottomLeft.clear(4);
+  m.bottomLeft.rowDone();
+  m.bottomLeft.appendEntry(1, 9);
+  m.bottomLeft.rowDone();
+  m.bottomLeft.appendEntry(0, 2);
+  m.bottomLeft.appendEntry(1, 99); // 99 = -2 mod 101
+  m.bottomLeft.appendEntry(2, 83); // 83 = -18 mod 101
+  m.bottomLeft.appendEntry(3, 6);
+  m.bottomLeft.rowDone();
   m.bottomLeft.appendEntry(0, 1);
   m.bottomLeft.appendEntry(1, 1);
   m.bottomLeft.appendEntry(3, 24);
   m.bottomLeft.rowDone();
-  m.bottomLeft.appendEntry(1, 9);
+  m.bottomLeft.rowDone();
+  m.bottomLeft.appendEntry(3, 100);
   m.bottomLeft.rowDone();
 
   // bottom right
   m.bottomRight.clear(5);
+  m.bottomRight.rowDone();
+  m.bottomRight.appendEntry(1, 2);
+  m.bottomRight.appendEntry(3, 11);
+  m.bottomRight.rowDone();
+  m.bottomRight.appendEntry(2, 16);
+  m.bottomRight.appendEntry(3, 47);
+  m.bottomRight.rowDone();
   m.bottomRight.appendEntry(0, 1);
   m.bottomRight.appendEntry(2, 12);
   m.bottomRight.appendEntry(3, 13);
   m.bottomRight.appendEntry(4, 41);
   m.bottomRight.rowDone();
+  m.bottomRight.appendEntry(0, 2);
   m.bottomRight.appendEntry(1, 2);
-  m.bottomRight.appendEntry(3, 11);
+  m.bottomRight.appendEntry(2, 8);
+  m.bottomRight.appendEntry(3, 75);
+  m.bottomRight.appendEntry(4, 90);
+  m.bottomRight.rowDone();
+  m.bottomRight.appendEntry(0, 1);
+  m.bottomRight.appendEntry(1, 1);
+  m.bottomRight.appendEntry(2, 4);
+  m.bottomRight.appendEntry(3, 88);
+  m.bottomRight.appendEntry(4, 45);
   m.bottomRight.rowDone();
 
   MATHICGB_ASSERT(m.debugAssertValid());
   const char* origStr = 
     "Left columns: a a2 a3 a4\n"
     "Right columns: b b2 b3 b4 b5\n"
-    "0: 0#1 1#2 3#3  | 0: 2#8               \n"
-    "1: 1#1 2#3      | 1: 3#9               \n"
-    "2: 2#1 3#7      | 2: 4#10              \n"
-    "3: 3#1          | 3:                   \n"
-    "                |                      \n"
-    "0: 0#1 1#1 3#24 | 0: 0#1 2#12 3#13 4#41\n"
-    "1: 1#9          | 1: 1#2 3#11          \n";
-  ASSERT_EQ(origStr, m.toString());
+    "0: 0#1 1#2 3#3       | 0: 2#8                  \n"
+    "1: 1#1 2#3           | 1: 3#9                  \n"
+    "2: 2#1 3#7           | 2: 4#10                 \n"
+    "3: 3#1               | 3:                      \n"
+    "                     |                         \n"
+    "0:                   | 0:                      \n" // zero row
+    "1: 1#9               | 1: 1#2 3#11             \n" // becomes second row
+    "2: 0#2 1#99 2#83 3#6 | 2: 2#16 3#47            \n" // zero on left red.  
+    "3: 0#1 1#1 3#24      | 3: 0#1 2#12 3#13 4#41   \n" // becomes first row
+    "4:                   | 4: 0#2 1#2 2#8 3#75 4#90\n" // zero on right red.
+    "5: 3#100             | 5: 0#1 1#1 2#4 3#88 4#45\n"; // zero on right red.
+
+  ASSERT_EQ(origStr, m.toString()) << "Printed m:\n" << m;
 
   SparseMatrix reduced;
   F4MatrixReducer red;
@@ -92,5 +122,5 @@ TEST(F4MatrixReducer, Reduce) {
   const char* redStr =
     "0: 0#1 2#4 3#22 4#11\n"
     "1: 1#1 3#66 4#34\n";
-  ASSERT_EQ(redStr, reduced.toString());
+  ASSERT_EQ(redStr, reduced.toString()) << "Printed reduced:\n" << reduced;
 }
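
The updated assertions stream the matrix into the failure message, which is exactly what the new operator<< overloads enable. A minimal Google Test sketch of that idiom with a stand-in type (assumes gtest is available; it builds on the stream-operator pattern shown earlier):

    // Idiom sketch: compare a printed form against an expected string and, on
    // failure, append the streamable object to the assertion message.
    #include <gtest/gtest.h>
    #include <ostream>
    #include <sstream>
    #include <string>

    struct Fraction {
      int num;
      int den;
    };

    std::ostream& operator<<(std::ostream& out, const Fraction& f) {
      return out << f.num << '/' << f.den;
    }

    std::string toString(const Fraction& f) {
      std::ostringstream out;
      out << f;
      return out.str();
    }

    TEST(StreamMessage, PrintedForm) {
      const Fraction f = {22, 7};
      // On failure the message shows "Printed f:" followed by operator<< output.
      ASSERT_EQ("22/7", toString(f)) << "Printed f:\n" << f;
    }
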

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mathicgb.git


