[mathicgb] 121/393: First version of Shrawan's Gaussian elimination code.

Doug Torrance dtorrance-guest at moszumanska.debian.org
Fri Apr 3 15:58:45 UTC 2015


This is an automated email from the git hooks/post-receive script.

dtorrance-guest pushed a commit to branch upstream
in repository mathicgb.

commit 828eeee03f9b449878e737537f6be84ee871a170
Author: Sharwan Kumar Tiwari <stiwari at mumin.mathematik.uni-kl.de>
Date:   Tue Dec 11 17:04:03 2012 +0100

    First version of Shrawan's Gaussian elimination code.
---
 src/mathicgb/F4MatrixReducer.cpp | 179 ++++++++++++++++++++++++++++++++++++++-
 src/mathicgb/PolyRing.hpp        |  13 +++
 src/mathicgb/stdinc.h            |   8 +-
 3 files changed, 195 insertions(+), 5 deletions(-)

diff --git a/src/mathicgb/F4MatrixReducer.cpp b/src/mathicgb/F4MatrixReducer.cpp
index 1621b99..1b5abfa 100755
--- a/src/mathicgb/F4MatrixReducer.cpp
+++ b/src/mathicgb/F4MatrixReducer.cpp
@@ -79,7 +79,7 @@ namespace {
       const auto end = matrix.rowEnd(row);
       for (auto it = matrix.rowBegin(row); it != end; ++it) {
         MATHICGB_ASSERT(it.index() < colCount());
-        mEntries[it.index()] = it.scalar();
+        mEntries[it.index()] += it.scalar();
       }
     }
 
@@ -404,6 +404,172 @@ namespace {
   }
 }
 
+// todo: use auto instead of these typedefs where possible/reasonable
+// todo: do SparseMatrix::Scalar instead of Scalar and remove this typedef
+typedef SparseMatrix::Scalar Scalar;
+typedef SparseMatrix::RowIndex RowIndex; // todo: same
+typedef SparseMatrix::ColIndex ColIndex; // todo: same
+
+Scalar modPrime = 11; // todo: remove this variable
+
+void addRowMultipleInplace(
+  std::vector<std::vector<Scalar> >& matrix,
+  const RowIndex addRow,
+  const Scalar multiple,
+  const RowIndex row,
+  const ColIndex leadingCol,
+  const ColIndex colCount
+) {
+  assert(addRow < matrix.size());
+  assert(row < matrix.size());
+  assert(row != addRow);
+  assert(leadingCol < colCount);
+  assert(matrix[row].size() == colCount);
+  assert(matrix[addRow].size() == colCount);
+  for (ColIndex col = leadingCol; col < colCount; ++col) {
+    const Scalar product = modularProduct
+      (multiple, matrix[addRow][col], modPrime);
+    matrix[row][col] = modularSum(matrix[row][col], product, modPrime);
+  }
+}
+
+void makeRowUnitary(
+  std::vector<std::vector<Scalar> >& matrix,
+  const RowIndex row,
+  const ColIndex colCount,
+  const ColIndex leadingCol
+) {
+  assert(row < matrix.size());
+  assert(matrix[row].size() == colCount);
+  assert(leadingCol < colCount);
+  assert(modPrime > 1);
+  const Scalar leadingScalar = matrix[row][leadingCol];
+  assert(leadingScalar != 0);
+  Scalar multiply = modularInverse(leadingScalar, modPrime);
+  for (ColIndex col = leadingCol; col < colCount; ++col)
+    matrix[row][col] = (matrix[row][col] * multiply) % modPrime;
+  // todo: use modularProduct on above line
+}
+
+// todo: make this take a parameter startAtCol
+ColIndex leadingColumn(
+  const std::vector<std::vector<Scalar> >& matrix,
+  const RowIndex row,
+  const ColIndex colCount
+) {
+  assert(row < matrix.size());
+  assert(matrix[row].size() == colCount);
+  for (ColIndex col = 0; col < colCount; ++col) {
+    if (matrix[row][col] != 0)
+      return col;
+  }
+  return colCount;
+}
+
+void rowReducedEchelonMatrix(
+  std::vector<std::vector<Scalar> >& matrix,
+  const ColIndex colCount
+) {
+  // todo assert: matrix.empty() || matrix[0].size() == colCount
+  const auto rowCount = matrix.size();
+  // pivotRowOfCol[i] is the pivot row of column i, or rowCount
+  // if we have not identified such a pivot so far.
+  std::vector<RowIndex> pivotRowOfCol(colCount, rowCount);
+  for (RowIndex rowIt = 0; rowIt < rowCount; ++rowIt) { // todo: rename rowIt to row
+    while (true) { // reduce row by previous pivots
+      const auto leadingCol = leadingColumn(matrix, rowIt, colCount);
+      if (leadingCol == colCount)
+        break; // row was zero
+      const auto pivotRow = pivotRowOfCol[leadingCol];
+      if (pivotRow == rowCount) {
+        makeRowUnitary(matrix, rowIt, colCount, leadingCol);
+        pivotRowOfCol[leadingCol] = rowIt;
+        break; // row is now a pivot
+      }
+
+      const auto multiple =
+        modularNegative(matrix[rowIt][leadingCol], modPrime);
+      addRowMultipleInplace
+        (matrix, pivotRow, multiple, rowIt, leadingCol, colCount);
+    }
+  }
+}
+
+void reduceToEchelonFormShrawan
+  (SparseMatrix& toReduce, SparseMatrix::Scalar modulus, int threadCount)
+{
+  const SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+  const SparseMatrix::ColIndex colCount = toReduce.colCount();
+
+  // Convert input matrix to dense format
+  std::vector<std::vector<Scalar>> matrix(rowCount);
+  for (RowIndex row = 0; row < rowCount; ++row) {
+    MATHICGB_ASSERT(!toReduce.emptyRow(row));
+    matrix[row].resize(colCount);
+    const auto end = toReduce.rowEnd(row);
+    for (auto it = toReduce.rowBegin(row); it != end; ++it) {
+      MATHICGB_ASSERT(it.index() < colCount);
+      matrix[row][it.index()] = it.scalar();
+    }
+  }
+
+  // todo: make modPrime a parameter and rename it to modulus.
+  modPrime = modulus;
+  rowReducedEchelonMatrix(matrix, colCount);
+
+  // convert reduced matrix to SparseMatrix.
+  toReduce.clear(colCount);
+  for (size_t row = 0; row < rowCount; ++row) {
+    bool rowIsZero = true;
+    for (size_t col = 0; col < colCount; ++col) {
+      if (matrix[row][col] != 0) {
+        rowIsZero = false;
+        toReduce.appendEntry(col, matrix[row][col]);
+      }
+    }
+    if (!rowIsZero)
+      toReduce.rowDone();
+  }
+}
+
+void reduceToEchelonFormShrawanDelayedModulus
+  (SparseMatrix& toReduce, SparseMatrix::Scalar modulus, int threadCount)
+{
+  // todo: implement delayed modulus
+  const SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+  const SparseMatrix::ColIndex colCount = toReduce.colCount();
+
+  // Convert input matrix to dense format
+  std::vector<std::vector<Scalar>> matrix(rowCount);
+  for (RowIndex row = 0; row < rowCount; ++row) {
+    MATHICGB_ASSERT(!toReduce.emptyRow(row));
+    matrix[row].resize(colCount);
+    const auto end = toReduce.rowEnd(row);
+    for (auto it = toReduce.rowBegin(row); it != end; ++it) {
+      MATHICGB_ASSERT(it.index() < colCount);
+      matrix[row][it.index()] = it.scalar();
+    }
+  }
+
+  // todo: make modPrime a parameter and rename it to modulus.
+  modPrime = modulus;
+  rowReducedEchelonMatrix(matrix, colCount);
+
+  // convert reduced matrix to SparseMatrix.
+  toReduce.clear(colCount);
+  for (size_t row = 0; row < rowCount; ++row) {
+    bool rowIsZero = true;
+    for (size_t col = 0; col < colCount; ++col) {
+      if (matrix[row][col] != 0) {
+        rowIsZero = false;
+        toReduce.appendEntry(col, matrix[row][col]);
+      }
+    }
+    if (!rowIsZero)
+      toReduce.rowDone();
+  }
+}
+
 SparseMatrix F4MatrixReducer::reduce(const QuadMatrix& matrix) {
   MATHICGB_ASSERT(mThreadCount >= 1);
   MATHICGB_ASSERT(matrix.debugAssertValid());
@@ -411,7 +577,16 @@ SparseMatrix F4MatrixReducer::reduce(const QuadMatrix& matrix) {
     matrix.printSizes(std::cerr);
 
   SparseMatrix newPivots(::reduce(matrix, mModulus, mThreadCount));
-  ::reduceToEchelonForm(newPivots, mModulus, mThreadCount);
+  const bool useShrawan = true;
+  const bool useDelayedModulus = false;
+  if (useShrawan) {
+    if (useDelayedModulus)
+      reduceToEchelonFormShrawanDelayedModulus
+        (newPivots, mModulus, mThreadCount);
+    else
+      reduceToEchelonFormShrawan(newPivots, mModulus, mThreadCount);
+  } else
+    reduceToEchelonForm(newPivots, mModulus, mThreadCount);
   return std::move(newPivots);
 }
 
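A note on the code added to F4MatrixReducer.cpp above: it is a dense Gaussian elimination over Z/p. Each row is repeatedly reduced against the pivot rows found so far (addRowMultipleInplace) until it either becomes zero or yields a new pivot, which makeRowUnitary scales so its leading entry is 1. The sparse input is expanded into a dense vector-of-vectors, reduced, and written back into the SparseMatrix, dropping rows that reduced to zero. For experimenting with the idea outside the library, here is a minimal standalone sketch of the same loop with concrete types; it is an illustration only, not mathicgb code, and every name in it is hypothetical. The brute-force inverse and the plain % arithmetic are for clarity; mathicgb uses modularInverse and a wider intermediate type (ModularProdType) for the products.

    // Standalone sketch (not mathicgb): dense row reduction over Z/p,
    // mirroring the loop structure of rowReducedEchelonMatrix above.
    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    typedef std::uint32_t Scalar;

    // Brute-force inverse of a modulo a prime p, with 0 < a < p.
    Scalar inverseMod(Scalar a, Scalar p) {
      for (Scalar x = 1; x < p; ++x)
        if ((a * x) % p == 1)
          return x;
      return 0; // not reached for prime p and nonzero a
    }

    // Reduce each row by the pivots found so far; a nonzero remainder
    // becomes a new pivot row and is scaled so its leading entry is 1.
    void rowReduce(std::vector<std::vector<Scalar> >& m, Scalar p) {
      const std::size_t rowCount = m.size();
      const std::size_t colCount = rowCount == 0 ? 0 : m[0].size();
      std::vector<std::size_t> pivotRowOfCol(colCount, rowCount);
      for (std::size_t row = 0; row < rowCount; ++row) {
        while (true) {
          std::size_t lead = 0; // first nonzero column of this row
          while (lead < colCount && m[row][lead] == 0)
            ++lead;
          if (lead == colCount)
            break; // row reduced to zero
          if (pivotRowOfCol[lead] == rowCount) { // no pivot for this column yet
            const Scalar inv = inverseMod(m[row][lead], p);
            for (std::size_t c = lead; c < colCount; ++c)
              m[row][c] = (m[row][c] * inv) % p;
            pivotRowOfCol[lead] = row;
            break; // row is now a pivot
          }
          // row += (-m[row][lead]) * pivotRow, which zeroes column lead.
          // Entries stay below p, so for small primes this fits in Scalar.
          const std::size_t pivot = pivotRowOfCol[lead];
          const Scalar mult = p - m[row][lead];
          for (std::size_t c = lead; c < colCount; ++c)
            m[row][c] = (m[row][c] + mult * m[pivot][c]) % p;
        }
      }
    }

    int main() {
      const Scalar p = 11;
      std::vector<std::vector<Scalar> > m = {{2, 4, 1}, {6, 1, 5}};
      rowReduce(m, p);
      for (std::size_t r = 0; r < m.size(); ++r) {
        for (std::size_t c = 0; c < m[r].size(); ++c)
          std::cout << m[r][c] << ' ';
        std::cout << '\n'; // prints "1 2 6" and then "0 0 1"
      }
      return 0;
    }

Built as C++11, the sample reduces the 2x3 matrix {{2,4,1},{6,1,5}} modulo 11 to the rows 1 2 6 and 0 0 1.
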
diff --git a/src/mathicgb/PolyRing.hpp b/src/mathicgb/PolyRing.hpp
index 0f357c4..a4f9719 100755
--- a/src/mathicgb/PolyRing.hpp
+++ b/src/mathicgb/PolyRing.hpp
@@ -82,6 +82,19 @@ T modularProduct(T a, T b, T modulus) {
   return static_cast<T>(bigProd % modulus);
 }
 
+/** Returns a+b mod modulus.  It is required that 0 <= a, b < modulus. */
+template<class T>
+T modularSum(T a, T b, T modulus) {
+  typedef typename ModularProdType<T>::type BigT;
+  MATHICGB_ASSERT(0 <= a);
+  MATHICGB_ASSERT(a < modulus);
+  MATHICGB_ASSERT(0 <= b);
+  MATHICGB_ASSERT(b < modulus);
+  BigT bigSum = static_cast<BigT>(a) + b;
+  MATHICGB_ASSERT(bigSum - a == b);
+  return static_cast<T>(bigSum % modulus);
+}
+
 /** Returns -a mod modulus. It is required that 0 <= a < modulus. */
 template<class T>
 T modularNegative(T a, T modulus) {
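A note on the modularSum helper added above: since both inputs are required to lie in [0, modulus), computing the sum in the wider ModularProdType and reducing once with % is enough, and the sum cannot overflow the wide type. The snippet below mirrors that contract with concrete fixed-width types; it is a self-contained illustration, not mathicgb code.

    // Sketch of the modularSum contract: requires 0 <= a, b < m.
    #include <cassert>
    #include <cstdint>
    #include <iostream>

    std::uint16_t modularSumSketch(std::uint16_t a, std::uint16_t b, std::uint16_t m) {
      assert(a < m && b < m);
      // The 32-bit sum of two 16-bit values cannot overflow.
      const std::uint32_t bigSum = static_cast<std::uint32_t>(a) + b;
      return static_cast<std::uint16_t>(bigSum % m);
    }

    int main() {
      std::cout << modularSumSketch(7, 9, 11) << '\n';   // prints 5 (16 mod 11)
      std::cout << modularSumSketch(10, 10, 11) << '\n'; // prints 9 (20 mod 11)
      return 0;
    }

Since a + b < 2*modulus, a compare-and-subtract would avoid the division entirely; the template keeps the % form, which matches the existing modularProduct.
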
diff --git a/src/mathicgb/stdinc.h b/src/mathicgb/stdinc.h
index 25d2f27..6813c7f 100755
--- a/src/mathicgb/stdinc.h
+++ b/src/mathicgb/stdinc.h
@@ -72,6 +72,7 @@
 #define MATHICGB_PURE __attribute__(pure)
 #define MATHICGB_MUST_CHECK_RETURN_VALUE __attribute__(warn_unused_result)
 #define MATHICGB_UNREACHABLE __builtin_unreachable()
+#define MATHICGB_RESTRICT
 
 #else
 
@@ -84,6 +85,7 @@
 #define MATHICGB_PURE
 #define MATHICGB_MUST_CHECK_RETURN_VALUE
 #define MATHICGB_UNREACHABLE
+#define MATHICGB_RESTRICT
 
 #endif
 
@@ -97,12 +99,12 @@
 #define DEBUG
 #include <iostream> // Useful for debugging.
 #include <cassert>
-#define MATHICGB_ASSERT(X) do{assert(X);}while(0)
+#define MATHICGB_ASSERT(X) do{assert(X);}while(0)
 #define MATHICGB_ASSERT_NO_ASSUME(X) MATHICGB_ASSERT(X)
 #define MATHICGB_IF_DEBUG(X) X
 #else
-#define MATHICGB_ASSERT(X) MATHICGB_ASSUME(X)
-#define MATHICGB_ASSERT_NO_ASSUME(X)
+#define MATHICGB_ASSERT(X) MATHICGB_ASSUME(X)
+#define MATHICGB_ASSERT_NO_ASSUME(X)
 #define MATHICGB_IF_DEBUG(X)
 #endif
 

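On the stdinc.h change: MATHICGB_RESTRICT is added as an empty macro in both the GCC branch and the fallback branch, so at this point it is only a placeholder. If it is later wired to the compilers' no-alias hints, a typical definition (hypothetical, not part of this commit) would be:

    #if defined(__GNUC__)
    #define MATHICGB_RESTRICT __restrict__
    #elif defined(_MSC_VER)
    #define MATHICGB_RESTRICT __restrict
    #else
    #define MATHICGB_RESTRICT
    #endif

It could then annotate pointer parameters in the reduction loops, e.g. void addRowMultiple(Scalar* MATHICGB_RESTRICT dst, const Scalar* MATHICGB_RESTRICT src, ...), to tell the compiler that the two rows do not alias.
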
-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mathicgb.git


