[mathicgb] 34/393: Added F4MatrixReducer, which reduces an F4 matrix to reduced row echelon form. Also added a single non-trivial test that passes, so maybe it actually works. This is a code dump of the prototype code I wrote for matrix reduction previously, so it needs to be cleaned up - I only did some search-and-replace to make it compile and a bit of effort to hook things up correctly and shake out the bugs in the glue code.
Doug Torrance
dtorrance-guest at moszumanska.debian.org
Fri Apr 3 15:58:28 UTC 2015
dtorrance-guest pushed a commit to branch upstream
in repository mathicgb.
commit 907cd4a761907444f7c79dcf776b7ee6232132f6
Author: Bjarke Hammersholt Roune <bjarkehr.code at gmail.com>
Date: Thu Sep 27 22:23:37 2012 +0200
Added F4MatrixReducer, which reduces an F4 matrix to reduced row echelon form. Also added a single non-trivial test that passes, so maybe it actually works. This is a code dump of the prototype code I wrote for matrix reduction previously, so it needs to be cleaned up - I only did some search-and-replace to make it compile and a bit of effort to hook things up correctly and shake out the bugs in the glue code.
---
Makefile.am | 5 +-
src/mathicgb/F4MatrixReducer.cpp | 1063 ++++++++++++++++++++++++++++++++++++++
src/mathicgb/F4MatrixReducer.hpp | 18 +
src/mathicgb/F4Reducer.cpp | 7 +
src/mathicgb/PolyRing.hpp | 44 +-
src/mathicgb/SparseMatrix.cpp | 4 +
src/mathicgb/SparseMatrix.hpp | 2 +
src/mathicgb/stdinc.h | 9 +-
src/test/F4MatrixBuilder.cpp | 1 +
src/test/F4MatrixReducer.cpp | 96 ++++
10 files changed, 1242 insertions(+), 7 deletions(-)
diff --git a/Makefile.am b/Makefile.am
index 8f86f9f..00b0d9a 100755
--- a/Makefile.am
+++ b/Makefile.am
@@ -59,7 +59,8 @@ libmathicgb_la_SOURCES = src/mathicgb/BjarkeGeobucket2.cpp \
src/mathicgb/TypicalReducer.cpp src/mathicgb/F4Reducer.hpp \
src/mathicgb/F4Reducer.cpp src/mathicgb/F4MatrixBuilder.hpp \
src/mathicgb/F4MatrixBuilder.cpp src/mathicgb/QuadMatrix.hpp \
- src/mathicgb/QuadMatrix.cpp
+ src/mathicgb/QuadMatrix.cpp src/mathicgb/F4MatrixReducer.cpp \
+ src/mathicgb/F4MatrixReducer.hpp
# When making a distribution file, Automake knows to include all files
# that are necessary to build the project. EXTRA_DIST specifies files
@@ -90,4 +91,4 @@ unittest_SOURCES=src/test/FreeModuleOrderTest.cpp \
src/test/gtestInclude.cpp src/test/testMain.cpp src/test/gb-test.cpp \
src/test/ideals.cpp src/test/poly-test.cpp src/test/ideals.hpp \
src/test/SparseMatrix.cpp src/test/QuadMatrixBuilder.cpp \
- src/test/F4MatrixBuilder.cpp
+ src/test/F4MatrixBuilder.cpp src/test/F4MatrixReducer.cpp
diff --git a/src/mathicgb/F4MatrixReducer.cpp b/src/mathicgb/F4MatrixReducer.cpp
new file mode 100755
index 0000000..5a635ca
--- /dev/null
+++ b/src/mathicgb/F4MatrixReducer.cpp
@@ -0,0 +1,1063 @@
+#include "stdinc.h"
+#include "F4MatrixReducer.hpp"
+
+#include "QuadMatrix.hpp"
+#include "SparseMatrix.hpp"
+#include "PolyRing.hpp"
+#include <algorithm>
+#include <vector>
+#include <stdexcept>
+#include <map>
+#include <string>
+#include <cstdio>
+#include <limits>
+#include <ostream>
+#include <utility>
+#ifdef _OPENMP
+#include <omp.h>
+#endif
+
+template<class T>
+class DenseRow {
+public:
+ DenseRow() {}
+ DenseRow(size_t colCount): mEntries(colCount) {}
+
+ void takeModulus(SparseMatrix::Scalar modulus, size_t startCol = 0) {
+ typedef typename std::vector<T>::iterator Iter;
+ Iter end = mEntries.end();
+ for (Iter it = mEntries.begin() + startCol; it != end; ++it)
+ if (*it >= modulus)
+ *it %= modulus;
+ }
+
+ void clear() {
+ mEntries.clear();
+ }
+
+ bool empty() const {
+ return mEntries.empty();
+ }
+
+ void reset(size_t colCount) {
+ mEntries.clear();
+ mEntries.resize(colCount);
+ }
+
+ template<class Iter>
+ void addSparseNoModulus(Iter begin, Iter end) {
+ for (; begin != end; ++begin) {
+ MATHICGB_ASSERT(begin.index() < colCount());
+ mEntries[begin.index()] += begin.scalar();
+ }
+ }
+
+ T& operator[](size_t col) {
+ MATHICGB_ASSERT(col < colCount());
+ return mEntries[col];
+ }
+ T const& operator[](size_t col) const {
+ MATHICGB_ASSERT(col < colCount());
+ return mEntries[col];
+ }
+ size_t colCount() const {
+ return mEntries.size();
+ }
+
+ void appendToWithModulus(SparseMatrix& matrix, SparseMatrix::Scalar modulus) {
+ matrix.appendRowWithModulus(mEntries, modulus);
+ }
+
+ void appendTo(SparseMatrix& matrix, size_t leadCol = 0) {
+ matrix.appendRow(mEntries, leadCol);
+ }
+
+ void normalize(SparseMatrix::Scalar modulus, size_t lead) {
+ MATHICGB_ASSERT(lead < colCount());
+ MATHICGB_ASSERT(mEntries[lead] != 0);
+
+ typedef typename std::vector<T>::iterator Iter;
+ Iter end = mEntries.end();
+ Iter it = mEntries.begin() + lead;
+ SparseMatrix::Scalar toInvert = static_cast<SparseMatrix::Scalar>(*it % modulus);
+ SparseMatrix::Scalar multiply = modularInverse(toInvert, modulus);
+ *it = 1;
+ for (++it; it != end; ++it) {
+ SparseMatrix::Scalar entry = static_cast<SparseMatrix::Scalar>(*it % modulus);
+ if (entry != 0)
+ entry = modularProduct(entry, multiply, modulus);
+ *it = entry;
+ }
+ }
+
+ void addRow(SparseMatrix const& matrix, SparseMatrix::RowIndex row) {
+ MATHICGB_ASSERT(row < matrix.rowCount());
+ MATHICGB_ASSERT(matrix.colCount() == colCount());
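+ // Note: entries are assigned rather than accumulated here, so
+ // callers are expected to reset() this row first.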
+ SparseMatrix::RowIterator end = matrix.rowEnd(row);
+ for (SparseMatrix::RowIterator it = matrix.rowBegin(row); it != end; ++it) {
+ MATHICGB_ASSERT(it.index() < colCount());
+ mEntries[it.index()] = it.scalar();
+ }
+ }
+
+ void addRowPrefix(SparseMatrix const& matrix, SparseMatrix::RowIndex row, size_t stopAtCol) {
+ MATHICGB_ASSERT(row < matrix.rowCount());
+ MATHICGB_ASSERT(matrix.colCount() == colCount());
+ SparseMatrix::RowIterator end = matrix.rowEnd(row);
+ for (SparseMatrix::RowIterator it = matrix.rowBegin(row); it != end; ++it) {
+ if (it.index() >= stopAtCol)
+ break;
+ MATHICGB_ASSERT(it.index() < colCount());
+ mEntries[it.index()] = it.scalar();
+ }
+ }
+
+ template<class Iter>
+ void addRowMultiple(SparseMatrix::Scalar multiple, Iter begin, Iter end) {
+ T mult = static_cast<T>(multiple);
+ for (; begin != end; ++begin) {
+ MATHICGB_ASSERT(begin.index() < colCount());
+ // Watch out for overflow here! This is only OK because the
+ // multiplication is carried out in type T, which is wide enough.
+ mEntries[begin.index()] += begin.scalar() * mult;
+ }
+ }
+
+ template<class Iter>
+ void addRowPrefixMultiple(SparseMatrix::Scalar multiple, Iter begin, Iter end, SparseMatrix::RowIndex stopAtCol) {
+ T mult = static_cast<T>(multiple);
+ for (; begin != end; ++begin) {
+ if (begin.index() >= stopAtCol)
+ break;
+ MATHICGB_ASSERT(begin.index() < colCount());
+ // Watch out for overflow here! This is only OK because the
+ // multiplication is carried out in type T, which is wide enough.
+ mEntries[begin.index()] += begin.scalar() * mult;
+ }
+ }
+
+ void addRowMultiple(SparseMatrix::Scalar multiple, DenseRow const& dense, size_t lead) {
+ MATHICGB_ASSERT(dense.colCount() == colCount());
+ MATHICGB_ASSERT(lead < colCount());
+ T mult = static_cast<T>(multiple);
+ size_t colCo = colCount();
+ for (size_t col = lead; col < colCo; ++col) {
+ // Watch out for overflow here! This is only OK because the
+ // multiplication is carried out in type T, which is wide enough.
+ mEntries[col] += dense[col] * mult;
+ }
+ }
+
+ void rowReduceByUnitary
+ (size_t pivotRow, SparseMatrix const& matrix, SparseMatrix::Scalar modulus) {
+ MATHICGB_ASSERT(pivotRow < matrix.rowCount());
+ MATHICGB_ASSERT(matrix.rowBegin(pivotRow).scalar() == 1); // unitary
+ MATHICGB_ASSERT(modulus > 1);
+
+ SparseMatrix::RowIterator begin = matrix.rowBegin(pivotRow);
+ SparseMatrix::ColIndex col = begin.index();
+ SparseMatrix::Scalar entry = mEntries[col] % modulus;
+ mEntries[col] = 0;
+ if (entry == 0)
+ return;
+ ++begin; // can skip first entry as we just set it to zero.
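+ // Subtract entry * pivotRow by instead adding (modulus - entry) *
+ // pivotRow, so the unsigned accumulation never goes negative.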
+ addRowMultiple(modulus - entry, begin, matrix.rowEnd(pivotRow));
+ }
+
+ void rowReduceByUnitaryPrefix
+ (size_t pivotRow, SparseMatrix const& matrix, SparseMatrix::Scalar modulus, SparseMatrix::RowIndex stopAtCol) {
+ MATHICGB_ASSERT(pivotRow < matrix.rowCount());
+ MATHICGB_ASSERT(matrix.rowBegin(pivotRow).scalar() == 1); // unitary
+ MATHICGB_ASSERT(modulus > 1);
+
+ SparseMatrix::RowIterator begin = matrix.rowBegin(pivotRow);
+ SparseMatrix::ColIndex col = begin.index();
+ if (col >= stopAtCol)
+ return;
+ SparseMatrix::Scalar entry = mEntries[col] % modulus;
+ mEntries[col] = 0;
+ if (entry == 0)
+ return;
+ ++begin; // can skip first entry as we just set it to zero.
+ addRowPrefixMultiple(modulus - entry, begin, matrix.rowEnd(pivotRow), stopAtCol);
+ }
+
+ void rowReduceByUnitary(DenseRow const& row, size_t lead, SparseMatrix::Scalar modulus) {
+ MATHICGB_ASSERT(row.colCount() == colCount());
+ MATHICGB_ASSERT(lead < row.colCount());
+ MATHICGB_ASSERT(row[lead] == 1);
+ MATHICGB_ASSERT(modulus > 1);
+
+ SparseMatrix::Scalar entry = mEntries[lead] % modulus;
+ mEntries[lead] = 0;
+ if (entry == 0)
+ return;
+ addRowMultiple(modulus - entry, row, lead + 1);
+ }
+
+ void rowReduce(size_t pivotRow, SparseMatrix const& matrix, SparseMatrix::Scalar modulus) {
+ MATHICGB_ASSERT(pivotRow < matrix.rowCount());
+ MATHICGB_ASSERT(matrix.rowBegin(pivotRow) != matrix.rowEnd(pivotRow));
+ MATHICGB_ASSERT(matrix.rowBegin(pivotRow).scalar() != 0);
+ MATHICGB_ASSERT(modulus > 1);
+
+ SparseMatrix::RowIterator begin = matrix.rowBegin(pivotRow);
+ SparseMatrix::ColIndex col = begin.index();
+ SparseMatrix::Scalar entry = mEntries[col] % modulus;
+ mEntries[col] = 0;
+ if (entry == 0)
+ return;
+ SparseMatrix::Scalar reducerLead = begin.scalar();
+ ++begin; // can skip first entry as we just set it to zero.
+ SparseMatrix::RowIterator end = matrix.rowEnd(pivotRow);
+ if (begin == end)
+ return;
+
+ SparseMatrix::Scalar inverse = modularInverse(reducerLead, modulus);
+ SparseMatrix::Scalar prod = modularProduct(inverse, entry, modulus);
+ SparseMatrix::Scalar negProd = modularNegativeNonZero(prod, modulus);
+ addRowMultiple(negProd, begin, end);
+ }
+
+ // should be private, but mEntries is accessed directly by a few callers.
+ // private:
+ std::vector<T> mEntries;
+};
+
+template<typename Matrix>
+void reformMatrix(const Matrix& matA, const Matrix& matB, SparseMatrix& matAB) {
+ MATHICGB_ASSERT(matA.rowdim() == matB.rowdim());
+
+ matAB.clear(matA.coldim() + matB.coldim());
+ MATHICGB_ASSERT(matAB.colCount() == matA.coldim() + matB.coldim());
+ size_t const colCountA = matA.coldim();
+ size_t const rowCount = matA.rowdim();
+
+ typedef typename Matrix::Row::const_iterator CIter;
+ for (size_t row = 0; row < rowCount; ++row) {
+ {
+ CIter const endA = matA[row].end();
+ for (CIter it = matA[row].begin(); it != endA; ++it) {
+ MATHICGB_ASSERT(it->first < colCountA);
+ matAB.appendEntry(it->first, it->second);
+ }
+ }
+ {
+ CIter const endB = matB[row].end();
+ for (CIter it = matB[row].begin(); it != endB; ++it) {
+ MATHICGB_ASSERT(it->first < matB.coldim());
+ MATHICGB_ASSERT(it->first + colCountA < matAB.colCount());
+ matAB.appendEntry(it->first + colCountA, it->second);
+ }
+ }
+ matAB.rowDone();
+ }
+ MATHICGB_ASSERT(matAB.rowCount() == matA.rowdim());
+ MATHICGB_ASSERT(matAB.colCount() == matA.coldim() + matB.coldim());
+}
+
+void sortRowsByIncreasingPivots(SparseMatrix& in, SparseMatrix& out) {
+ MATHICGB_ASSERT(&in != &out);
+
+ // compute pairs (pivot column index, row)
+ std::vector<std::pair<SparseMatrix::ColIndex, SparseMatrix::RowIndex> > order;
+ const SparseMatrix::RowIndex rowCount = in.rowCount();
+ const SparseMatrix::ColIndex colCount = in.colCount();
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ if (in.entryCountInRow(row) == 0)
+ order.push_back(std::make_pair(colCount, row));
+ else
+ order.push_back(std::make_pair(in.rowBegin(row).index(), row));
+ }
+
+ // sort pairs by pivot column index
+ std::sort(order.begin(), order.end());
+
+ // construct out with pivot columns in increasing order
+ out.clear(colCount);
+ for (size_t i = 0; i < rowCount; ++i) {
+ const SparseMatrix::RowIndex row = order[i].second;
+ SparseMatrix::RowIterator it = in.rowBegin(row);
+ SparseMatrix::RowIterator end = in.rowEnd(row);
+ for (; it != end; ++it)
+ out.appendEntry(it.index(), it.scalar());
+ out.rowDone();
+ }
+}
+
+void myReduce
+(SparseMatrix const& toReduce,
+ SparseMatrix const& reduceBy,
+ SparseMatrix::Scalar modulus,
+ SparseMatrix& reduced,
+ int threadCount) {
+ // assuming that the left part of reduceBy is upper triangular.
+ MATHICGB_ASSERT(reduceBy.colCount() >= reduceBy.rowCount());
+ MATHICGB_ASSERT(reduceBy.colCount() == toReduce.colCount());
+ size_t const pivotCount = reduceBy.rowCount();
+ size_t const colCount = toReduce.colCount();
+
+ reduced.clear(toReduce.colCount());
+ size_t rowCount = toReduce.rowCount();
+
+#ifdef _OPENMP
+ std::vector<DenseRow<uint64> > denseRowPerThread(threadCount);
+#else
+ DenseRow<uint64> denseRow;
+#endif
+
+#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
+ for (size_t row = 0; row < rowCount; ++row) {
+#ifdef _OPENMP
+ DenseRow<uint64>& denseRow = denseRowPerThread[omp_get_thread_num()];
+#endif
+ denseRow.reset(colCount);
+ denseRow.addRow(toReduce, row);
+ for (size_t pivot = 0; pivot < pivotCount; ++pivot) {
+ if (denseRow[pivot] == 0)
+ continue;
+ denseRow.rowReduceByUnitary(pivot, reduceBy, modulus);
+ }
+ denseRow.takeModulus(modulus, pivotCount);
+#pragma omp critical
+ {
+ denseRow.appendTo(reduced, pivotCount);
+ }
+ }
+}
+
+void myReduceToEchelonForm5
+(SparseMatrix& toReduce, SparseMatrix::Scalar modulus, size_t threadCount) {
+ // making no assumptions on toReduce.
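+ // Strategy: keep every row in wide (uint64) dense form. Each round
+ // reduces all rows in parallel by the pivot rows discovered in the
+ // previous round, then registers one new normalized pivot row per
+ // column that gained one. Rounds repeat until no new pivots appear.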
+
+ SparseMatrix::RowIndex const rowCount = toReduce.rowCount();
+ SparseMatrix::ColIndex const colCount = toReduce.colCount();
+
+ // dense representation
+ std::vector<DenseRow<uint64> > dense(rowCount);
+#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ dense[row].reset(colCount);
+ dense[row].addRow(toReduce, row);
+ }
+
+ // invariant: in each row, all entries in columns to the left of
+ // leadCols[row] are zero.
+ std::vector<size_t> leadCols(rowCount);
+
+ // pivot rows get copied here before being used to reduce the matrix.
+ SparseMatrix reduced;
+ reduced.clear(colCount);
+
+ // (col,row) in nextReducers, then use row as a pivot in column col
+ // for the next iteration.
+ std::vector<std::pair<SparseMatrix::ColIndex, SparseMatrix::RowIndex> > nextReducers;
+
+ // isPivotRow[row] is true if row is or has been used as a pivot.
+ std::vector<bool> isPivotRow(rowCount);
+
+ // columnHasPivot[col] is true if a pivot row for column col has
+ // been chosen.
+ std::vector<bool> columnHasPivot(colCount);
+
+ bool firstIteration = true;
+ while (firstIteration || reduced.rowCount() > 0) {
+ firstIteration = false;
+ size_t const reducerCount = reduced.rowCount();
+
+ //std::cout << "reducing " << reduced.rowCount() << " out of " << toReduce.rowCount() << std::endl;
+#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
+ for (size_t row = 0; row < rowCount; ++row) {
+ DenseRow<uint64>& denseRow = dense[row];
+
+ // reduce by each row of reduced.
+ for (size_t reducerRow = 0; reducerRow < reducerCount; ++reducerRow) {
+ size_t const col = reduced.rowBegin(reducerRow).index();
+ if (denseRow[col] == 0 || (isPivotRow[row] && col == leadCols[row]))
+ continue;
+ denseRow.rowReduceByUnitary(reducerRow, reduced, modulus);
+ }
+
+ // update leadCols[row]
+ size_t col;
+ for (col = leadCols[row]; col < colCount; ++col) {
+ denseRow[col] %= modulus;
+ if (denseRow[col] != 0)
+ break;
+ }
+ leadCols[row] = col;
+
+ // note if we have found a new pivot row
+ if (col < colCount) {
+ bool isNewReducer = false;
+#pragma omp critical
+ {
+ if (!columnHasPivot[col]) {
+ columnHasPivot[col] = true;
+ isNewReducer = true;
+ nextReducers.push_back(std::make_pair(col, row));
+ }
+ }
+ if (isNewReducer)
+ denseRow.normalize(modulus, col);
+ }
+ }
+ //std::cout << "done reducing that batch" << std::endl;
+
+ reduced.clear(colCount);
+ std::sort(nextReducers.begin(), nextReducers.end());
+ for (size_t i = 0; i < nextReducers.size(); ++i) {
+ MATHICGB_ASSERT(reduced.colCount() == colCount);
+ size_t const row = nextReducers[i].second;
+
+#ifdef MATHICGB_DEBUG
+ size_t const col = nextReducers[i].first;
+#endif
+ MATHICGB_ASSERT(col == nextReducers[i].first);
+ MATHICGB_ASSERT(columnHasPivot[col]);
+ MATHICGB_ASSERT(dense[row].colCount() == colCount);
+ MATHICGB_ASSERT(dense[row][col] == 1); // already normalized
+ MATHICGB_ASSERT(reduced.rowCount() == i);
+ MATHICGB_ASSERT(!isPivotRow[row]);
+
+ dense[row].appendTo(reduced); // already normalized
+ isPivotRow[row] = true;
+ }
+ nextReducers.clear();
+ }
+
+#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
+ for (size_t row = 0; row < rowCount; ++row)
+ dense[row].takeModulus(modulus);
+
+ toReduce.clear(colCount);
+ for (size_t row = 0; row < rowCount; ++row)
+ dense[row].appendTo(toReduce);
+}
+
+void myReduceToEchelonForm4
+(SparseMatrix& toReduce,
+ SparseMatrix::Scalar modulus,
+ SparseMatrix& reduced,
+ size_t threadCount) {
+ // making no assumptions on toReduce.
+
+ size_t const colCount = toReduce.colCount();
+ reduced.clear(colCount);
+ MATHICGB_ASSERT(reduced.colCount() == colCount);
+
+ size_t const noReducer = static_cast<size_t>(-1);
+ std::vector<size_t> reducer(colCount);
+ std::fill(reducer.begin(), reducer.end(), noReducer);
+
+ std::vector<size_t> reduceThese;
+ std::vector<size_t> lookAtThese;
+
+ SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+ std::vector<DenseRow<uint64> > dense(rowCount);
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ dense[row].reset(colCount);
+ dense[row].addRow(toReduce, row);
+ }
+
+ std::vector<size_t> leadCols(rowCount);
+
+ std::vector<size_t> newReducers;
+
+ for (size_t i = 0; i < rowCount; ++i)
+ lookAtThese.push_back(i);
+
+ while (true) {
+ size_t const lookAtTheseCount = lookAtThese.size();
+ //for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ for (size_t i = 0; i < lookAtTheseCount; ++i) {
+ size_t const row = lookAtThese[i];
+ MATHICGB_ASSERT(!dense[row].empty());
+ for (size_t col = leadCols[row]; col < colCount; ++col) {
+ if (dense[row][col] == 0)
+ continue;
+ dense[row][col] %= modulus;
+ if (dense[row][col] == 0)
+ continue;
+ if (reducer[col] == noReducer) {
+ reducer[col] = row; //reduced.rowCount();
+ newReducers.push_back(col);
+ MATHICGB_ASSERT(reduced.colCount() == colCount);
+ MATHICGB_ASSERT(dense[row].colCount() == colCount);
+ dense[row].normalize(modulus, col);
+ MATHICGB_ASSERT(dense[row][col] == 1);
+ dense[row].appendTo(reduced);
+ MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
+ MATHICGB_ASSERT(dense[reducer[col]][col] == 1);
+ } else {
+ leadCols[row] = col;
+ reduceThese.push_back(row);
+ }
+ break;
+ }
+ }
+ std::sort(newReducers.begin(), newReducers.end());
+ size_t const newReducerCount = newReducers.size();
+
+ size_t const reduceCount = reduceThese.size();
+ if (reduceCount == 0)
+ break;
+ //std::cout << "reducing " << reduceCount << " out of " << toReduce.rowCount() << std::;
+#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
+ for (size_t i = 0; i < reduceCount; ++i) {
+ //std::cout << omp_get_thread_num();
+ size_t row = reduceThese[i];
+ DenseRow<uint64>& denseRow = dense[row];
+ for (size_t j = 0; j < newReducerCount; ++j) {
+ size_t col = newReducers[j];
+ if (denseRow[col] == 0)
+ continue;
+ if (reducer[col] == noReducer)
+ continue;
+ MATHICGB_ASSERT(dense[reducer[col]][col] == 1);
+ denseRow.rowReduceByUnitary(dense[reducer[col]], col, modulus);
+ }
+ }
+ //std::cout << "done reducing that batch" << std::endl;
+ newReducers.clear();
+ lookAtThese.swap(reduceThese);
+ reduceThese.clear();
+ }
+}
+
+void myReduceToEchelonForm3
+(SparseMatrix& toReduce,
+ SparseMatrix::Scalar modulus,
+ SparseMatrix& reduced,
+ size_t threadCount) {
+ // making no assumptions on toReduce.
+
+ size_t const colCount = toReduce.colCount();
+ reduced.clear(colCount);
+ MATHICGB_ASSERT(reduced.colCount() == colCount);
+
+ size_t const noReducer = static_cast<size_t>(-1);
+ std::vector<size_t> reducer(colCount);
+ std::fill(reducer.begin(), reducer.end(), noReducer);
+
+ std::vector<size_t> reduceThese;
+
+ SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+ std::vector<DenseRow<uint64> > dense(rowCount);
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ dense[row].reset(colCount);
+ dense[row].addRow(toReduce, row);
+ }
+
+ std::vector<size_t> leadCols(rowCount);
+
+ std::vector<size_t> newReducers;
+
+ while (true) {
+ SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ if (dense[row].empty())
+ continue;
+ for (size_t col = 0; col < colCount; ++col) {
+ if (dense[row][col] == 0)
+ continue;
+ dense[row][col] %= modulus;
+ if (dense[row][col] == 0)
+ continue;
+ if (reducer[col] == noReducer) {
+ reducer[col] = reduced.rowCount();
+ newReducers.push_back(col);
+ MATHICGB_ASSERT(reduced.colCount() == colCount);
+ MATHICGB_ASSERT(dense[row].colCount() == colCount);
+ reduced.appendRowWithModulusNormalized(dense[row].mEntries, modulus);
+ dense[row].clear();
+ MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
+ } else {
+ leadCols[row] = col;
+ reduceThese.push_back(row);
+ }
+ break;
+ }
+ }
+ std::sort(newReducers.begin(), newReducers.end());
+ size_t const newReducerCount = newReducers.size();
+
+ size_t const reduceCount = reduceThese.size();
+ if (reduceCount == 0)
+ break;
+ //std::cout << "reducing " << reduceCount << " out of " << toReduce.rowCount() << std::;
+#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
+ for (size_t i = 0; i < reduceCount; ++i) {
+ //std::cout << omp_get_thread_num();
+ size_t row = reduceThese[i];
+ DenseRow<uint64>& denseRow = dense[row];
+ //for (size_t col = leadCols[row]; col < colCount; ++col) {
+ for (size_t j = 0; j < newReducerCount; ++j) {
+ size_t col = newReducers[j];
+ if (denseRow[col] == 0)
+ continue;
+ if (reducer[col] == noReducer)
+ continue;
+ denseRow.rowReduceByUnitary(reducer[col], reduced, modulus);
+ }
+ }
+ //std::cout << "done reducing that batch" << std::endl;
+ reduceThese.clear();
+ newReducers.clear();
+ }
+}
+
+void myReduceToEchelonForm2
+(SparseMatrix& toReduce,
+ SparseMatrix::Scalar modulus,
+ SparseMatrix& reduced,
+ size_t threadCount) {
+ // making no assumptions on toReduce.
+
+ size_t const colCount = toReduce.colCount();
+ reduced.clear(colCount);
+
+ SparseMatrix somewhatReduced;
+ somewhatReduced.ensureAtLeastThisManyColumns(colCount);
+
+ size_t const noReducer = static_cast<size_t>(-1);
+ std::vector<size_t> reducer(colCount);
+ std::fill(reducer.begin(), reducer.end(), noReducer);
+
+
+ std::vector<size_t> reduceThese;
+
+#ifdef _OPENMP
+ std::vector<DenseRow<uint64> > denseRowPerThread(threadCount);
+#else
+ DenseRow<uint64> denseRow;
+#endif
+
+ while (toReduce.rowCount() > 0) {
+ SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ SparseMatrix::RowIterator rowBegin = toReduce.rowBegin(row);
+ SparseMatrix::RowIterator rowEnd = toReduce.rowEnd(row);
+ if (rowBegin == rowEnd)
+ continue;
+ if (reducer[rowBegin.index()] == noReducer) {
+ reducer[rowBegin.index()] = reduced.rowCount();
+ reduced.appendRowAndNormalize(toReduce, row, modulus);
+ MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
+ } else
+ reduceThese.push_back(row);
+ }
+
+ size_t const reduceCount = reduceThese.size();
+ // std::cout << "reducing " << reduceCount << " out of " << toReduce.rowCount() << std::endl;
+#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
+ for (size_t i = 0; i < reduceCount; ++i) {
+#ifdef _OPENMP
+ DenseRow<uint64>& denseRow = denseRowPerThread[omp_get_thread_num()];
+#endif
+ size_t row = reduceThese[i];
+ denseRow.reset(colCount);
+ denseRow.addRow(toReduce, row);
+ for (size_t col = 0; col < colCount; ++col) {
+ if (denseRow[col] == 0)
+ continue;
+ if (reducer[col] == noReducer)
+ continue;
+ denseRow.rowReduceByUnitary(reducer[col], reduced, modulus);
+ }
+
+#pragma omp critical
+ {
+ denseRow.appendToWithModulus(somewhatReduced, modulus);
+ MATHICGB_ASSERT(reducer[somewhatReduced.rowBegin(somewhatReduced.rowCount() - 1).index()] == noReducer);
+ }
+ }
+ // std::cout << "done reducing that batch" << std::endl;
+ reduceThese.clear();
+ toReduce.swap(somewhatReduced);
+ somewhatReduced.clear();
+ }
+}
+
+void myReduceToEchelonForm
+(SparseMatrix const& toReduce,
+ SparseMatrix::Scalar modulus,
+ SparseMatrix& reduced) {
+ // making no assumptions on toReduce.
+
+ size_t const colCount = toReduce.colCount();
+ reduced.clear(colCount);
+
+ size_t const noReducer = static_cast<size_t>(-1);
+ std::vector<size_t> reducer(colCount);
+ std::fill(reducer.begin(), reducer.end(), noReducer);
+
+ DenseRow<uint64> denseRow;
+ SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ SparseMatrix::RowIterator rowEnd = toReduce.rowEnd(row);
+ SparseMatrix::RowIterator rowBegin = toReduce.rowBegin(row);
+ if (rowBegin == rowEnd) {
+ // I think we don't need to copy empty rows
+ //reduced.rowDone();
+ continue; // zero row
+ }
+ if (reducer[rowBegin.index()] == noReducer) {
+ reducer[rowBegin.index()] = reduced.rowCount();
+ reduced.appendRowAndNormalize(toReduce, row, modulus);
+ MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
+ continue;
+ }
+
+ denseRow.reset(colCount);
+ denseRow.addRow(toReduce, row);
+ for (size_t col = 0; col < colCount; ++col) {
+ //MATHICGB_ASSERT(reducer[col] != noReducer);
+ if (denseRow[col] == 0)
+ continue;
+ if (reducer[col] == noReducer)
+ continue;
+ denseRow.rowReduceByUnitary(reducer[col], reduced, modulus);
+ }
+ reduced.appendRowWithModulusNormalized(denseRow.mEntries, modulus);
+ size_t last = reduced.rowCount() - 1;
+ if (reduced.rowBegin(last) != reduced.rowEnd(last)) {
+ reducer[reduced.rowBegin(last).index()] = last;
+ MATHICGB_ASSERT(reduced.rowBegin(reduced.rowCount()-1).scalar() == 1);
+ }
+ }
+}
+
+class IOException : public std::runtime_error {
+ public:
+ IOException(): std::runtime_error("File error.") {}
+};
+
+template<class T>
+T readOne(FILE* file) {
+ T t;
+ if(fread(&t, sizeof(T), 1, file) != 1)
+ throw IOException();
+ return t;
+}
+
+template<class T>
+void writeOne(const T& t, FILE* file) {
+ if (fwrite(&t, sizeof(T), 1, file) != 1)
+ throw IOException();
+}
+
+template<class T>
+void writeMany(const std::vector<T>& v, FILE* file) {
+ if (v.empty())
+ return;
+ if(fwrite(&v[0], sizeof(T), v.size(), file) != v.size())
+ throw IOException();
+}
+
+template<class T>
+void readMany(FILE* file, size_t count, std::vector<T>& v) {
+ size_t const originalSize = v.size();
+ v.resize(originalSize + count);
+ if(fread(&v[originalSize], sizeof(T), count, file) != count)
+ throw IOException();
+}
+
+// Writes a SparseMatrix
+void writeSparseMatrix
+(const SparseMatrix& matrix, SparseMatrix::Scalar modulus, const std::string& fileName) {
+ const uint32 rowCount = matrix.rowCount();
+ const uint32 colCount = matrix.colCount();
+ const uint64 entryCount = matrix.entryCount();
+
+ FILE* file = fopen(fileName.c_str(), "w");
+ if (file == NULL)
+ throw IOException();
+
+ writeOne<uint32>(rowCount, file);
+ writeOne<uint32>(colCount, file);
+ writeOne<uint32>(modulus, file);
+ writeOne<uint64>(entryCount, file);
+
+ writeMany<uint16>(matrix.entries(), file);
+ writeMany<uint32>(matrix.colIndices(), file);
+
+ std::vector<uint32> sizes;
+ for (size_t row = 0; row < rowCount; ++row)
+ sizes.push_back(matrix.entryCountInRow(row));
+ writeMany<uint32>(sizes, file);
+
+ // todo: don't leak file on exception.
+ fclose(file);
+}
+
+// Reads a SparseMatrix without any fseeks and returns the modulus.
+SparseMatrix::Scalar readSparseMatrix(const std::string& fileName, SparseMatrix& matrix)
+{
+ FILE* file = fopen(fileName.c_str(), "rb");
+ if (file == NULL)
+ throw IOException();
+
+ uint32 const rowCount = readOne<uint32>(file);
+ uint32 const colCount = readOne<uint32>(file);
+ uint32 const modulus = readOne<uint32>(file);
+ uint64 const entryCount = readOne<uint64>(file);
+
+ std::vector<uint16> entries;
+ readMany(file, entryCount, entries);
+
+ std::vector<uint32> indices;
+ readMany(file, entryCount, indices);
+
+ std::vector<uint32> sizes;
+ readMany(file, rowCount, sizes);
+
+ matrix.setToAndTakeMemory(indices, entries, sizes, colCount);
+ return modulus;
+}
+
+// doesn't need to be fast.
+int integerLog10(unsigned int val) {
+ int ret = -1;
+ while (val != 0) {
+ val /= 10;
+ ret++;
+ }
+ return ret;
+}
+
+size_t integerPow10(int l) {
+ if (l < 0)
+ return 0;
+ size_t ret = 1;
+ for (; l > 0; --l)
+ ret *= 10;
+ return ret;
+}
+
+void printMap(std::map<int, size_t> const& m, std::ostream& out) {
+ std::map<int, size_t>::const_iterator it = m.begin();
+ std::map<int, size_t>::const_iterator end = m.end();
+ for (; it != end; ++it)
+ out << integerPow10(it->first) << ":\t" << it->second << '\n';
+}
+
+void makeLogHistogram
+(const std::vector<size_t>& numbers,
+ std::map<int, size_t>& logHis) {
+ logHis.clear();
+ std::vector<size_t>::const_iterator it = numbers.begin();
+ std::vector<size_t>::const_iterator end = numbers.end();
+ for (; it != end; ++it)
+ ++logHis[integerLog10(*it)];
+}
+
+void computeDensities
+(SparseMatrix const& matrix,
+ std::vector<size_t>& rowDensity,
+ std::vector<size_t>& colDensity) {
+ rowDensity.clear();
+ rowDensity.resize(matrix.rowCount());
+ colDensity.clear();
+ colDensity.resize(matrix.colCount());
+
+ size_t const rowCount = matrix.rowCount();
+ for (size_t row = 0; row < rowCount; ++row) {
+ MATHICGB_ASSERT(row < rowDensity.size());
+ SparseMatrix::RowIterator it = matrix.rowBegin(row);
+ SparseMatrix::RowIterator end = matrix.rowEnd(row);
+ for (; it != end; ++it) {
+ MATHICGB_ASSERT(it.index() < colDensity.size());
+ MATHICGB_ASSERT(it.scalar() != 0);
+ ++rowDensity[row];
+ ++colDensity[it.index()];
+ }
+ }
+}
+
+
+void printLogDensityHistograms
+(SparseMatrix const& matrix, std::ostream& out, const char* name = 0) {
+ std::vector<size_t> rowDensities;
+ std::vector<size_t> colDensities;
+ computeDensities(matrix, rowDensities, colDensities);
+
+ if (name != 0)
+ out << "/********** Density histogram (" << name << ") **********\n";
+ else
+ out << "/********** Density histogram ***********\n";
+
+ out << "Matrix has " << matrix.rowCount()
+ << " rows. log2(rows) = " << integerLog10(matrix.rowCount()) << '\n'
+ << "Matrix has " << matrix.colCount()
+ << " cols. log2(cols) = " << integerLog10(matrix.colCount()) << '\n';
+
+ out << "\nLog row density histogram:\ndens\trows\n";
+ std::map<int, size_t> rowLogHis;
+ makeLogHistogram(rowDensities, rowLogHis);
+ printMap(rowLogHis, out);
+
+ out << "\nLog col density histogram:\ndens\tcol\n";
+ std::map<int, size_t> colLogHis;
+ makeLogHistogram(colDensities, colLogHis);
+ printMap(colLogHis, out);
+
+ out << "\\****************************************\n";
+}
+
+// Takes the pivot rows from matrix and copies them into pivots; the
+// remaining rows go into nonPivots. Reorders columns so that the left
+// part of pivots is upper triangular, and sorts the rows of nonPivots
+// densest-first. Zero rows are ignored.
+void spliceMatrix(const SparseMatrix& matrix, SparseMatrix& pivots, SparseMatrix& nonPivots) {
+ const SparseMatrix::RowIndex rowCount = matrix.rowCount();
+ const SparseMatrix::ColIndex colCount = matrix.colCount();
+
+ static const SparseMatrix::RowIndex noPivot =
+ std::numeric_limits<SparseMatrix::RowIndex>::max();
+ std::vector<SparseMatrix::RowIndex> pivotRowOfCol(colCount);
+ std::fill(pivotRowOfCol.begin(), pivotRowOfCol.end(), noPivot);
+
+ // determine pivot rows and columns
+ for (size_t row = 0; row < rowCount; ++row) {
+ const SparseMatrix::ColIndex entryCount = matrix.entryCountInRow(row);
+ if (entryCount == 0)
+ continue; // ignore zero rows
+ const SparseMatrix::ColIndex pivotCol = matrix.leadCol(row);
+ SparseMatrix::RowIndex& pivotRow = pivotRowOfCol[pivotCol];
+ if (pivotRow == noPivot || entryCount < matrix.entryCountInRow(pivotRow))
+ pivotRow = row; // prefer sparse pivots
+ }
+
+ // permutation of columns to put pivots left without reordering
+ // columns in any other way.
+ std::vector<SparseMatrix::ColIndex> colPerm(colCount);
+ SparseMatrix::ColIndex columnsDecided = 0;
+
+ // choice of rows to make left of pivots matrix upper triangular
+ std::vector<SparseMatrix::RowIndex> pivotRows;
+
+ // Go through pivot columns to compute perm and pivotRows
+ for (size_t col = 0; col < colCount; ++col) {
+ if (pivotRowOfCol[col] != noPivot) {
+ colPerm[col] = columnsDecided; // pivot columns first
+ ++columnsDecided;
+ pivotRows.push_back(pivotRowOfCol[col]);
+ }
+ }
+ SparseMatrix::ColIndex minNonPivotCol = columnsDecided;
+
+ for (size_t col = 0; col < colCount; ++col) {
+ if (pivotRowOfCol[col] == noPivot) {
+ colPerm[col] = columnsDecided; // non-pivot columns last
+ ++columnsDecided;
+ }
+ }
+ MATHICGB_ASSERT(columnsDecided == colCount);
+
+ // choice of rows for the nonPivots matrix, sorted by decreasing density.
+ std::vector<SparseMatrix::RowIndex> nonPivotRows;
+
+ // put density first in a pair to make sorting easy.
+ // TODO: use sorting object instead of making pairs like this.
+ std::vector<std::pair<SparseMatrix::ColIndex, SparseMatrix::RowIndex> > nonPivotData;
+ for (size_t row = 0; row < rowCount; ++row) {
+ const SparseMatrix::ColIndex entryCount = matrix.entryCountInRow(row);
+ if (entryCount == 0)
+ continue; // ignore zero rows
+ if (row != pivotRowOfCol[matrix.leadCol(row)])
+ nonPivotData.push_back(std::make_pair(entryCount, row));
+ }
+ std::sort(nonPivotData.rbegin(), nonPivotData.rend());
+ for (size_t i = 0; i < nonPivotData.size(); ++i)
+ nonPivotRows.push_back(nonPivotData[i].second);
+
+ // create matrices
+ struct LocalFunction {
+ void makeMatrix(const SparseMatrix& matrix,
+ const std::vector<SparseMatrix::RowIndex>& rowIndices,
+ const std::vector<SparseMatrix::ColIndex>& colPerm,
+ const SparseMatrix::ColIndex minNonPivotCol,
+ SparseMatrix& out) {
+ typedef std::vector<SparseMatrix::RowIndex>::const_iterator Iter;
+ Iter end = rowIndices.end();
+
+ // reserve space
+ out.clear(matrix.colCount());
+ size_t entryCount = 0;
+ for (Iter it = rowIndices.begin(); it != end; ++it)
+ entryCount += matrix.entryCountInRow(*it);
+ out.reserveEntries(entryCount);
+ out.reserveRows(rowIndices.size());
+
+ for (Iter it = rowIndices.begin(); it != end; ++it) {
+ // Do two passes to avoid having to sort indices. They will
+ // be in increasing order in this way.
+ SparseMatrix::RowIterator rowBegin = matrix.rowBegin(*it);
+ SparseMatrix::RowIterator rowEnd = matrix.rowEnd(*it);
+ for (SparseMatrix::RowIterator rowIt = rowBegin; rowIt != rowEnd; ++rowIt)
+ if (colPerm[rowIt.index()] < minNonPivotCol) // pivot columns first
+ out.appendEntry(colPerm[rowIt.index()], rowIt.scalar());
+ for (SparseMatrix::RowIterator rowIt = rowBegin; rowIt != rowEnd; ++rowIt)
+ if (colPerm[rowIt.index()] >= minNonPivotCol) // then non-pivot columns
+ out.appendEntry(colPerm[rowIt.index()], rowIt.scalar());
+ out.rowDone();
+ }
+ }
+ } f; // static functions on local structs are not allowed :-(
+ f.makeMatrix(matrix, pivotRows, colPerm, minNonPivotCol, pivots);
+ f.makeMatrix(matrix, nonPivotRows, colPerm, minNonPivotCol, nonPivots);
+}
+
+void concatenateMatricesHorizontal
+(const SparseMatrix& a, const SparseMatrix& b, SparseMatrix& concatenation) {
+ MATHICGB_ASSERT(a.rowCount() == b.rowCount());
+ // todo: check overflow of colcount type
+ concatenation.clear(a.colCount() + b.colCount());
+ if (concatenation.colCount() < a.colCount()) {
+ MATHICGB_ASSERT(false);
+ throw std::overflow_error
+ ("Too many columns in matrices being concatenated.");
+ }
+
+ const SparseMatrix::ColIndex colBOffset = a.colCount();
+ const SparseMatrix::RowIndex rowCount = a.rowCount();
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ {
+ auto end = a.rowEnd(row);
+ for (auto it = a.rowBegin(row); it != end; ++it)
+ concatenation.appendEntry(it.index(), it.scalar());
+ }
+ {
+ auto end = b.rowEnd(row);
+ for (auto it = b.rowBegin(row); it != end; ++it)
+ concatenation.appendEntry(it.index() + colBOffset, it.scalar());
+ }
+ concatenation.rowDone();
+ }
+}
+
+void F4MatrixReducer::reduce
+(const PolyRing& ring, QuadMatrix& matrix, SparseMatrix& newPivots) {
+ if (ring.charac() > std::numeric_limits<SparseMatrix::Scalar>::max())
+ throw std::overflow_error("Too large modulus in F4 matrix computation.");
+
+ const int threadCount = 1;
+ SparseMatrix::Scalar modulus = ring.charac();
+
+ SparseMatrix reducedD;
+ {
+ const SparseMatrix::ColIndex pivotColCount = matrix.topLeft.colCount();
+
+ SparseMatrix matrixAB;
+ {
+ SparseMatrix tmp;
+ concatenateMatricesHorizontal(matrix.topLeft, matrix.topRight, tmp);
+ sortRowsByIncreasingPivots(tmp, matrixAB);
+ }
+
+ SparseMatrix matrixCD;
+ concatenateMatricesHorizontal
+ (matrix.bottomLeft, matrix.bottomRight, matrixCD);
+
+ myReduce(matrixCD, matrixAB, modulus, reducedD, threadCount);
+ reducedD.trimLeadingZeroColumns(pivotColCount);
+ }
+
+ myReduceToEchelonForm5(reducedD, modulus, threadCount);
+ sortRowsByIncreasingPivots(reducedD, newPivots);
+}
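The DenseRow<uint64> code above deliberately lets entries grow past the modulus and only reduces them occasionally (takeModulus, normalize), so most row operations avoid the expensive % operation. A minimal standalone sketch of this delayed-reduction idea; the function and variable names are illustrative, not from the patch:

#include <cstdint>
#include <vector>

// Sketch: accumulate multiples of 16-bit rows into 64-bit entries and
// reduce modulo p only once at the end. Each term is < 2^16 * 2^16 = 2^32,
// so about 2^32 terms fit in a uint64_t before overflow could occur.
void addRowsDelayedModulus(std::vector<uint64_t>& accumulator,
                           const std::vector<std::vector<uint16_t> >& rows,
                           const std::vector<uint16_t>& multipliers,
                           uint16_t modulus) {
  for (size_t r = 0; r < rows.size(); ++r)
    for (size_t col = 0; col < accumulator.size(); ++col)
      accumulator[col] += static_cast<uint64_t>(rows[r][col]) * multipliers[r];
  for (size_t col = 0; col < accumulator.size(); ++col)
    accumulator[col] %= modulus; // single reduction at the end
}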
diff --git a/src/mathicgb/F4MatrixReducer.hpp b/src/mathicgb/F4MatrixReducer.hpp
new file mode 100755
index 0000000..17a4771
--- /dev/null
+++ b/src/mathicgb/F4MatrixReducer.hpp
@@ -0,0 +1,18 @@
+#ifndef F4_MATRIX_REDUCER_GUARD
+#define F4_MATRIX_REDUCER_GUARD
+
+class QuadMatrix;
+class SparseMatrix;
+class PolyRing;
+
+/** Class that reduces an F4 matrix represented as a QuadMatrix. The
+ answer you get is the submatrix that contains new pivots. */
+class F4MatrixReducer {
+public:
+ void reduce
+ (const PolyRing& ring, QuadMatrix& matrix, SparseMatrix& newPivots);
+
+private:
+};
+
+#endif
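For orientation, a minimal sketch of how the class is called; this is a hypothetical helper mirroring the F4Reducer.cpp hunk below:

#include "mathicgb/stdinc.h"
#include "mathicgb/F4MatrixReducer.hpp"
#include "mathicgb/QuadMatrix.hpp"
#include "mathicgb/SparseMatrix.hpp"
#include "mathicgb/PolyRing.hpp"

// Hypothetical call site: qm is a QuadMatrix already built by
// F4MatrixBuilder; newPivots receives the reduced rows that contain
// the new pivots.
void reduceF4Matrix(const PolyRing& ring, QuadMatrix& qm, SparseMatrix& newPivots) {
  F4MatrixReducer reducer;
  reducer.reduce(ring, qm, newPivots);
}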
diff --git a/src/mathicgb/F4Reducer.cpp b/src/mathicgb/F4Reducer.cpp
index 008d768..19e5188 100755
--- a/src/mathicgb/F4Reducer.cpp
+++ b/src/mathicgb/F4Reducer.cpp
@@ -2,6 +2,7 @@
#include "F4Reducer.hpp"
#include "F4MatrixBuilder.hpp"
+#include "F4MatrixReducer.hpp"
F4Reducer::F4Reducer(const PolyRing& ring, std::unique_ptr<Reducer> fallback):
mFallback(std::move(fallback)), mRing(ring) {
@@ -41,6 +42,12 @@ std::unique_ptr<Poly> F4Reducer::classicReduceSPoly
builder.buildMatrixAndClear(qm);
}
+ SparseMatrix reduced;
+ {
+ F4MatrixReducer red;
+ red.reduce(basis.ring(), qm, reduced);
+ }
+
return p;
}
diff --git a/src/mathicgb/PolyRing.hpp b/src/mathicgb/PolyRing.hpp
index 5c6a471..803dbd2 100755
--- a/src/mathicgb/PolyRing.hpp
+++ b/src/mathicgb/PolyRing.hpp
@@ -14,6 +14,7 @@
#define EQ 0
#define GT 1
+/** Returns a^-1 mod modulus. It is required that 0 < a < modulus. */
template<class T>
T modularInverse(T a, T modulus) {
// we do two turns of the extended Euclidean algorithm per
@@ -21,7 +22,7 @@ T modularInverse(T a, T modulus) {
// but we avoid that by representing every other x as its negative,
// which is the value minusLastX. This way no negative values show
// up.
- MATHICGB_ASSERT(a != 0);
+ MATHICGB_ASSERT(0 < a);
MATHICGB_ASSERT(a < modulus);
#ifdef MATHICGB_DEBUG
T origA = a;
@@ -36,7 +37,7 @@ T modularInverse(T a, T modulus) {
// first turn
if (a == 1)
break;
- T const firstQuotient = b / a;
+ const T firstQuotient = b / a;
b -= firstQuotient * a;
minusLastX += firstQuotient * x;
@@ -47,7 +48,7 @@ T modularInverse(T a, T modulus) {
x = modulus - minusLastX;
break;
}
- T const secondQuotient = a / b;
+ const T secondQuotient = a / b;
a -= secondQuotient * b;
x += secondQuotient * minusLastX;
}
@@ -57,6 +58,43 @@ T modularInverse(T a, T modulus) {
return x;
}
+template<class T>
+struct ModularProdType {};
+template<> struct ModularProdType<uint8> {typedef uint16 type;};
+template<> struct ModularProdType<uint16> {typedef uint32 type;};
+template<> struct ModularProdType<uint32> {typedef uint64 type;};
+template<> struct ModularProdType<int8> {typedef int16 type;};
+template<> struct ModularProdType<int16> {typedef int32 type;};
+template<> struct ModularProdType<int32> {typedef int64 type;};
+
+/** Returns a*b mod modulus. It is required that 0 <= a, b < modulus. */
+template<class T, class BigT = typename ModularProdType<T>::type>
+T modularProduct(T a, T b, T modulus) {
+ MATHICGB_ASSERT(0 <= a);
+ MATHICGB_ASSERT(a < modulus);
+ MATHICGB_ASSERT(0 <= b);
+ MATHICGB_ASSERT(b < modulus);
+ BigT bigProd = static_cast<BigT>(a) * b;
+ MATHICGB_ASSERT(a == 0 || bigProd / a == b);
+ return static_cast<T>(bigProd % modulus);
+}
+
+/** Returns -a mod modulus. It is required that 0 <= a < modulus. */
+template<class T>
+T modularNegative(T a, T modulus) {
+ MATHICGB_ASSERT(0 <= a);
+ MATHICGB_ASSERT(a < modulus);
+ return a == 0 ? 0 : modulus - a;
+}
+
+/** Returns -a mod modulus. It is required that 0 < a < modulus. */
+template<class T>
+T modularNegativeNonZero(T a, T modulus) {
+ MATHICGB_ASSERT(0 < a);
+ MATHICGB_ASSERT(a < modulus);
+ return modulus - a;
+}
+
typedef int exponent ;
typedef long coefficient;
typedef exponent *vecmonomial; // includes a component
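A quick worked example of the modular helpers added above (a sketch; the asserted values just restate the arithmetic in the comments):

#include "mathicgb/stdinc.h"
#include "mathicgb/PolyRing.hpp"
#include <cassert>

int main() {
  // 2 * 51 = 102 = 1 (mod 101), so the inverse of 2 mod 101 is 51.
  assert(modularInverse<uint16>(2, 101) == 51);
  // 7 * 31 = 217 = 2*101 + 15, so the product is 15 mod 101.
  assert(modularProduct<uint16>(7, 31, 101) == 15);
  // -5 mod 101 is 101 - 5 = 96.
  assert(modularNegative<uint16>(5, 101) == 96);
  assert(modularNegativeNonZero<uint16>(5, 101) == 96);
  return 0;
}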
diff --git a/src/mathicgb/SparseMatrix.cpp b/src/mathicgb/SparseMatrix.cpp
index 1fac2f2..d60b8aa 100755
--- a/src/mathicgb/SparseMatrix.cpp
+++ b/src/mathicgb/SparseMatrix.cpp
@@ -1,3 +1,7 @@
#include "stdinc.h"
#include "SparseMatrix.hpp"
+std::ostream& operator<<(std::ostream& out, SparseMatrix& matrix) {
+ matrix.print(out);
+ return out;
+}
diff --git a/src/mathicgb/SparseMatrix.hpp b/src/mathicgb/SparseMatrix.hpp
index 808c266..69f1963 100755
--- a/src/mathicgb/SparseMatrix.hpp
+++ b/src/mathicgb/SparseMatrix.hpp
@@ -369,4 +369,6 @@ private:
ColIndex mColCount;
};
+std::ostream& operator<<(std::ostream& out, SparseMatrix& matrix);
+
#endif
diff --git a/src/mathicgb/stdinc.h b/src/mathicgb/stdinc.h
index dc4333d..6189c7c 100755
--- a/src/mathicgb/stdinc.h
+++ b/src/mathicgb/stdinc.h
@@ -1,7 +1,7 @@
-#ifdef STDINC_GUARD
+#ifdef MATHICGB_STDINC_GUARD
#error stdinc.h included twice. Only include stdinc.h once per cpp file.
#endif
-#define STDINC_GUARD
+#define MATHICGB_STDINC_GUARD
#ifdef _MSC_VER // For Microsoft Compiler in Visual Studio C++.
#pragma warning (push, 1) // Reduce warning level for GMP headers.
@@ -68,6 +68,11 @@ typedef unsigned int uint32;
typedef unsigned short uint16;
typedef unsigned char uint8;
+typedef signed long long int64;
+typedef signed int int32;
+typedef signed short int16;
+typedef signed char int8;
+
static const size_t BitsPerByte = 8;
static const size_t MemoryAlignment = sizeof(void*);
diff --git a/src/test/F4MatrixBuilder.cpp b/src/test/F4MatrixBuilder.cpp
index d9b0630..d475592 100755
--- a/src/test/F4MatrixBuilder.cpp
+++ b/src/test/F4MatrixBuilder.cpp
@@ -7,6 +7,7 @@
#include "mathicgb/Ideal.hpp"
#include "mathicgb/PolyBasis.hpp"
#include "mathicgb/io-util.hpp"
+
#include <gtest/gtest.h>
#include <memory>
diff --git a/src/test/F4MatrixReducer.cpp b/src/test/F4MatrixReducer.cpp
new file mode 100755
index 0000000..47251ad
--- /dev/null
+++ b/src/test/F4MatrixReducer.cpp
@@ -0,0 +1,96 @@
+#include "mathicgb/stdinc.h"
+
+#include "mathicgb/F4MatrixReducer.hpp"
+#include "mathicgb/SparseMatrix.hpp"
+#include "mathicgb/QuadMatrix.hpp"
+#include "mathicgb/io-util.hpp"
+#include "mathicgb/Poly.hpp"
+#include <gtest/gtest.h>
+#include <sstream>
+
+TEST(F4MatrixReducer, Reduce) {
+ auto ring = ringFromString("101 6 1\n1 1 1 1 1 1");
+
+ QuadMatrix m;
+ m.ring = ring.get();
+
+ Poly p(ring.get());
+ std::istringstream in("a1+a2+a3+a4+b1+b2+b3+b4+b5");
+ p.parse(in);
+ size_t count = 0;
+ for (Poly::iterator it = p.begin(); it != p.end(); ++it) {
+ monomial mono = it.getMonomial();
+ if (count < 4)
+ m.leftColumnMonomials.push_back(mono);
+ else
+ m.rightColumnMonomials.push_back(mono);
+ ++count;
+ }
+
+ // top left
+ m.topLeft.clear(4);
+ m.topLeft.appendEntry(0, 1);
+ m.topLeft.appendEntry(1, 2);
+ m.topLeft.appendEntry(3, 3);
+ m.topLeft.rowDone();
+ m.topLeft.appendEntry(1, 1);
+ m.topLeft.appendEntry(2, 3);
+ m.topLeft.rowDone();
+ m.topLeft.appendEntry(2, 1);
+ m.topLeft.appendEntry(3, 7);
+ m.topLeft.rowDone();
+ m.topLeft.appendEntry(3, 1);
+ m.topLeft.rowDone();
+
+ // top right
+ m.topRight.clear(5);
+ m.topRight.appendEntry(2,8);
+ m.topRight.rowDone();
+ m.topRight.appendEntry(3,9);
+ m.topRight.rowDone();
+ m.topRight.appendEntry(4,10);
+ m.topRight.rowDone();
+ m.topRight.rowDone();
+
+ // bottom left
+ m.bottomLeft.clear(4);
+ m.bottomLeft.appendEntry(0, 1);
+ m.bottomLeft.appendEntry(1, 1);
+ m.bottomLeft.appendEntry(3, 24);
+ m.bottomLeft.rowDone();
+ m.bottomLeft.appendEntry(1, 9);
+ m.bottomLeft.rowDone();
+
+ // bottom right
+ m.bottomRight.clear(5);
+ m.bottomRight.appendEntry(0, 1);
+ m.bottomRight.appendEntry(2, 12);
+ m.bottomRight.appendEntry(3, 13);
+ m.bottomRight.appendEntry(4, 41);
+ m.bottomRight.rowDone();
+ m.bottomRight.appendEntry(1, 2);
+ m.bottomRight.appendEntry(3, 11);
+ m.bottomRight.rowDone();
+
+ MATHICGB_ASSERT(m.debugAssertValid());
+ const char* origStr =
+ "Left columns: a a2 a3 a4\n"
+ "Right columns: b b2 b3 b4 b5\n"
+ "0: 0#1 1#2 3#3 | 0: 2#8 \n"
+ "1: 1#1 2#3 | 1: 3#9 \n"
+ "2: 2#1 3#7 | 2: 4#10 \n"
+ "3: 3#1 | 3: \n"
+ " | \n"
+ "0: 0#1 1#1 3#24 | 0: 0#1 2#12 3#13 4#41\n"
+ "1: 1#9 | 1: 1#2 3#11 \n";
+ ASSERT_EQ(origStr, m.toString());
+
+ SparseMatrix reduced;
+ F4MatrixReducer red;
+ red.reduce(*ring, m, reduced);
+
+ const char* redStr =
+ "0: 0#1 2#4 3#22 4#11\n"
+ "1: 1#1 3#66 4#34\n";
+ ASSERT_EQ(redStr, reduced.toString());
+}