[mathicgb] 125/393: merge
Doug Torrance
dtorrance-guest at moszumanska.debian.org
Fri Apr 3 15:58:46 UTC 2015
This is an automated email from the git hooks/post-receive script.
dtorrance-guest pushed a commit to branch upstream
in repository mathicgb.
commit b7e4378c6ef19f628ae28e66f4893a49b1ddde45
Merge: 1f656d0 a382a51
Author: Sharwan Kumar Tiwari <stiwari at mumin.mathematik.uni-kl.de>
Date: Thu Dec 13 18:37:06 2012 +0100
merge
.gitignore | 3 +
Makefile.am | 18 +-
build/vs12/mathicgb-exe/mathicgb-exe.vcxproj | 84 +--
.../vs12/mathicgb-exe/mathicgb-exe.vcxproj.filters | 38 ++
build/vs12/mathicgb-lib/mathicgb-lib.vcxproj | 100 +++-
.../vs12/mathicgb-lib/mathicgb-lib.vcxproj.filters | 21 +
build/vs12/mathicgb-test/mathicgb-test.vcxproj | 70 ++-
build/vs12/mathicgb.sln | 3 -
configure.ac | 2 +-
src/cli/CommonParams.cpp | 115 +++++
src/cli/CommonParams.hpp | 51 ++
src/cli/GBAction.cpp | 138 +++++
src/cli/GBAction.hpp | 38 ++
src/cli/GBCommonParams.cpp | 83 +++
src/cli/GBCommonParams.hpp | 23 +
src/cli/GBMain.cpp | 363 +------------
src/cli/HelpAction.cpp | 20 +
src/cli/HelpAction.hpp | 11 +
src/cli/MatrixAction.cpp | 129 +++++
src/cli/MatrixAction.hpp | 31 ++
src/cli/SigGBAction.cpp | 127 +++++
src/cli/SigGBAction.hpp | 36 ++
src/mathicgb/Atomic.hpp | 389 ++++++++++++++
src/mathicgb/BjarkeGeobucket2.cpp | 10 +-
src/mathicgb/BuchbergerAlg.cpp | 30 +-
src/mathicgb/BuchbergerAlg.hpp | 15 +-
src/mathicgb/CFile.cpp | 28 +
src/mathicgb/CFile.hpp | 37 ++
src/mathicgb/F4MatrixBuilder.cpp | 528 +++++++++----------
src/mathicgb/F4MatrixBuilder.hpp | 80 ++-
src/mathicgb/F4MatrixReducer.cpp | 273 +++++-----
src/mathicgb/F4MatrixReducer.hpp | 31 +-
src/mathicgb/F4Reducer.cpp | 139 ++---
src/mathicgb/F4Reducer.hpp | 48 +-
src/mathicgb/FixedSizeMonomialMap.h | 370 ++++++++++++++
src/mathicgb/LogDomain.cpp | 21 +
src/mathicgb/LogDomain.hpp | 99 ++++
src/mathicgb/LogDomainSet.hpp | 39 ++
src/mathicgb/MonomialMap.hpp | 539 +++++++-------------
src/mathicgb/PolyRing.cpp | 55 +-
src/mathicgb/PolyRing.hpp | 74 ++-
src/mathicgb/QuadMatrix.cpp | 561 +++++++++++++--------
src/mathicgb/QuadMatrix.hpp | 22 +
src/mathicgb/QuadMatrixBuilder.cpp | 286 ++---------
src/mathicgb/QuadMatrixBuilder.hpp | 138 +----
src/mathicgb/Reducer.cpp | 11 +-
src/mathicgb/Reducer.hpp | 10 +-
src/mathicgb/SparseMatrix.cpp | 182 ++++++-
src/mathicgb/SparseMatrix.hpp | 39 +-
src/mathicgb/TypicalReducer.cpp | 4 -
src/mathicgb/TypicalReducer.hpp | 2 -
src/mathicgb/stdinc.h | 93 +++-
src/test/F4MatrixBuilder.cpp | 24 +-
src/test/F4MatrixReducer.cpp | 11 +-
src/test/QuadMatrixBuilder.cpp | 118 ++---
src/test/SparseMatrix.cpp | 14 +-
src/test/gb-test.cpp | 10 +-
src/test/poly-test.cpp | 2 +-
58 files changed, 3650 insertions(+), 2186 deletions(-)
diff --cc src/mathicgb/F4MatrixReducer.cpp
index 0d7b385,41c33a9..019bd00
mode 100644,100755..100644
--- a/src/mathicgb/F4MatrixReducer.cpp
+++ b/src/mathicgb/F4MatrixReducer.cpp
@@@ -404,220 -375,23 +375,228 @@@ namespace
}
}
+// todo: use auto instead of these typedefs where possible/reasonable
+// todo: do SparseMatrix::Scalar instead of Scalar and remove this typedef :: DONE
+//typedef SparseMatrix::Scalar Scalar; :: DONE
+//typedef SparseMatrix::RowIndex RowIndex; // todo: same :: DONE
+//typedef SparseMatrix::ColIndex ColIndex; // todo: same :: DONE
+
+//Scalar modPrime = 11; // todo: remove this variable :: DONE
+
+// Usage sketch for the SharwanMatrix class declared below:
+const SharwanMatrix m1;
+int x = m1(2, 3);
+
+SharwanMatrix m2;
+m2(2, 3) = 5;
+
+class SharwanMatrix {
+public:
+// typedefs for scalar, row index and col index
+// typedef Row to be your representation of a row
+
+ // operator[] cannot take a (row, col) pair in C++11, so use operator()
+ // for element access and keep operator[] for whole-row access.
+ const Scalar& operator()(RowIndex row, ColIndex col) const {return mMatrix[row][col];}
+ Scalar& operator()(RowIndex row, ColIndex col) {return mMatrix[row][col];}
+
+ Row& operator[](RowIndex row) {return mMatrix[row];}
+ const Row& operator[](RowIndex row) const {return mMatrix[row];}
+
+ // Example of a setter. Do not add setters for the modulus, the row count or
+ // the col count; the only settable state is the entries of the matrix.
+ void setX(int value) {mX = value;}
+
+
+// store matrix, modulus, rowCount and colCount
+// accessor for getting modulus: modulus()
+private:
+ int mX; // todo: remove, just example
+ // all member variables go here. member x is written mX.
+};
+
+void addRowMultipleInplace(
+ std::vector<std::vector<SparseMatrix::Scalar> >& matrix,
+ const SparseMatrix::RowIndex addRow,
+ const SparseMatrix::Scalar multiple,
+ const SparseMatrix::RowIndex row,
+ const SparseMatrix::ColIndex leadingCol,
+ const SparseMatrix::ColIndex colCount,
+ const SparseMatrix::Scalar modulus
+) {
+ assert(addRow < matrix.size());
+ assert(row < matrix.size());
+ assert(row != addRow);
+ assert(leadingCol < colCount);
+ assert(matrix[row].size() == colCount);
+ assert(matrix[addRow].size() == colCount);
+ for(auto col = leadingCol; col < colCount; ++col){
+ const auto product = modularProduct
+ (multiple, matrix[addRow][col], modulus);
+ matrix[row][col] = modularSum(matrix[row][col], product, modulus);
+ }
+}
+
+void makeRowUnitary(
+ std::vector<std::vector<SparseMatrix::Scalar>>& matrix,
+ const SparseMatrix::RowIndex row,
+ const SparseMatrix::ColIndex colCount,
+ const SparseMatrix::ColIndex leadingCol,
+ const SparseMatrix::Scalar modulus
+) {
+ assert(row < matrix.size());
+ assert(matrix[row].size() == colCount);
+ assert(leadingCol < colCount);
+ assert(modulus > 1);
+ const auto leadingScalar = matrix[row][leadingCol];
+ assert(leadingScalar != 0);
+ auto multiply = modularInverse(leadingScalar, modulus);
+ for(SparseMatrix::ColIndex col = leadingCol; col < colCount; ++col)
+ matrix[row][col] = modularProduct(matrix[row][col], multiply, modulus);
+
+ // todo: use modularProduct on above line ::DONE
+}
+
+// todo: make this take a parameter startAtCol ::DONE
+SparseMatrix::ColIndex leadingColumn(
+ const std::vector<std::vector<SparseMatrix::Scalar>>& matrix,
+ const SparseMatrix::RowIndex row,
+ const SparseMatrix::ColIndex colCount,
+ SparseMatrix::ColIndex startAtCol
+) {
+ assert(row < matrix.size());
+ assert(matrix[row].size() == colCount);
+ for(auto col = startAtCol; col < colCount; ++col){
+ if(matrix[row][col] != 0)
+ return col;
+ }
+ return colCount;
+}
+
+void rowReducedEchelonMatrix(
+ std::vector<std::vector<SparseMatrix::Scalar> >& matrix,
+ const SparseMatrix::ColIndex colCount,
+ const SparseMatrix::Scalar modulus
+) {
+ assert(matrix.empty() || matrix[0].size() == colCount);
+ const auto rowCount = matrix.size();
+ // pivotRowOfCol[i] is the row of the pivot in column i, or rowCount
+ // if no pivot has been identified in that column so far.
+ std::vector<SparseMatrix::RowIndex> pivotRowOfCol(colCount, rowCount);
+ for(SparseMatrix::RowIndex row = 0; row < rowCount; ++row){
+ SparseMatrix::ColIndex leadingCol = 0;
+ while (true) { // reduce row by previous pivots
+ leadingCol = leadingColumn(matrix, row, colCount, leadingCol);
+ if(leadingCol==colCount)
+ break; // row was zero
+ const auto pivotRow = pivotRowOfCol[leadingCol];
+ if(pivotRow == rowCount) {
+ makeRowUnitary(matrix, row, colCount, leadingCol, modulus);
+ pivotRowOfCol[leadingCol] = row;
+ break; // row is now a pivot
+ }
+ const auto multiple = modularNegative(matrix[row][leadingCol], modulus);
+ addRowMultipleInplace
+ (matrix, pivotRow, multiple, row, leadingCol, colCount, modulus);
+ }
+ }
+}
+
+void reduceToEchelonFormShrawan
+ (SparseMatrix& toReduce, SparseMatrix::Scalar modulus, int threadCount)
+{
+ const SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+ const SparseMatrix::ColIndex colCount = toReduce.colCount();
+
+ // Convert input matrix to dense format
+ std::vector<std::vector<SparseMatrix::Scalar>> matrix(rowCount);
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ MATHICGB_ASSERT(!toReduce.emptyRow(row));
+ matrix[row].resize(colCount);
+ const auto end = toReduce.rowEnd(row);
+ for (auto it = toReduce.rowBegin(row); it != end; ++it) {
+ MATHICGB_ASSERT(it.index() < colCount);
+ matrix[row][it.index()] = it.scalar();
+ }
+ }
+
+ // todo: make modPrime a parameter and rename it to modulus. :: DONE
+ // modPrime = modulus; :: DONE
+ rowReducedEchelonMatrix(matrix, colCount, modulus);
+
+ // convert reduced matrix to SparseMatrix.
+ toReduce.clear(colCount);
+ for (size_t row = 0; row < rowCount; ++row) {
+ bool rowIsZero = true;
+ for (size_t col = 0; col < colCount; ++col) {
+ if (matrix[row][col] != 0) {
+ rowIsZero = false;
+ toReduce.appendEntry(col, matrix[row][col]);
+ }
+ }
+ if (!rowIsZero)
+ toReduce.rowDone();
+ }
+}
+
+void reduceToEchelonFormShrawanDelayedModulus
+ (SparseMatrix& toReduce, SparseMatrix::Scalar modulus, int threadCount)
+{
+ // todo: implement delayed modulus
+ const SparseMatrix::RowIndex rowCount = toReduce.rowCount();
+ const SparseMatrix::ColIndex colCount = toReduce.colCount();
+
+ // Convert input matrix to dense format
+ std::vector<std::vector<SparseMatrix::Scalar>> matrix(rowCount);
+ for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ MATHICGB_ASSERT(!toReduce.emptyRow(row));
+ matrix[row].resize(colCount);
+ const auto end = toReduce.rowEnd(row);
+ for (auto it = toReduce.rowBegin(row); it != end; ++it) {
+ MATHICGB_ASSERT(it.index() < colCount);
+ matrix[row][it.index()] = it.scalar();
+ }
+ }
+
+ rowReducedEchelonMatrix(matrix, colCount, modulus);
+
+ // convert reduced matrix to SparseMatrix.
+ toReduce.clear(colCount);
+ for (size_t row = 0; row < rowCount; ++row) {
+ bool rowIsZero = true;
+ for (size_t col = 0; col < colCount; ++col) {
+ if (matrix[row][col] != 0) {
+ rowIsZero = false;
+ toReduce.appendEntry(col, matrix[row][col]);
+ }
+ }
+ if (!rowIsZero)
+ toReduce.rowDone();
+ }
+}
+
- SparseMatrix F4MatrixReducer::reduce(const QuadMatrix& matrix) {
- MATHICGB_ASSERT(mThreadCount >= 1);
+ SparseMatrix F4MatrixReducer::reduceToBottomRight(const QuadMatrix& matrix) {
MATHICGB_ASSERT(matrix.debugAssertValid());
- if (tracingLevel >= 3)
- matrix.printSizes(std::cerr);
+ if (::logs::F4MatrixReduce.enabled())
+ matrix.printSizes(::logs::F4MatrixReduce.stream());
+ return reduce(matrix, mModulus);
+ }
- SparseMatrix newPivots(::reduce(matrix, mModulus, mThreadCount));
+ SparseMatrix F4MatrixReducer::reducedRowEchelonForm(
+ const SparseMatrix& matrix
+ ) {
- return reduceToEchelonForm(matrix, mModulus);
+ const bool useShrawan = true;
+ const bool useDelayedModulus = false;
+ if (useShrawan) {
+ if (useDelayedModulus)
- reduceToEchelonFormShrawanDelayedModulus
- (newPivots, mModulus, mThreadCount);
++ reduceToEchelonFormShrawanDelayedModulus(matrix, mModulus);
+ else
- reduceToEchelonFormShrawan(newPivots, mModulus, mThreadCount);
++ reduceToEchelonFormShrawan(matrix, mModulus);
+ } else
- reduceToEchelonForm(newPivots, mModulus, mThreadCount);
- return std::move(newPivots);
++ reduceToEchelonForm(matrix, mModulus);
++ return std::move(matrix);
+ }
+
+ SparseMatrix F4MatrixReducer::reducedRowEchelonFormBottomRight(
+ const QuadMatrix& matrix
+ ) {
+ return reducedRowEchelonForm(reduceToBottomRight(matrix));
}
namespace {
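Note on the helpers used above: the dense elimination code calls modularSum, modularProduct, modularNegative and modularInverse, which are defined elsewhere in mathicgb and are not part of this hunk. Purely for orientation, here is a minimal sketch of what such helpers could look like for a small prime modulus; the names mirror the calls above, but the actual mathicgb declarations and types may differ.

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical stand-ins; "Scalar" here is just a 16-bit unsigned type and
// need not match SparseMatrix::Scalar exactly.
typedef uint16_t Scalar;

inline Scalar modularSum(Scalar a, Scalar b, Scalar modulus) {
  const uint32_t sum = uint32_t(a) + b; // widen to avoid overflow
  return Scalar(sum >= modulus ? sum - modulus : sum);
}

inline Scalar modularNegative(Scalar a, Scalar modulus) {
  return a == 0 ? 0 : Scalar(modulus - a); // additive inverse mod modulus
}

inline Scalar modularProduct(Scalar a, Scalar b, Scalar modulus) {
  return Scalar((uint32_t(a) * b) % modulus); // widen to avoid overflow
}

inline Scalar modularInverse(Scalar a, Scalar modulus) {
  // Multiplicative inverse modulo a prime via Fermat's little theorem:
  // a^(modulus-2) mod modulus. Adequate for small prime moduli.
  assert(a != 0);
  uint32_t result = 1;
  uint32_t base = a;
  for (uint32_t exp = uint32_t(modulus) - 2; exp > 0; exp >>= 1) {
    if (exp & 1)
      result = (result * base) % modulus;
    base = (base * base) % modulus;
  }
  return Scalar(result);
}

The "delayed modulus" variant that reduceToEchelonFormShrawanDelayedModulus leaves as a todo would typically accumulate row operations in a wider integer type and reduce mod the modulus only when overflow threatens; a hypothetical sketch, not part of the commit:

// Each multiply-add contributes less than 2^32, so roughly 2^32 pivot-row
// additions fit in a 64-bit accumulator before it must be reduced mod p.
void addRowMultipleDelayed(
  std::vector<uint64_t>& accumulators,   // row being reduced, not yet reduced mod p
  const std::vector<Scalar>& pivotRow,   // entries already reduced mod p
  const Scalar multiple
) {
  assert(accumulators.size() == pivotRow.size());
  for (size_t col = 0; col < pivotRow.size(); ++col)
    accumulators[col] += uint64_t(multiple) * pivotRow[col];
}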
diff --cc src/mathicgb/stdinc.h
index 6813c7f,4d24cc0..73a5dbd
--- a/src/mathicgb/stdinc.h
+++ b/src/mathicgb/stdinc.h
@@@ -115,6 -144,18 +144,18 @@@
#define MATHICGB_SLOW_ASSERT(X)
#endif
+ /// Concatenates A to B without expanding A and B. This is achieved since
+ /// token pasting (##) defeats macro expansion.
-#define MATHICGB_CONCATENATE(A,B) A##B
-
++#define MATHICGB_CONCATENATE(A,B) A##B
++
+ /// Concatenates A to B after expanding A and B. This is achieved since
+ /// macro parameters are expanded before expanding the macro itself,
+ /// so the token pasting inside MATHICGB_CONCATENATE does not defeat
+ /// expansion of the parameters. So even though this macro just evaluates
+ /// directly to MATHICGB_CONCATENATE(A,B) it does not do the same thing
+ /// as that macro does.
-#define MATHICGB_CONCATENATE_AFTER_EXPANSION(A,B) MATHICGB_CONCATENATE(A,B)
++#define MATHICGB_CONCATENATE_AFTER_EXPANSION(A,B) MATHICGB_CONCATENATE(A,B)
+
#include <utility>
/*
See http://herbsutter.com/gotw/_102/ for a reason to have a
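For reference on the two concatenation macros documented in the stdinc.h hunk above, the difference only shows up when an argument is itself a macro such as __LINE__. A small illustrative example (the line number 42 is hypothetical, not taken from the commit):

#define MATHICGB_CONCATENATE(A,B) A##B
#define MATHICGB_CONCATENATE_AFTER_EXPANSION(A,B) MATHICGB_CONCATENATE(A,B)

// Suppose these two declarations appear on line 42 of some source file.
// ## pastes its operands before they are macro-expanded, so the first macro
// glues the literal tokens, while the second expands __LINE__ first.
int MATHICGB_CONCATENATE(counter, __LINE__);                 // declares counter__LINE__
int MATHICGB_CONCATENATE_AFTER_EXPANSION(counter, __LINE__); // declares counter42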
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mathicgb.git