[mathicgb] 54/393: Made std::unordered_map in QuadMatrixBuilder use an Arena for allocation. Fixed MSVC warnings and an MSVC OpenMP issue (MSVC only allows signed index variables). Fixed a bug for threadCount == 0.
Doug Torrance
dtorrance-guest at moszumanska.debian.org
Fri Apr 3 15:58:32 UTC 2015
This is an automated email from the git hooks/post-receive script.
dtorrance-guest pushed a commit to branch upstream
in repository mathicgb.
commit d03aae2828ac664596fd74868b4c5da8cbde0171
Author: Bjarke Hammersholt Roune <bjarkehr.code at gmail.com>
Date: Thu Oct 11 13:46:33 2012 +0200
Made std::unordered_map in QuadMatrixBuilder use an Arena for allocation. Fixed MSVC warnings and an MSVC OpenMP issue (MSVC only allows signed index variables). Fixed a bug for threadCount == 0.
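The MSVC OpenMP fix shows up throughout the diff below as loop indices changed from size_t (or RowIndex) to long: MSVC implements OpenMP 2.0, which requires the index variable of a "parallel for" loop to be of signed integral type. A minimal sketch of the resulting pattern; the function and its parameter are illustrative, not code from the patch:

    #include <cstddef>

    void reduceAllRows(std::size_t rowCount) {
      // MSVC's OpenMP 2.0 support rejects an unsigned loop index, so the
      // index is declared signed and the bound is cast to match.
    #pragma omp parallel for schedule(dynamic)
      for (long row = 0; row < static_cast<long>(rowCount); ++row) {
        // ... reduce row 'row' ...
      }
    }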
---
src/mathicgb/F4MatrixReducer.cpp | 194 ++++++-------------------------------
src/mathicgb/F4MatrixReducer.hpp | 2 +-
src/mathicgb/F4Reducer.cpp | 5 +-
src/mathicgb/FreeModuleOrder.cpp | 9 +-
src/mathicgb/Poly.cpp | 5 +
src/mathicgb/Poly.hpp | 6 +-
src/mathicgb/QuadMatrixBuilder.cpp | 10 +-
src/mathicgb/QuadMatrixBuilder.hpp | 47 ++++++++-
src/mathicgb/SparseMatrix.cpp | 1 +
src/mathicgb/SparseMatrix.hpp | 4 +-
src/test/F4MatrixReducer.cpp | 2 +-
11 files changed, 102 insertions(+), 183 deletions(-)
diff --git a/src/mathicgb/F4MatrixReducer.cpp b/src/mathicgb/F4MatrixReducer.cpp
index 3f9b242..0f5a967 100755
--- a/src/mathicgb/F4MatrixReducer.cpp
+++ b/src/mathicgb/F4MatrixReducer.cpp
@@ -74,7 +74,7 @@ public:
matrix.appendRowWithModulus(mEntries, modulus);
}
- void appendTo(SparseMatrix& matrix, size_t leadCol = 0) {
+ void appendTo(SparseMatrix& matrix, SparseMatrix::ColIndex leadCol = 0) {
matrix.appendRow(mEntries, leadCol);
}
@@ -257,151 +257,6 @@ public:
std::vector<T> mEntries;
};
-template<typename Matrix>
-void reformMatrix(const Matrix& matA, const Matrix& matB, SparseMatrix& matAB) {
- MATHICGB_ASSERT(matA.rowdim() == matB.rowdim());
-
- matAB.clear(matA.coldim() + matB.coldim());
- MATHICGB_ASSERT(matAB.colCount() == matA.coldim() + matB.coldim());
- size_t const colCountA = matA.coldim();
- size_t const rowCount = matA.rowdim();
-
- typedef typename Matrix::Row::const_iterator CIter;
- for (size_t row = 0; row < rowCount; ++row) {
- {
- CIter const endA = matA[row].end();
- for (CIter it = matA[row].begin(); it != endA; ++it) {
- MATHICGB_ASSERT(it->first < colCountA);
- matAB.appendEntry(it->first, it->second);
- }
- }
- {
- CIter const endB = matB[row].end();
- for (CIter it = matB[row].begin(); it != endB; ++it) {
- MATHICGB_ASSERT(it->first < matB.coldim());
- MATHICGB_ASSERT(it->first + colCountA < matAB.colCount());
- matAB.appendEntry(it->first + colCountA, it->second);
- }
- }
- matAB.rowDone();
- }
- MATHICGB_ASSERT(matAB.rowCount() == matA.rowdim());
- MATHICGB_ASSERT(matAB.colCount() == matA.coldim() + matB.coldim());
-}
-
-void myReduce
-(SparseMatrix const& toReduce,
- SparseMatrix const& reduceBy,
- SparseMatrix::Scalar modulus,
- SparseMatrix& reduced,
- int threadCount) {
- MATHICGB_ASSERT(reduceBy.colCount() >= reduceBy.rowCount());
- MATHICGB_ASSERT(reduceBy.colCount() == toReduce.colCount());
- const auto pivotCount = reduceBy.rowCount();
- const auto colCount = toReduce.colCount();
- const auto rowCount = toReduce.rowCount();
-
- reduced.clear(toReduce.colCount());
-
- // pre-calculate what rows are pivots for what columns
- std::vector<SparseMatrix::RowIndex> rowThatReducesCol(pivotCount);
-#ifdef MATHICGB_DEBUG
- // fill in an invalid value that can be recognized by asserts to be invalid.
- std::fill(rowThatReducesCol.begin(), rowThatReducesCol.end(), pivotCount);
-#endif
- for (SparseMatrix::RowIndex pivot = 0; pivot < pivotCount; ++pivot) {
- MATHICGB_ASSERT(!reduceBy.emptyRow(pivot));
- SparseMatrix::ColIndex col = reduceBy.leadCol(pivot);
- MATHICGB_ASSERT(rowThatReducesCol[col] == pivotCount);
- rowThatReducesCol[col] = pivot;
- }
-
-#ifdef _OPENMP
- std::vector<DenseRow<uint64> > denseRowPerThread(threadCount);
-#else
- DenseRow<uint64> denseRow;
-#endif
-
-#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
- for (size_t row = 0; row < rowCount; ++row) {
- if (toReduce.emptyRow(row))
- continue;
-#ifdef _OPENMP
- DenseRow<uint64>& denseRow = denseRowPerThread[omp_get_thread_num()];
-#endif
- denseRow.reset(colCount);
- denseRow.addRow(toReduce, row);
- for (size_t pivot = 0; pivot < pivotCount; ++pivot) {
- if (denseRow[pivot] == 0)
- continue;
- denseRow.rowReduceByUnitary(rowThatReducesCol[pivot], reduceBy, modulus);
- }
- if (denseRow.takeModulus(modulus, pivotCount)) {
-#pragma omp critical
- {
- denseRow.appendTo(reduced, pivotCount);
- }
- }
- }
-}
-
-void myReduce
-(SparseMatrix const& toReduce,
- SparseMatrix const& reduceByLeft,
- SparseMatrix const& reduceByRight,
- SparseMatrix::Scalar modulus,
- SparseMatrix& reduced,
- int threadCount) {
- MATHICGB_ASSERT(reduceByLeft.colCount() == reduceByLeft.rowCount());
- MATHICGB_ASSERT(reduceByLeft.colCount() + reduceByRight.colCount() == toReduce.colCount());
- const auto pivotCount = reduceByLeft.rowCount();
- const auto colCount = toReduce.colCount();
- const auto rowCount = toReduce.rowCount();
-
- reduced.clear(toReduce.colCount());
-
- // pre-calculate what rows are pivots for what columns
- std::vector<SparseMatrix::RowIndex> rowThatReducesCol(pivotCount);
-#ifdef MATHICGB_DEBUG
- // fill in an invalid value that can be recognized by asserts to be invalid.
- std::fill(rowThatReducesCol.begin(), rowThatReducesCol.end(), pivotCount);
-#endif
- for (SparseMatrix::RowIndex pivot = 0; pivot < pivotCount; ++pivot) {
- MATHICGB_ASSERT(!reduceByLeft.emptyRow(pivot));
- SparseMatrix::ColIndex col = reduceByLeft.leadCol(pivot);
- MATHICGB_ASSERT(rowThatReducesCol[col] == pivotCount);
- rowThatReducesCol[col] = pivot;
- }
-
-#ifdef _OPENMP
- std::vector<DenseRow<uint64> > denseRowPerThread(threadCount);
-#else
- DenseRow<uint64> denseRow;
-#endif
-
-#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
- for (size_t row = 0; row < rowCount; ++row) {
- if (toReduce.emptyRow(row))
- continue;
-#ifdef _OPENMP
- DenseRow<uint64>& denseRow = denseRowPerThread[omp_get_thread_num()];
-#endif
- denseRow.reset(colCount);
- denseRow.addRow(toReduce, row);
- for (size_t pivot = 0; pivot < pivotCount; ++pivot) {
- if (denseRow[pivot] == 0)
- continue;
- denseRow.rowReduceByUnitary(rowThatReducesCol[pivot], reduceByLeft, reduceByRight, modulus);
- }
- if (denseRow.takeModulus(modulus, pivotCount)) {
-#pragma omp critical
- {
- denseRow.appendTo(reduced, pivotCount);
- }
- }
- }
-}
-
void myReduce
(SparseMatrix const& toReduceLeft,
SparseMatrix const& toReduceRight,
@@ -409,20 +264,23 @@ void myReduce
SparseMatrix const& reduceByRight,
SparseMatrix::Scalar modulus,
SparseMatrix& reduced,
- int threadCount) {
+ size_t threadCount) {
MATHICGB_ASSERT(reduceByLeft.colCount() == reduceByLeft.rowCount());
- const auto pivotCount = reduceByLeft.rowCount();
+ const auto pivotCount = reduceByLeft.colCount();
const auto rowCount = toReduceLeft.rowCount();
const auto colCountLeft = toReduceLeft.colCount();
const auto colCountRight = toReduceRight.colCount();
- // pre-calculate what rows are pivots for what columns
- std::vector<SparseMatrix::RowIndex> rowThatReducesCol(pivotCount);
+ // ** pre-calculate what rows are pivots for what columns.
+
+ // Store column indexes as the matrix is square anyway (so all indices
+ // fit) and we are going to store this as a column index later on.
+ std::vector<SparseMatrix::ColIndex> rowThatReducesCol(pivotCount);
#ifdef MATHICGB_DEBUG
// fill in an invalid value that can be recognized by asserts to be invalid.
std::fill(rowThatReducesCol.begin(), rowThatReducesCol.end(), pivotCount);
#endif
- for (SparseMatrix::RowIndex pivot = 0; pivot < pivotCount; ++pivot) {
+ for (SparseMatrix::ColIndex pivot = 0; pivot < pivotCount; ++pivot) {
MATHICGB_ASSERT(!reduceByLeft.emptyRow(pivot));
SparseMatrix::ColIndex col = reduceByLeft.leadCol(pivot);
MATHICGB_ASSERT(rowThatReducesCol[col] == pivotCount);
@@ -442,7 +300,7 @@ void myReduce
std::vector<SparseMatrix::RowIndex> rowOrder(rowCount);
#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
- for (size_t row = 0; row < rowCount; ++row) {
+ for (long row = 0; row < rowCount; ++row) {
#ifdef _OPENMP
auto& denseRow = denseRowPerThread[omp_get_thread_num()];
#endif
@@ -471,7 +329,7 @@ void myReduce
#pragma omp critical
{
for (size_t pivot = 0; pivot < pivotCount; ++pivot) {
- MATHICGB_ASSERT(denseRow[pivot] < std::numeric_limits<Scalar>::max());
+ MATHICGB_ASSERT(denseRow[pivot] < std::numeric_limits<SparseMatrix::Scalar>::max());
if (denseRow[pivot] != 0)
tmp.appendEntry(rowThatReducesCol[pivot], static_cast<SparseMatrix::Scalar>(denseRow[pivot]));
}
@@ -482,7 +340,7 @@ void myReduce
#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
- for (size_t i = 0; i < rowCount; ++i) {
+ for (long i = 0; i < rowCount; ++i) {
#ifdef _OPENMP
auto& denseRow = denseRowPerThread[omp_get_thread_num()];
#endif
@@ -548,14 +406,14 @@ void myReduceToEchelonForm5
// dense representation
std::vector<DenseRow<uint64> > dense(rowCount);
#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
- for (SparseMatrix::RowIndex row = 0; row < rowCount; ++row) {
+ for (long row = 0; row < rowCount; ++row) {
MATHICGB_ASSERT(!toReduce.emptyRow(row));
dense[row].reset(colCount);
dense[row].addRow(toReduce, row);
}
// invariant: all columns in row to the left of leadCols[row] are zero.
- std::vector<size_t> leadCols(rowCount);
+ std::vector<SparseMatrix::ColIndex> leadCols(rowCount);
// pivot rows get copied here before being used to reduce the matrix.
SparseMatrix reduced;
@@ -579,7 +437,7 @@ void myReduceToEchelonForm5
//std::cout << "reducing " << reduced.rowCount() << " out of " << toReduce.rowCount() << std::endl;
#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
- for (size_t row = 0; row < rowCount; ++row) {
+ for (long row = 0; row < rowCount; ++row) {
MATHICGB_ASSERT(leadCols[row] <= colCount);
DenseRow<uint64>& denseRow = dense[row];
if (denseRow.empty())
@@ -594,7 +452,7 @@ void myReduceToEchelonForm5
}
// update leadCols[row]
- size_t col;
+ SparseMatrix::ColIndex col;
MATHICGB_ASSERT(leadCols[row] <= colCount);
for (col = leadCols[row]; col < colCount; ++col) {
denseRow[col] %= modulus;
@@ -640,14 +498,14 @@ void myReduceToEchelonForm5
MATHICGB_ASSERT(reduced.rowCount() == i);
MATHICGB_ASSERT(!isPivotRow[row]);
- dense[row].appendTo(reduced); // already nornamlized
+ dense[row].appendTo(reduced); // already normalized
isPivotRow[row] = true;
}
nextReducers.clear();
}
#pragma omp parallel for num_threads(threadCount) schedule(dynamic)
- for (size_t row = 0; row < rowCount; ++row)
+ for (long row = 0; row < rowCount; ++row)
dense[row].takeModulus(modulus);
toReduce.clear(colCount);
@@ -694,9 +552,9 @@ void readMany(FILE* file, size_t count, std::vector<T>& v) {
// Writes an SparseMatrix
void writeSparseMatrix
(const SparseMatrix& matrix, SparseMatrix::Scalar modulus, const std::string& fileName) {
- MATHICGB_ASSERT(rowCount <= std::numeric_limits<uint32>::max());
- MATHICGB_ASSERT(colCount <= std::numeric_limits<uint32>::max());
- MATHICGB_ASSERT(entryCount <= std::numeric_limits<uint64>::max());
+ MATHICGB_ASSERT(matrix.rowCount() <= std::numeric_limits<uint32>::max());
+ MATHICGB_ASSERT(matrix.colCount() <= std::numeric_limits<uint32>::max());
+ MATHICGB_ASSERT(matrix.entryCount() <= std::numeric_limits<uint64>::max());
const uint32 rowCount = static_cast<uint32>(matrix.rowCount());
const uint32 colCount = static_cast<uint32>(matrix.colCount());
@@ -749,7 +607,7 @@ SparseMatrix::Scalar readSparseMatrix(const std::string& fileName, SparseMatrix&
}
// doesn't need to be fast.
-int integerLog10(unsigned int val) {
+int integerLog10(size_t val) {
int ret = -1;
while (val != 0) {
val /= 10;
@@ -865,7 +723,7 @@ void spliceMatrix(const SparseMatrix& matrix, SparseMatrix& pivots, SparseMatrix
// permutation of columns to put pivots left without reordering
// columns in any other way.
std::vector<SparseMatrix::ColIndex> colPerm(colCount);
- SparseMatrix::RowIndex columnsDecided = 0;
+ SparseMatrix::ColIndex columnsDecided = 0;
// choice of rows to make left of pivots matrix upper triangular
std::vector<SparseMatrix::RowIndex> pivotRows;
@@ -878,7 +736,7 @@ void spliceMatrix(const SparseMatrix& matrix, SparseMatrix& pivots, SparseMatrix
pivotRows.push_back(pivotRowOfCol[col]);
}
}
- SparseMatrix::RowIndex minNonPivotCol = columnsDecided;
+ SparseMatrix::ColIndex minNonPivotCol = columnsDecided;
for (size_t col = 0; col < colCount; ++col) {
if (pivotRowOfCol[col] == noPivot) {
@@ -973,6 +831,7 @@ void concatenateMatricesHorizontal
void F4MatrixReducer::reduce
(const PolyRing& ring, QuadMatrix& matrix, SparseMatrix& newPivots) {
+ MATHICGB_ASSERT(mThreadCount >= 1);
if (tracingLevel >= 3)
std::cerr << "Row reducing (" << matrix.topLeft.rowCount()
<< " + " << matrix.bottomLeft.rowCount()
@@ -1003,3 +862,6 @@ void F4MatrixReducer::reduce
myReduceToEchelonForm5(newPivots, modulus, mThreadCount);
}
+
+F4MatrixReducer::F4MatrixReducer(size_t threadCount):
+ mThreadCount(std::max(threadCount, static_cast<size_t>(1))) {}
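The threadCount == 0 fix is the out-of-line constructor added just above: the requested thread count is clamped to at least one before it ever reaches an OpenMP num_threads clause, which requires a positive value. A hypothetical helper showing the clamp in isolation (illustrative, not from the patch):

    #include <algorithm>
    #include <cstddef>

    // Treat a request for zero threads as a request for one thread,
    // mirroring the F4MatrixReducer constructor above.
    std::size_t clampThreadCount(std::size_t requested) {
      return std::max<std::size_t>(requested, 1);
    }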
diff --git a/src/mathicgb/F4MatrixReducer.hpp b/src/mathicgb/F4MatrixReducer.hpp
index f50006a..97941c8 100755
--- a/src/mathicgb/F4MatrixReducer.hpp
+++ b/src/mathicgb/F4MatrixReducer.hpp
@@ -9,7 +9,7 @@ class PolyRing;
answer you get is the submatrix that contains new pivots. */
class F4MatrixReducer {
public:
- F4MatrixReducer(size_t threadCount = 1): mThreadCount(threadCount) {}
+ F4MatrixReducer(size_t threadCount);
void reduce
(const PolyRing& ring, QuadMatrix& matrix, SparseMatrix& newPivots);
diff --git a/src/mathicgb/F4Reducer.cpp b/src/mathicgb/F4Reducer.cpp
index 02b8032..d747cb2 100755
--- a/src/mathicgb/F4Reducer.cpp
+++ b/src/mathicgb/F4Reducer.cpp
@@ -12,7 +12,8 @@ F4Reducer::F4Reducer(
std::unique_ptr<Reducer> fallback
):
mFallback(std::move(fallback)),
- mRing(ring) {
+ mRing(ring),
+ mThreadCount(1) {
}
std::unique_ptr<Poly> F4Reducer::classicReduce
@@ -132,7 +133,7 @@ void F4Reducer::classicReducePolySet
mFallback->classicReducePolySet(polys, basis, reducedOut);
return;
}
-
+
reducedOut.clear();
if (polys.empty())
return;
diff --git a/src/mathicgb/FreeModuleOrder.cpp b/src/mathicgb/FreeModuleOrder.cpp
index 3125ac2..451a58f 100755
--- a/src/mathicgb/FreeModuleOrder.cpp
+++ b/src/mathicgb/FreeModuleOrder.cpp
@@ -43,15 +43,16 @@ namespace {
template<class PairIterator>
class IndexIterator {
public:
+
typedef typename PairIterator::iterator_category iterator_category;
- typedef typename PairIterator::value_type value_type;
+ typedef decltype(reinterpret_cast<typename PairIterator::value_type*>(0)->i) value_type;
typedef typename PairIterator::difference_type difference_type;
- typedef typename PairIterator::pointer pointer;
- typedef typename PairIterator::reference reference;
+ typedef value_type* pointer;
+ typedef value_type& reference;
IndexIterator(PairIterator pairIterator): mIterator(pairIterator) {}
IndexIterator& operator++() {++mIterator; return *this;}
- size_t operator*() const {return mIterator->i;}
+ const value_type operator*() const {return mIterator->i;}
difference_type operator-(const IndexIterator<PairIterator>& it) const {
return mIterator - it.mIterator;
}
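The new value_type above is obtained by applying decltype to the member i of the pair type. Dereferencing a null pointer inside decltype is safe because the operand is never evaluated; the idiom works where std::declval is unavailable. A standalone sketch of the same trick, with illustrative names:

    #include <type_traits>

    struct Pair { int i; double value; };

    // decltype never evaluates its operand, so the null-pointer
    // dereference below only names the type of the member 'i'.
    typedef decltype(reinterpret_cast<Pair*>(0)->i) IndexType;

    static_assert(std::is_same<IndexType, int>::value,
                  "extracted the member's type");

    int main() { return 0; }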
diff --git a/src/mathicgb/Poly.cpp b/src/mathicgb/Poly.cpp
index 0c16514..3a4a745 100755
--- a/src/mathicgb/Poly.cpp
+++ b/src/mathicgb/Poly.cpp
@@ -215,6 +215,7 @@ void Poly::multByTerm(coefficient a, const_monomial m)
R->monomialMultTo(nmon, m); // changes the monomial pointed to by n.
}
}
+
void Poly::multByMonomial(const_monomial m)
{
size_t p = 0;
@@ -342,6 +343,10 @@ std::ostream& operator<<(std::ostream& out, const Poly& p) {
return out;
}
+void Poly::reserve(size_t spaceForThisManyTerms) {
+ monoms.reserve(spaceForThisManyTerms * R->maxMonomialSize());
+}
+
bool Poly::termsAreInDescendingOrder() const {
if (isZero())
return true;
diff --git a/src/mathicgb/Poly.hpp b/src/mathicgb/Poly.hpp
index 3fbaa77..82912b8 100755
--- a/src/mathicgb/Poly.hpp
+++ b/src/mathicgb/Poly.hpp
@@ -76,8 +76,12 @@ public:
coefficient& coefficientAt(size_t index);
const coefficient coefficientAt(size_t index) const;
+ /// all iterators are invalid after this
void appendTerm(coefficient a, const_monomial m);
- // all iterators are invalid after this
+
+ /// Hint that space for termCount terms is going to be needed so the internal
+ /// storage should be expanded to fit that many terms.
+ void reserve(size_t spaceForThisManyTerms);
const_iterator begin() const { return const_iterator(*this); }
const_iterator end() const { return const_iterator(*this,1); }
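Poly stores every monomial in one flat vector of R->maxMonomialSize() words per term, which is why the reserve() implementation above multiplies the term count by that size. A self-contained sketch of the flattened-storage pattern (FlatPoly and its members are illustrative, not the real Poly):

    #include <cstddef>
    #include <vector>

    struct FlatPoly {
      std::size_t wordsPerMonomial;
      // Term k occupies the slot range
      // [k * wordsPerMonomial, (k + 1) * wordsPerMonomial).
      std::vector<unsigned> monoms;

      // Mirrors the idea of Poly::reserve: one vector holds every
      // monomial, so n terms need n * wordsPerMonomial slots.
      void reserve(std::size_t termCount) {
        monoms.reserve(termCount * wordsPerMonomial);
      }
    };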
diff --git a/src/mathicgb/QuadMatrixBuilder.cpp b/src/mathicgb/QuadMatrixBuilder.cpp
index 4243b4d..a70c0f2 100755
--- a/src/mathicgb/QuadMatrixBuilder.cpp
+++ b/src/mathicgb/QuadMatrixBuilder.cpp
@@ -10,7 +10,9 @@ QuadMatrixBuilder::QuadMatrixBuilder(const PolyRing& ring):
#ifndef MATHICGB_USE_QUADMATRIX_STD_HASH
mMonomialToCol(ArbitraryOrdering(ring)) {}
#else
-mMonomialToCol(100, Hash(ring), Equal(ring)) {
+mMonomialToColArena(),
+ mMonomialToCol(100, Hash(ring), Equal(ring),
+ SpecificHashAllocator(mMonomialToColArena)) {
mMonomialToCol.max_load_factor(0.3f);
}
#endif
@@ -18,7 +20,7 @@ mMonomialToCol(100, Hash(ring), Equal(ring)) {
namespace {
/// Creates a column and updates the associated data structures that
/// are passed in. Copies mono - ownership is not taken over. The
- /// purpose of this function is to avoid code duplication. It is a
+ /// purpose of this function is to avoid code duplication. It is a
/// template in order to avoid referring to private types of
/// QuadMatrixBuilder.
template<class ToMono, class ToCol>
@@ -120,7 +122,7 @@ namespace {
// monomials back into the vector of monomials which is not const.
std::vector<std::pair<monomial, ColIndex> > columns;
columns.reserve(colCount);
- for (size_t col = 0; col < colCount; ++col)
+ for (ColIndex col = 0; col < colCount; ++col)
columns.push_back(std::make_pair(monomials[col], col));
std::sort(columns.begin(), columns.end(), ColumnComparer(order));
@@ -135,7 +137,7 @@ namespace {
// Construct permutation of indices to match permutation of monomials
std::vector<ColIndex> permutation(colCount);
- for (size_t col = 0; col < colCount; ++col) {
+ for (ColIndex col = 0; col < colCount; ++col) {
// The monomial for column columns[col].second is now the
// monomial for col, so we need the inverse map for indices.
permutation[columns[col].second] = col;
diff --git a/src/mathicgb/QuadMatrixBuilder.hpp b/src/mathicgb/QuadMatrixBuilder.hpp
index c7021e9..289325f 100755
--- a/src/mathicgb/QuadMatrixBuilder.hpp
+++ b/src/mathicgb/QuadMatrixBuilder.hpp
@@ -7,8 +7,10 @@
#include "PolyRing.hpp"
#include <vector>
#include <map>
+#include <limits>
#include <string>
#include <ostream>
+#include <memtailor.h>
#ifdef MATHICGB_USE_QUADMATRIX_STD_HASH
#include <unordered_map>
#endif
@@ -271,8 +273,49 @@ private:
const PolyRing& mRing;
};
- typedef std::unordered_map<const_monomial, LeftRightColIndex, Hash, Equal>
- MonomialToColType;
+ template<class T>
+ class HashAllocator {
+ public:
+ HashAllocator(memt::Arena& arena): mArena(arena) {}
+
+ typedef T value_type;
+ typedef T* pointer;
+ typedef T& reference;
+ typedef const T* const_pointer;
+ typedef const T& const_reference;
+ typedef ::size_t size_type;
+ typedef ::ptrdiff_t difference_type;
+
+ template<class T2>
+ struct rebind {
+ typedef HashAllocator<T2> other;
+ };
+
+ HashAllocator() {}
+ template<class X>
+ HashAllocator(const HashAllocator<X>& a): mArena(a.arena()) {}
+ HashAllocator(const HashAllocator<T>& a): mArena(a.arena()) {}
+
+ pointer address(reference x) {return &x;}
+ const_pointer address(const_reference x) const {return &x;}
+ pointer allocate(size_type n, void* hint = 0) {
+ return static_cast<pointer>(mArena.alloc(sizeof(T) * n));
+ }
+ void deallocate(pointer p, size_t n) {}
+ size_type max_size() const {return std::numeric_limits<size_type>::max();}
+ void construct(pointer p, const_reference val) {new (p) T(val);}
+ void destroy(pointer p) {p->~T();}
+ memt::Arena& arena() const {return mArena;}
+
+ private:
+ mutable memt::Arena& mArena;
+ };
+ typedef HashAllocator<std::pair<const const_monomial, LeftRightColIndex> >
+ SpecificHashAllocator;
+
+ typedef std::unordered_map<const_monomial, LeftRightColIndex, Hash, Equal,
+ SpecificHashAllocator> MonomialToColType;
+ memt::Arena mMonomialToColArena;
MonomialToColType mMonomialToCol;
#endif
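HashAllocator above adapts memt::Arena to the allocator interface expected by std::unordered_map, so hash-table nodes are carved out of the arena and all released together instead of being freed one by one. A self-contained sketch of the same pattern against the C++11 minimal-allocator requirements; the Arena below is a stand-in bump allocator, not memtailor's API:

    #include <cstddef>
    #include <functional>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    class Arena { // stand-in for memt::Arena: allocate only, free all at once
    public:
      void* alloc(std::size_t bytes) {
        mBlocks.push_back(new char[bytes]);
        return mBlocks.back();
      }
      ~Arena() {
        for (std::size_t i = 0; i < mBlocks.size(); ++i)
          delete[] mBlocks[i];
      }
    private:
      std::vector<char*> mBlocks;
    };

    template<class T>
    class ArenaAllocator {
    public:
      typedef T value_type;
      template<class U> struct rebind { typedef ArenaAllocator<U> other; };

      explicit ArenaAllocator(Arena& arena): mArena(&arena) {}
      template<class U>
      ArenaAllocator(const ArenaAllocator<U>& a): mArena(a.arena()) {}

      T* allocate(std::size_t n) {
        return static_cast<T*>(mArena->alloc(sizeof(T) * n));
      }
      void deallocate(T*, std::size_t) {} // the arena frees everything later

      Arena* arena() const { return mArena; }
      template<class U>
      bool operator==(const ArenaAllocator<U>& a) const { return mArena == a.arena(); }
      template<class U>
      bool operator!=(const ArenaAllocator<U>& a) const { return mArena != a.arena(); }

    private:
      Arena* mArena;
    };

    int main() {
      Arena arena;
      typedef ArenaAllocator<std::pair<const int, int> > Alloc;
      std::unordered_map<int, int, std::hash<int>, std::equal_to<int>, Alloc>
        map(100, std::hash<int>(), std::equal_to<int>(), Alloc(arena));
      map[42] = 1; // node memory comes from the arena
      return 0;
    }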
diff --git a/src/mathicgb/SparseMatrix.cpp b/src/mathicgb/SparseMatrix.cpp
index 6697291..ff28dc3 100755
--- a/src/mathicgb/SparseMatrix.cpp
+++ b/src/mathicgb/SparseMatrix.cpp
@@ -14,6 +14,7 @@ void SparseMatrix::rowToPolynomial
MATHICGB_ASSERT(colMonomials.size() == colCount());
poly.setToZero();
auto end = rowEnd(row);
+ poly.reserve(entryCountInRow(row));
for (auto it = rowBegin(row); it != end; ++it) {
MATHICGB_ASSERT(it.index() < colMonomials.size());
if (it.scalar() != 0)
diff --git a/src/mathicgb/SparseMatrix.hpp b/src/mathicgb/SparseMatrix.hpp
index 3ccf16e..0a13d57 100755
--- a/src/mathicgb/SparseMatrix.hpp
+++ b/src/mathicgb/SparseMatrix.hpp
@@ -97,9 +97,9 @@ class SparseMatrix {
}
/** Returns the number of entries in the given row. */
- size_t entryCountInRow(RowIndex row) const {
+ ColIndex entryCountInRow(RowIndex row) const {
MATHICGB_ASSERT(row < rowCount());
- return mRowOffsets[row + 1] - mRowOffsets[row];
+ return static_cast<ColIndex>(mRowOffsets[row + 1] - mRowOffsets[row]);
}
/** Returns the number of entries in the whole matrix. */
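entryCountInRow now returns ColIndex rather than size_t: a row can hold at most colCount entries, so the narrowing cast above cannot overflow. A hypothetical sketch of that narrowing with the bound made explicit (the names are assumptions, not the SparseMatrix API):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <limits>

    typedef uint32_t ColIndex; // illustrative column-index type

    // Narrow a per-row entry count that is bounded by the column count,
    // checking both bounds in debug builds.
    ColIndex narrowToColIndex(std::size_t count, std::size_t colCount) {
      assert(count <= colCount);
      assert(colCount <= std::numeric_limits<ColIndex>::max());
      return static_cast<ColIndex>(count);
    }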
diff --git a/src/test/F4MatrixReducer.cpp b/src/test/F4MatrixReducer.cpp
index c2839a3..7bd050c 100755
--- a/src/test/F4MatrixReducer.cpp
+++ b/src/test/F4MatrixReducer.cpp
@@ -117,7 +117,7 @@ TEST(F4MatrixReducer, Reduce) {
ASSERT_EQ(origStr, m.toString()) << "Printed m:\n" << m;
SparseMatrix reduced;
- F4MatrixReducer red;
+ F4MatrixReducer red(1);
red.reduce(*ring, m, reduced);
const char* redStr =