[mathicgb] 77/393: Made SparseMatrix allocate memory in multiple blocks instead of reallocating an ever-larger buffer. This saves the copy and will make the parallelization-to-come easier. It was a 4% performance improvement at 2 threads on hcyc8.
Doug Torrance
dtorrance-guest at moszumanska.debian.org
Fri Apr 3 15:58:34 UTC 2015
dtorrance-guest pushed a commit to branch upstream in repository mathicgb.
commit bdcdbbc748a445b5f81c6662d6625afe132afeac
Author: Bjarke Hammersholt Roune <bjarkehr.code at gmail.com>
Date: Wed Oct 24 18:06:46 2012 +0200
Made SparseMatrix allocate memory in multiple blocks instead of reallocating an ever-larger buffer. This saves the copy and will make the parallelization-to-come easier. It was a 4% performance improvement at 2 threads on hcyc8.
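For readers skimming the diff below, here is a minimal standalone sketch of the block-allocation idea. It is illustrative only: the names EntryBlock and BlockBuilder are not part of mathicgb, the real SparseMatrix stores entries in raw RawVector buffers rather than std::vector, and the real code also moves the pending entries of the row currently being built into the new block, which this sketch omits. The point it shows is that finished data stays in the block where it was written, so growing capacity never copies the whole matrix.

    #include <cstdint>
    #include <cstddef>
    #include <vector>
    #include <memory>

    struct EntryBlock {
      std::vector<uint32_t> indices;        // column indices stored in this block
      std::vector<uint16_t> scalars;        // matching scalars
      std::unique_ptr<EntryBlock> previous; // older, already-full blocks
    };

    class BlockBuilder {
    public:
      explicit BlockBuilder(size_t blockSize): mBlockSize(blockSize) {
        mHead = std::make_unique<EntryBlock>();
        mHead->indices.reserve(blockSize);
        mHead->scalars.reserve(blockSize);
      }

      void appendEntry(uint32_t col, uint16_t scalar) {
        if (mHead->indices.size() == mBlockSize)
          startNewBlock();                  // entries already written are not copied
        mHead->indices.push_back(col);
        mHead->scalars.push_back(scalar);
      }

      size_t entryCount() const {           // walks the chain of blocks
        size_t count = 0;
        for (const EntryBlock* b = mHead.get(); b != nullptr; b = b->previous.get())
          count += b->indices.size();
        return count;
      }

    private:
      void startNewBlock() {
        auto fresh = std::make_unique<EntryBlock>();
        fresh->indices.reserve(mBlockSize);
        fresh->scalars.reserve(mBlockSize);
        fresh->previous = std::move(mHead); // chain the full block behind the new one
        mHead = std::move(fresh);
      }

      size_t mBlockSize;
      std::unique_ptr<EntryBlock> mHead;    // block currently being filled
    };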
---
src/mathicgb/QuadMatrixBuilder.cpp | 4 -
src/mathicgb/RawVector.hpp | 20 ++--
src/mathicgb/SparseMatrix.cpp | 198 +++++++++++++++++++++++--------------
src/mathicgb/SparseMatrix.hpp | 124 +++++++++++++++--------
4 files changed, 218 insertions(+), 128 deletions(-)
diff --git a/src/mathicgb/QuadMatrixBuilder.cpp b/src/mathicgb/QuadMatrixBuilder.cpp
index bf94019..12eece4 100755
--- a/src/mathicgb/QuadMatrixBuilder.cpp
+++ b/src/mathicgb/QuadMatrixBuilder.cpp
@@ -191,10 +191,6 @@ std::string QuadMatrixBuilder::toString() const {
}
void QuadMatrixBuilder::buildMatrixAndClear(QuadMatrix& out) {
- // we cannot use std::move as the builder is supposed to remain in a
- // valid state. @todo: consider using a QuadMatrix as the internal
- // data representation.
-
mTopLeft.swap(out.topLeft);
mTopRight.swap(out.topRight);
mBottomLeft.swap(out.bottomLeft);
diff --git a/src/mathicgb/RawVector.hpp b/src/mathicgb/RawVector.hpp
index aef9bc7..95981a3 100755
--- a/src/mathicgb/RawVector.hpp
+++ b/src/mathicgb/RawVector.hpp
@@ -39,8 +39,8 @@ public:
/// Copies the pointers from v. It is a shallow copy.
RawVector(const RawVector& v):
- mBegin(v.mBegin()),
- mEnd(v.mEnd()),
+ mBegin(v.mBegin),
+ mEnd(v.mEnd),
mCapacityEnd(v.mCapacityEnd) {}
/// Copies the pointers from v. It is a shallow copy. Sets v to a null state.
@@ -104,15 +104,15 @@ public:
size_type max_size() const {return std::numeric_limits<size_type>::max();}
/// There must be enough capacity for the new size.
- size_type resize(const size_type newSize) {
+ void resize(const size_type newSize) {
MATHICGB_ASSERT(newSize <= capacity());
- while (newSize < size()) {
- new (end) T();
- ++end;
- }
while (newSize > size()) {
- --end;
- end->~T();
+ new (mEnd) T();
+ ++mEnd;
+ }
+ while (newSize < size()) {
+ --mEnd;
+ mEnd->~T();
}
MATHICGB_ASSERT(newSize == size());
}
@@ -166,7 +166,7 @@ public:
/// There must be enough capacity for the new size.
template<class Iter>
void rawAssign(Iter begin, Iter end) {
- const auto count = std::distance(begin, end);
+ const size_t count = std::distance(begin, end);
MATHICGB_ASSERT(count <= capacity());
if (count > size())
resize(count);
diff --git a/src/mathicgb/SparseMatrix.cpp b/src/mathicgb/SparseMatrix.cpp
index bc205bf..c3790e7 100755
--- a/src/mathicgb/SparseMatrix.cpp
+++ b/src/mathicgb/SparseMatrix.cpp
@@ -4,17 +4,15 @@
#include "Poly.hpp"
#include <algorithm>
-std::ostream& operator<<(std::ostream& out, const SparseMatrix& matrix) {
- matrix.print(out);
- return out;
-}
-
-void SparseMatrix::rowToPolynomial
-(RowIndex row, std::vector<monomial> colMonomials, Poly& poly) {
+void SparseMatrix::rowToPolynomial(
+ const RowIndex row,
+ const std::vector<monomial>& colMonomials,
+ Poly& poly
+) {
MATHICGB_ASSERT(colMonomials.size() == colCount());
poly.setToZero();
- auto end = rowEnd(row);
poly.reserve(entryCountInRow(row));
+ const auto end = rowEnd(row);
for (auto it = rowBegin(row); it != end; ++it) {
MATHICGB_ASSERT(it.index() < colMonomials.size());
if (it.scalar() != 0)
@@ -42,10 +40,9 @@ void SparseMatrix::sortRowsByIncreasingPivots() {
// construct ordered with pivot columns in increasing order
ordered.clear(lColCount);
for (size_t i = 0; i < lRowCount; ++i) {
- const SparseMatrix::RowIndex row = order[i].second;
- auto it = rowBegin(row);
+ const auto row = order[i].second;
const auto end = rowEnd(row);
- for (; it != end; ++it)
+ for (auto it = rowBegin(row); it != end; ++it)
ordered.appendEntry(it.index(), it.scalar());
ordered.rowDone();
}
@@ -53,12 +50,15 @@ void SparseMatrix::sortRowsByIncreasingPivots() {
*this = std::move(ordered);
}
-void SparseMatrix::applyColumnMap(std::vector<ColIndex> colMap) {
+void SparseMatrix::applyColumnMap(const std::vector<ColIndex>& colMap) {
MATHICGB_ASSERT(colMap.size() >= colCount());
- const auto end = mColIndices.end();
- for (auto it = mColIndices.begin(); it != end; ++it) {
- MATHICGB_ASSERT(*it < colCount());
- *it = colMap[*it];
+ Block* block = &mBlock;
+ for (; block != 0; block = block->mPreviousBlock) {
+ const auto end = block->mColIndices.end();
+ for (auto it = block->mColIndices.begin(); it != end; ++it) {
+ MATHICGB_ASSERT(*it < colCount());
+ *it = colMap[*it];
+ }
}
}
@@ -82,19 +82,23 @@ std::string SparseMatrix::toString() const {
return out.str();
}
-void SparseMatrix::appendRowAndNormalize(const SparseMatrix& matrix, RowIndex row, Scalar modulus) {
+void SparseMatrix::appendRowAndNormalize(
+ const SparseMatrix& matrix,
+ const RowIndex row,
+ const Scalar modulus
+) {
MATHICGB_ASSERT(row < matrix.rowCount());
auto it = matrix.rowBegin(row);
const auto end = matrix.rowEnd(row);
if (it != end) {
appendEntry(it.index(), 1);
- Scalar lead = it.scalar();
+ const Scalar lead = it.scalar();
++it;
if (it != end) {
- Scalar inverse = modularInverse(lead, modulus);
+ const Scalar inverse = modularInverse(lead, modulus);
do {
- uint32 prod = static_cast<uint32>(inverse) * it.scalar();
- uint16 prodMod = static_cast<uint16>(prod % modulus);
+ const uint32 prod = static_cast<uint32>(inverse) * it.scalar();
+ const uint16 prodMod = static_cast<uint16>(prod % modulus);
appendEntry(it.index(), prodMod);
++it;
} while (it != end);
@@ -103,7 +107,7 @@ void SparseMatrix::appendRowAndNormalize(const SparseMatrix& matrix, RowIndex ro
rowDone();
}
-void SparseMatrix::appendRow(const SparseMatrix& matrix, RowIndex row) {
+void SparseMatrix::appendRow(const SparseMatrix& matrix, const RowIndex row) {
MATHICGB_ASSERT(row < matrix.rowCount());
auto it = matrix.rowBegin(row);
const auto end = matrix.rowEnd(row);
@@ -113,31 +117,46 @@ void SparseMatrix::appendRow(const SparseMatrix& matrix, RowIndex row) {
}
void SparseMatrix::swap(SparseMatrix& matrix) {
- std::swap(mColIndices, matrix.mColIndices);
- std::swap(mEntries, matrix.mEntries);
- std::swap(mRows, matrix.mRows);
- std::swap(mColCount, matrix.mColCount);
+ mBlock.swap(matrix.mBlock);
+ using std::swap;
+ swap(mRows, matrix.mRows);
+ swap(mColCount, matrix.mColCount);
}
-
-void SparseMatrix::clear(ColIndex newColCount) {
- mColIndices.clear();
- mEntries.clear();
+
+void SparseMatrix::clear(const ColIndex newColCount) {
+ Block* block = &mBlock;
+ while (block != 0) {
+ delete[] block->mColIndices.releaseMemory();
+ delete[] block->mScalars.releaseMemory();
+ Block* const tmp = block->mPreviousBlock;
+ if (block != &mBlock)
+ delete block;
+ block = tmp;
+ }
+ mBlock.mPreviousBlock = 0;
+ mBlock.mHasNoRows = true;
mRows.clear();
mColCount = newColCount;
}
-void SparseMatrix::appendRowWithModulus(std::vector<uint64> const& v, Scalar modulus) {
+void SparseMatrix::appendRowWithModulus(
+ std::vector<uint64> const& v,
+ const Scalar modulus
+) {
MATHICGB_ASSERT(v.size() == colCount());
- ColIndex count = colCount();
+ const ColIndex count = colCount();
for (ColIndex col = 0; col < count; ++col) {
- Scalar scalar = static_cast<Scalar>(v[col] % modulus);
+ const Scalar scalar = static_cast<Scalar>(v[col] % modulus);
if (scalar != 0)
appendEntry(col, scalar);
}
rowDone();
}
-void SparseMatrix::appendRow(std::vector<uint64> const& v, ColIndex leadCol) {
+void SparseMatrix::appendRow(
+ std::vector<uint64> const& v,
+ const ColIndex leadCol
+) {
MATHICGB_ASSERT(v.size() == colCount());
#ifdef MATHICGB_DEBUG
for (ColIndex col = leadCol; col < leadCol; ++col) {
@@ -145,7 +164,7 @@ void SparseMatrix::appendRow(std::vector<uint64> const& v, ColIndex leadCol) {
}
#endif
- ColIndex count = colCount();
+ const ColIndex count = colCount();
for (ColIndex col = leadCol; col < count; ++col) {
MATHICGB_ASSERT(v[col] < std::numeric_limits<Scalar>::max());
if (v[col] != 0)
@@ -154,12 +173,14 @@ void SparseMatrix::appendRow(std::vector<uint64> const& v, ColIndex leadCol) {
rowDone();
}
-void SparseMatrix::appendRowWithModulusNormalized(std::vector<uint64> const& v, Scalar modulus) {
+void SparseMatrix::appendRowWithModulusNormalized(
+ std::vector<uint64> const& v,
+ const Scalar modulus
+) {
MATHICGB_ASSERT(v.size() == colCount());
- ColIndex count = colCount();
- uint16 multiply = 1;
-
+ uint16 multiply = 1;
bool first = true;
+ const ColIndex count = colCount();
for (ColIndex col = 0; col < count; ++col) {
Scalar scalar = static_cast<Scalar>(v[col] % modulus);
if (scalar == 0)
@@ -177,7 +198,10 @@ void SparseMatrix::appendRowWithModulusNormalized(std::vector<uint64> const& v,
rowDone();
}
-bool SparseMatrix::appendRowWithModulusIfNonZero(std::vector<uint64> const& v, Scalar modulus) {
+bool SparseMatrix::appendRowWithModulusIfNonZero(
+ std::vector<uint64> const& v,
+ const Scalar modulus
+) {
appendRowWithModulus(v, modulus);
MATHICGB_ASSERT(rowCount() > 0);
if (mRows.back().empty()) {
@@ -187,56 +211,84 @@ bool SparseMatrix::appendRowWithModulusIfNonZero(std::vector<uint64> const& v, S
return true;
}
-void SparseMatrix::trimLeadingZeroColumns(ColIndex trimThisMany) {
+void SparseMatrix::trimLeadingZeroColumns(const ColIndex trimThisMany) {
MATHICGB_ASSERT(trimThisMany <= colCount());
- const auto end = mColIndices.end();
- for (auto it = mColIndices.begin(); it != end; ++it) {
- MATHICGB_ASSERT(*it >= trimThisMany);
- *it -= trimThisMany;
+ Block* block = &mBlock;
+ for (; block != 0; block = block->mPreviousBlock) {
+ const auto end = block->mColIndices.end();
+ for (auto it = block->mColIndices.begin(); it != end; ++it) {
+ MATHICGB_ASSERT(*it >= trimThisMany);
+ *it -= trimThisMany;
+ }
}
mColCount -= trimThisMany;
}
-void SparseMatrix::reserveEntries(size_t count) {
- if (count < mEntries.capacity())
+void SparseMatrix::reserveFreeEntries(const size_t freeCount) {
+ if (freeCount <= mBlock.mColIndices.capacity() - mBlock.mColIndices.size())
return;
+ // We need to copy over the pending entries, so we need space for those
+ // entries on top of freeCount.
+ const size_t count = freeCount + ( // todo: detect overflow for this addition
+ mBlock.mHasNoRows ?
+ mBlock.mColIndices.size() :
+ std::distance(mRows.back().mIndicesEnd, mBlock.mColIndices.end())
+ );
+
+ auto oldBlock = new Block(std::move(mBlock));
+ MATHICGB_ASSERT(mBlock.mColIndices.begin() == 0);
+ MATHICGB_ASSERT(mBlock.mScalars.begin() == 0);
+ MATHICGB_ASSERT(mBlock.mHasNoRows);
+ MATHICGB_ASSERT(mBlock.mPreviousBlock == 0);
+ mBlock.mPreviousBlock = oldBlock;
- ptrdiff_t scalarPtrDelta;
{
- const auto begin = new Scalar[count];
+ const auto begin = new ColIndex[count];
const auto capacityEnd = begin + count;
- scalarPtrDelta = begin - mEntries.begin();
- delete[] mEntries.setMemoryAndCopy(begin, capacityEnd);
+ mBlock.mColIndices.releaseAndSetMemory(begin, begin, capacityEnd);
}
- ptrdiff_t entryPtrDelta;
{
- const auto begin = new ColIndex[count];
+ const auto begin = new Scalar[count];
const auto capacityEnd = begin + count;
- entryPtrDelta = begin - mColIndices.begin();
- delete[] mColIndices.setMemoryAndCopy(begin, capacityEnd);
+ mBlock.mScalars.releaseAndSetMemory(begin, begin, capacityEnd);
}
- const auto rowEnd = mRows.end();
- for (auto it = mRows.begin(); it != rowEnd; ++it) {
- it->mIndicesBegin += entryPtrDelta;
- it->mIndicesEnd += entryPtrDelta;
- it->mScalarsBegin += scalarPtrDelta;
- it->mScalarsEnd += scalarPtrDelta;
+ // copy pending entries over
+ if (oldBlock->mHasNoRows) {
+ mBlock.mColIndices.rawAssign
+ (oldBlock->mColIndices.begin(), oldBlock->mColIndices.end());
+ mBlock.mScalars.rawAssign
+ (oldBlock->mScalars.begin(), oldBlock->mScalars.end());
+ } else {
+ mBlock.mColIndices.rawAssign
+ (mRows.back().mIndicesEnd, oldBlock->mColIndices.end());
+ mBlock.mScalars.rawAssign
+ (mRows.back().mScalarsEnd, oldBlock->mScalars.end());
}
}
void SparseMatrix::growEntryCapacity() {
- MATHICGB_ASSERT(mColIndices.size() == mEntries.size());
- MATHICGB_ASSERT(mColIndices.capacity() == mEntries.capacity());
-
- const size_t initialCapacity = 1 << 16;
- const size_t growthFactor = 2;
- const size_t newCapacity =
- mEntries.empty() ? initialCapacity : mEntries.capacity() * growthFactor;
- reserveEntries(newCapacity);
-
- MATHICGB_ASSERT(mColIndices.size() == mEntries.size());
- MATHICGB_ASSERT(mColIndices.capacity() == newCapacity);
- MATHICGB_ASSERT(mEntries.capacity() == newCapacity);
+ MATHICGB_ASSERT(mBlock.mColIndices.size() == mBlock.mScalars.size());
+ MATHICGB_ASSERT(mBlock.mColIndices.capacity() == mBlock.mScalars.capacity());
+
+ // TODO: handle overflow of multiplication below
+ const size_t minBlockSize = 1 << 20;
+ const size_t minMultipleOfPending = 2;
+ const size_t pendingCount = mBlock.mHasNoRows ?
+ mBlock.mColIndices.size() :
+ std::distance(mRows.back().mIndicesEnd, mBlock.mColIndices.end());
+ const size_t blockSize =
+ std::max(minBlockSize, pendingCount * minMultipleOfPending);
+
+ reserveFreeEntries(blockSize);
+
+ MATHICGB_ASSERT(mBlock.mColIndices.size() == mBlock.mScalars.size());
+ MATHICGB_ASSERT(mBlock.mColIndices.capacity() == blockSize + pendingCount);
+ MATHICGB_ASSERT(mBlock.mScalars.capacity() == blockSize + pendingCount);
+}
+
+std::ostream& operator<<(std::ostream& out, const SparseMatrix& matrix) {
+ matrix.print(out);
+ return out;
}
diff --git a/src/mathicgb/SparseMatrix.hpp b/src/mathicgb/SparseMatrix.hpp
index 4bc1f30..e7d6b59 100755
--- a/src/mathicgb/SparseMatrix.hpp
+++ b/src/mathicgb/SparseMatrix.hpp
@@ -51,9 +51,8 @@ public:
SparseMatrix(ColIndex colCount = 0): mColCount(colCount) {}
SparseMatrix(SparseMatrix&& matrix):
- mColIndices(std::move(matrix.mColIndices)),
- mEntries(std::move(matrix.mEntries)),
mRows(std::move(matrix.mRows)),
+ mBlock(std::move(matrix.mBlock)),
mColCount(matrix.mColCount)
{
}
@@ -64,10 +63,7 @@ public:
return *this;
}
- ~SparseMatrix() {
- delete[] mColIndices.releaseMemory();
- delete[] mEntries.releaseMemory();
- }
+ ~SparseMatrix() {clear();}
void swap(SparseMatrix& matrix);
@@ -76,8 +72,15 @@ public:
RowIndex rowCount() const {return mRows.size();}
ColIndex colCount() const {return mColCount;}
- /// Returns the number of entries in the whole matrix.
- size_t entryCount() const {return mEntries.size();}
+ /// Returns the number of entries in the whole matrix. Is not constant time
+ /// so avoid calling too many times.
+ size_t entryCount() const {
+ size_t count = 0;
+ const Block* block = &mBlock;
+ for (; block != 0; block = block->mPreviousBlock)
+ count += block->mColIndices.size();
+ return count;
+ }
/// Returns the number of entries in the given row.
ColIndex entryCountInRow(RowIndex row) const {
@@ -126,8 +129,11 @@ public:
/// trimThisMany, even if the scalar of that entry is set to zero.
void trimLeadingZeroColumns(ColIndex trimThisMany);
- /// Preallocate space for at least count entries.
- void reserveEntries(size_t count);
+ /// Ensure that there is enough space for at least freeCount additional
+ /// entries without needing to allocate more memory for entries.
+ /// Pending entries that are not fixed into a row yet do not count as
+ /// free for this calculation.
+ void reserveFreeEntries(size_t freeCount);
/// Preallocate space for at least count rows.
void reserveRows(size_t count) {mRows.reserve(count);}
@@ -135,13 +141,14 @@ public:
/// Adds a new row that contains all terms that have been appended
/// since the last time a row was added or the matrix was created.
void rowDone() {
- MATHICGB_ASSERT(mColIndices.size() == entryCount());
+ MATHICGB_ASSERT(mBlock.mColIndices.size() == mBlock.mScalars.size());
Row row;
- row.mIndicesEnd = mColIndices.end();
- row.mScalarsEnd = mEntries.end();
- if (mRows.empty()) {
- row.mIndicesBegin = mColIndices.begin();
- row.mScalarsBegin = mEntries.begin();
+ row.mIndicesEnd = mBlock.mColIndices.end();
+ row.mScalarsEnd = mBlock.mScalars.end();
+ if (mBlock.mHasNoRows) {
+ row.mIndicesBegin = mBlock.mColIndices.begin();
+ row.mScalarsBegin = mBlock.mScalars.begin();
+ mBlock.mHasNoRows = false;
} else {
row.mIndicesBegin = mRows.back().mIndicesEnd;
row.mScalarsBegin = mRows.back().mScalarsEnd;
@@ -152,20 +159,21 @@ public:
/// Appends an entry to the matrix. Will not appear in the matrix
/// until rowDone is called. Do not call other methods that add rows
/// after calling this method until rowDone has been called.
- void appendEntry(ColIndex colIndex, Scalar scalar) {
- MATHICGB_ASSERT(mColIndices.size() == entryCount());
+ inline void appendEntry(ColIndex colIndex, Scalar scalar) {
+ MATHICGB_ASSERT(mBlock.mColIndices.size() == mBlock.mScalars.size());
MATHICGB_ASSERT(colIndex < colCount());
- MATHICGB_ASSERT(mEntries.atCapacity() == mColIndices.atCapacity());
- if (mEntries.atCapacity())
+ MATHICGB_ASSERT(mBlock.mScalars.atCapacity() ==
+ mBlock.mColIndices.atCapacity());
+ if (mBlock.mScalars.atCapacity())
growEntryCapacity();
- MATHICGB_ASSERT(!mEntries.atCapacity());
- MATHICGB_ASSERT(!mColIndices.atCapacity());
+ MATHICGB_ASSERT(!mBlock.mScalars.atCapacity());
+ MATHICGB_ASSERT(!mBlock.mColIndices.atCapacity());
- mColIndices.rawPushBack(colIndex);
- mEntries.rawPushBack(scalar);
+ mBlock.mColIndices.rawPushBack(colIndex);
+ mBlock.mScalars.rawPushBack(scalar);
- MATHICGB_ASSERT(mColIndices.size() == entryCount());
+ MATHICGB_ASSERT(mBlock.mColIndices.size() == mBlock.mScalars.size());
}
void appendRowAndNormalize(const SparseMatrix& matrix, RowIndex row, Scalar modulus);
@@ -186,22 +194,24 @@ public:
return mColCount - 1;
}
- void appendRowWithModulus(std::vector<uint64> const& v, Scalar modulus);
+ void appendRowWithModulus(const std::vector<uint64>& v, Scalar modulus);
- void appendRow(std::vector<uint64> const& v, ColIndex leadCol = 0);
+ void appendRow(const std::vector<uint64>& v, ColIndex leadCol = 0);
- void appendRowWithModulusNormalized(std::vector<uint64> const& v, Scalar modulus);
+ void appendRowWithModulusNormalized(const std::vector<uint64>& v, Scalar modulus);
// Returns true if the row was non-zero. Otherwise the row was not
// appended.
- bool appendRowWithModulusIfNonZero(std::vector<uint64> const& v, Scalar modulus);
+ bool appendRowWithModulusIfNonZero(const std::vector<uint64>& v, Scalar modulus);
/// Replaces all column indices i with colMap[i].
- void applyColumnMap(std::vector<ColIndex> colMap);
+ void applyColumnMap(const std::vector<ColIndex>& colMap);
/// Let poly be the dot product of colMonomials and the given row.
- void rowToPolynomial
- (RowIndex row, std::vector<monomial> colMonomials, Poly& poly);
+ void rowToPolynomial(
+ RowIndex row,
+ const std::vector<monomial>& colMonomials,
+ Poly& poly);
/// Reorders the rows so that the index of the leading column in
/// each row is weakly increasing going from top to bottom. Quite
@@ -261,7 +271,7 @@ private:
SparseMatrix(const SparseMatrix&); // not available
void operator=(const SparseMatrix&); // not available
- void growEntryCapacity();
+ NO_INLINE void growEntryCapacity();
/// Contains information about a row in the matrix.
struct Row {
@@ -277,19 +287,51 @@ private:
return static_cast<ColIndex>(std::distance(mIndicesBegin, mIndicesEnd));
}
};
-
- /// We need a RawVector here to tie the checks for the need to reallocate
- /// together between mColIndices and mEntries. We only need to check
- /// the capacity once, which, believe it or not, is a significant performance
- /// win. Not least because it decreases the amount of code and therefore
- /// causes different compiler inlining decisions.
- RawVector<Scalar> mEntries;
- RawVector<ColIndex> mColIndices;
std::vector<Row> mRows;
+ /// Memory is allocated a block at a time. This avoids the need for copying
+ /// that a std::vector normally does on reallocation. Believe it or not,
+ /// copying sparse matrix memory due to reallocation was accounting for 5%
+ /// of the running time before this change.
+ struct Block {
+ Block(): mPreviousBlock(0), mHasNoRows(true) {}
+ Block(Block&& block):
+ mColIndices(std::move(block.mColIndices)),
+ mScalars(std::move(block.mScalars)),
+ mPreviousBlock(block.mPreviousBlock),
+ mHasNoRows(block.mHasNoRows)
+ {
+ block.mPreviousBlock = 0;
+ block.mHasNoRows = true;
+ }
+
+ void swap(Block& block) {
+ std::swap(mColIndices, block.mColIndices);
+ std::swap(mScalars, block.mScalars);
+ std::swap(mPreviousBlock, block.mPreviousBlock);
+ std::swap(mHasNoRows, block.mHasNoRows);
+ }
+
+ /// We need a RawVector here to tie the checks for the need to reallocate
+ /// together between mColIndices and mEntries. We only need to check
+ /// the capacity once, which, believe it or not, is a significant performance
+ /// win. Not least because it decreases the amount of code and therefore
+ /// causes better compiler inlining decisions.
+ RawVector<ColIndex> mColIndices;
+ RawVector<Scalar> mScalars;
+ Block* mPreviousBlock; /// is null if there are no previous blocks
+ bool mHasNoRows; /// true if no rows have been made from this block yet
+ };
+ Block mBlock;
+
ColIndex mColCount;
};
+inline void swap(SparseMatrix& a, SparseMatrix& b) {
+ a.swap(b);
+}
+
std::ostream& operator<<(std::ostream& out, const SparseMatrix& matrix);
+
#endif
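To summarize the trickiest part of the diff above: when a new block is started, only the entries of the row that is still being built are copied into it; finished rows keep pointing into the older blocks. Below is a simplified sketch of that step, assuming std::vector storage instead of the RawVector buffers used in the real reserveFreeEntries; SimpleBlock and startNewBlock are hypothetical names, and ownership of the chained blocks is left to the caller for brevity.

    #include <cstdint>
    #include <cstddef>
    #include <vector>

    struct SimpleBlock {
      std::vector<uint32_t> indices;
      std::vector<uint16_t> scalars;
      SimpleBlock* previous = nullptr;  // older blocks holding completed rows
    };

    // Moves the pending entries [pendingBegin, end) of the current row from the
    // old head block into a freshly allocated block with room for freeCount more
    // entries, and chains the old block behind the new one.
    SimpleBlock* startNewBlock(SimpleBlock* oldHead, size_t pendingBegin, size_t freeCount) {
      auto* fresh = new SimpleBlock();
      const size_t pendingCount = oldHead->indices.size() - pendingBegin;
      fresh->indices.reserve(pendingCount + freeCount);
      fresh->scalars.reserve(pendingCount + freeCount);
      fresh->indices.assign(oldHead->indices.begin() + pendingBegin, oldHead->indices.end());
      fresh->scalars.assign(oldHead->scalars.begin() + pendingBegin, oldHead->scalars.end());
      oldHead->indices.resize(pendingBegin); // the pending tail now lives in 'fresh'
      oldHead->scalars.resize(pendingBegin);
      fresh->previous = oldHead;             // completed rows remain untouched
      return fresh;
    }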