[mathicgb] 106/393: SparseMatrix cannot efficiently keep track of its number of columns during concurrent matrix construction with a shared hash table, so it no longer does so, in preparation for that change.

Doug Torrance dtorrance-guest at moszumanska.debian.org
Fri Apr 3 15:58:41 UTC 2015


This is an automated email from the git hooks/post-receive script.

dtorrance-guest pushed a commit to branch upstream
in repository mathicgb.

commit 37851e8fd28aa2bc3be3a807cc386c69adff63d1
Author: Bjarke Hammersholt Roune <bjarkehr.code at gmail.com>
Date:   Tue Nov 6 17:32:32 2012 +0100

    SparseMatrix cannot efficiently keep track of its number of columns during concurrent matrix construction with a shared hash table, so it no longer does so, in preparation for that change.
---
 src/mathicgb/Atomic.hpp            | 460 ++++++++++++++++++-------------------
 src/mathicgb/F4MatrixReducer.cpp   |  40 ++--
 src/mathicgb/QuadMatrix.cpp        |  30 ++-
 src/mathicgb/QuadMatrixBuilder.cpp |  44 ++--
 src/mathicgb/QuadMatrixBuilder.hpp |  23 +-
 src/mathicgb/SparseMatrix.cpp      |  45 ++--
 src/mathicgb/SparseMatrix.hpp      |  23 +-
 src/test/F4MatrixBuilder.cpp       |   4 +-
 src/test/SparseMatrix.cpp          |  12 +-
 9 files changed, 326 insertions(+), 355 deletions(-)
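
For readers skimming this notice: the diff below removes the mColCount field
from SparseMatrix, so colCount() goes away and callers either derive the
column count from the column-monomial vectors they already maintain, or call
the new computeColCount(), which scans every stored entry. The following is a
minimal standalone sketch of that caller-side pattern. It is not part of the
commit; Entry and ColIndex are stand-ins for the mathicgb types used in the
diff, and colCountFromMonomials is a hypothetical helper, not a mathicgb
function.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    typedef std::uint32_t ColIndex;        // stand-in for SparseMatrix::ColIndex
    struct Entry {                         // stand-in for one stored matrix entry
      ColIndex index;
      std::uint16_t scalar;
    };

    // Preferred after this change: the builder already knows its columns, so
    // the count is just the size of the column-monomial vector.
    template<class Monomial>
    ColIndex colCountFromMonomials(const std::vector<Monomial>& columnMonomials) {
      return static_cast<ColIndex>(columnMonomials.size());
    }

    // Fallback, mirroring SparseMatrix::computeColCount() in the diff below:
    // scan all entries and return one past the largest column index seen.
    ColIndex computeColCount(const std::vector<std::vector<Entry> >& rows) {
      ColIndex colCount = 0;
      for (size_t row = 0; row < rows.size(); ++row)
        for (size_t i = 0; i < rows[row].size(); ++i)
          colCount = std::max(colCount, static_cast<ColIndex>(rows[row][i].index + 1));
      return colCount;
    }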

diff --git a/src/mathicgb/Atomic.hpp b/src/mathicgb/Atomic.hpp
index 0aad203..1d4332c 100755
--- a/src/mathicgb/Atomic.hpp
+++ b/src/mathicgb/Atomic.hpp
@@ -1,11 +1,11 @@
 #ifndef MATHICGB_ATOMIC_GUARD
 #define MATHICGB_ATOMIC_GUARD
 
-// We need this include for std::memory_order even if we are not
+// We need this include for std::memory_order even if we are not
 // using std::atomic.
 #include <atomic>
 
-#if defined(_MSC_VER) && defined(MATHICGB_USE_CUSTOM_ATOMIC_X86_X64)
+#if defined(_MSC_VER) && defined(MATHICGB_USE_CUSTOM_ATOMIC_X86_X64)
 
 /// Tells the compiler (not the CPU) to not reorder reads across this line.
 #define MATHICGB_COMPILER_READ_MEMORY_BARRIER _ReadBarrier()
@@ -36,28 +36,28 @@
 #include <Windows.h>
 // Windows.h defines macros max and min that mess up things like std::max and
 // std::numeric_limits<T>::max. So we need to undefine those macros.
-#undef max
+#undef max
 #undef min
 namespace AtomicInternalMsvc {
   template<class T, size_t size> struct SeqCst {};
-#ifdef MATHICGB_USE_CUSTOM_ATOMIC_4BYTE
+#ifdef MATHICGB_USE_CUSTOM_ATOMIC_4BYTE
   template<class T> struct SeqCst<T, 4> {
     static T load(const T& ref) {
       return (T)_InterlockedOr((volatile LONG*)&ref, 0);
     }
     static void store(const T value, T& ref) {
-      _InterlockedExchange((volatile LONG*)&ref, (LONG)value);
-    }
-  };
-#endif
-#ifdef MATHICGB_USE_CUSTOM_ATOMIC_8BYTE
+      _InterlockedExchange((volatile LONG*)&ref, (LONG)value);
+    }
+  };
+#endif
+#ifdef MATHICGB_USE_CUSTOM_ATOMIC_8BYTE
   template<class T> struct SeqCst<T, 8> {
     static T load(const T& ref) {
       return (T)_InterlockedOr64((volatile _LONGLONG*)&ref, 0);
     }
     static void store(const T value, T& ref) {
-      _InterlockedExchange64((volatile _LONGLONG*)&ref, (_LONGLONG)value);
-    }
+      _InterlockedExchange64((volatile _LONGLONG*)&ref, (_LONGLONG)value);
+    }
   };
 #endif
   template<class T> struct SeqCstSelect : public SeqCst<T, sizeof(T)> {};
@@ -122,232 +122,232 @@ namespace AtomicInternal {
     T mValue;
   };
 
-  template<class T, size_t size>
-  struct ChooseAtomic {
-    typedef FakeAtomic<T> type;
-  };
+  template<class T, size_t size>
+  struct ChooseAtomic {
+    typedef FakeAtomic<T> type;
+  };
 
 #else
-  /// Class for deciding which implementation of atomic to use. The default is
-  /// to use std::atomic which is a fine choice if std::atomic is implemented
-  /// in a reasonable way by the standard library implementation you are using.
-  template<class T, size_t size>
-  struct ChooseAtomic {
-    typedef std::atomic<T> type;
-  };
+  /// Class for deciding which implementation of atomic to use. The default is
+  /// to use std::atomic which is a fine choice if std::atomic is implemented
+  /// in a reasonable way by the standard library implementation you are using.
+  template<class T, size_t size>
+  struct ChooseAtomic {
+    typedef std::atomic<T> type;
+  };
 #endif
-}
-
-#ifdef MATHICGB_USE_CUSTOM_ATOMIC_X86_X64
-namespace AtomicInternal {
-  /// Custom Atomic class for x86 and x64. Uses special compiler instructions
-  /// for barriers. Only instantiate this for sizes where aligned reads and
-  /// writes are guaranteed to be atomic - this class only takes care of the
-  /// ordering constraints using CPU and compiler fences. Since the directives
-  /// to achieve this are coming from the compiler it is very strange that
-  /// any compiler ships with a std::atomic that is worse than this - but
-  /// that is very much the case.
-  ///
-  /// There are 5 kinds of reorderings that we are concerned with here. Let
-  /// S,S' be stores and let L,L' be loads. Note that these short-hands may
-  /// be idiosyncratic - feel free to find some standard terminology from
-  /// some prominent source and fix this to reflect that.
-  ///
-  ///   SS: Store-after-store: Reorder S,S' to S',S
-  ///   SL: Store-after-load: Reorder S,L to L,S
-  ///   LS: Load-after-store: Reorder L,S to S,L
-  ///   LL: Load-after-load: Reorder L,L' to L',L
-  ///   DLL: Dependent-load-after-load: As LL but L' depends on L. For example
-  ///     reordering the load of p->a to before the load of p is a DLL.
-  ///
-  /// The DEC Alpha processor will perform all of these reorderings in the
-  /// absence of memory barriers telling it not to do that, including DLL.
-  /// DLL can happen on DEC Alpha if p->a is cached locally while p is not.
-  /// Then p will be loaded from memory while p->a is loaded from the cache,
-  /// which is functionally identical to loading p->a before p since we may
-  /// see a value of p->a that was stored before the value of p. This happens
-  /// even if the processor that stored p did a full memory barrier between
-  /// storing p->a and storing p.
-  ///
-  /// Compilers will also perform all of these reorderings to optimize the
-  /// code - even including DLL. DLL happens if the compiler guesses what
-  /// the value of p is, loads p->a and then checks that the guess for p
-  /// was correct. This directly causes p->a to be actually loaded before p.
-  /// These kinds of optimizations turn up in profile-driven optimization,
-  /// but it is always allowed unless we tell the compiler not to do it.
-  ///
-  /// You can check this out here:
-  ///   http://en.wikipedia.org/wiki/Memory_ordering
-  ///
-  /// On x86 and x64 only SL is done by the CPU, so we need a CPU barrier to
-  /// prevent that and nothing else. The compiler is free to perform all of
-  /// these reorderings, so we need lots of compiler optimization barriers
-  /// to deal with all of these cases.
-  ///
-  /// Some of the quotes below are from
-  ///
-  ///   http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1525.htm
-  template<class T>
-  class CustomAtomicX86X64 {
-  public:
-    CustomAtomicX86X64(): mValue() {}
-    CustomAtomicX86X64(T value): mValue(value) {}
-
-    MATHICGB_INLINE
-    T load(const std::memory_order order) const {
-      switch (order) {
-      case std::memory_order_relaxed:
-        // The only constraint here is that if you read *p, then you will never
-        // after that read a value of *p that was stored before the value
-        // you just read, where "before" is in terms of either the same thread
-        // that did the writing or external synchronization of another thread
-        // with this thread. This is automatically guaranteed on this platform
-        // and the compiler cannot break this guarantee.
-        return mValue;
-
-      case std::memory_order_consume: {
-        // Loads in this thread that depend on the loaded value must not be
-        // reordered to before this load. So no DLL reorderings past this
-        // load from after to before (up). So we need a read barrier AFTER the
-        // load. It is a compiler only barrier since the CPU does not do DLL
-        // reorderings. 
-        const auto value = mValue;
-        MATHICGB_COMPILER_READ_MEMORY_BARRIER;
-        return value;
-      }
-
-      case std::memory_order_acquire: {
-        // Loads in this thread must not be reordered to before this load.
-        // So no LL reorderings past this load from after to before (up).
-        // So we need a barrier AFTER the load. It is a compiler only barrier
-        // since the CPU does not do LL reorderings.
-        const auto value = mValue;
-        MATHICGB_COMPILER_READ_MEMORY_BARRIER;
-        return value;
-      }
-
-      case std::memory_order_seq_cst:
-        // There must be some global order in which all sequentially consistent
-        // atomic operations are considered to have happened. This is automatic
-        // on x86, ARM, SPARC and x64 too for reads (but not writes) - see:
-        //   http://www.stdthread.co.uk/forum/index.php?topic=72.0
-        return MATHICGB_SEQ_CST_LOAD(mValue);
-
-      case std::memory_order_release: // not available for load
-      case std::memory_order_acq_rel: // not available for load
-      default:
-        MATHICGB_UNREACHABLE;
-      }
-    }
-
-    MATHICGB_INLINE
-    void store(const T value, const std::memory_order order) {
-      switch (order) {
-      case std::memory_order_relaxed:
-        // No ordering constraints here other than atomicity and as noted
-        // for relaxed load so we can just store directly.
-        mValue = value;
-        break;
-
-      case std::memory_order_release:
-        // Stores in this thread must not be reordered to after this store.
-        // So no SS reorderings past this store from before to after (down).
-        // So we need a barrier BEFORE the store. It is a compiler only barrier
-        // since the CPU does not do SS reorderings.
-        MATHICGB_COMPILER_WRITE_MEMORY_BARRIER;
-        mValue = value;
-        break;
-
-      case std::memory_order_acq_rel:
-        // Combine the guarantees for std::memory_order_acquire and
-        // std::memory_order_release. So no loads moved up past here (SL) and
-        // no stores moved down past here (SS). We need a compiler barrier
-        // BEFORE the store to avoid SS and a CPU barrier (which also implies a
-        // compiler barrier) AFTER the store to avoid SL, since the CPU can in
-        // fact do SL reordering.
-        MATHICGB_COMPILER_WRITE_MEMORY_BARRIER;
-        mValue = value;
-        MATHICGB_CPU_READ_WRITE_MEMORY_BARRIER;
-        break;
-
-      case std::memory_order_seq_cst:
-        // All operations happen in a globally consistent linear order. I am not
-        // sure if this can be achieved with barriers, but I know that it can be
-        // achieved with locked instructions, so I am using that.
-        MATHICGB_SEQ_CST_STORE(value, mValue);
-        break;
-
-      case std::memory_order_consume: // not available for store
-      case std::memory_order_acquire: // not available for store
-      default:
-        MATHICGB_UNREACHABLE;
-      }
-    }
-
-  private:
-    T mValue;
-  };
-
-#ifdef MATHICGB_USE_CUSTOM_ATOMIC_4BYTE
-  template<class T>
-  struct ChooseAtomic<T, 4> {
-    typedef CustomAtomicX86X64<T> type;
-  };
-#endif
-
-#ifdef MATHICGB_USE_CUSTOM_ATOMIC_8BYTE
-  template<class T>
-  struct ChooseAtomic<T, 8> {
-    typedef CustomAtomicX86X64<T> type;
-  };
-#endif
-}
-#endif
-
-/// This class is equivalent to std::atomic<T>. Some functions from the
-/// interface of std::atomic are missing - add them as necessary. Do not add
-/// operator= and operator T() --- it is better to make the code explicit
-/// about when and how loading and storing of atomic variables occurs.
-///
-/// The purpose of the class is that it performs far better than
-/// std::atomic for some implementations. For example the std::atomic in MSVC
-/// 2012 performs a compare-and-swap operation on a load even with the
-/// parameter std::memory_order_relaxed.
-///
-/// We force all the functions to be inline because they can contain switches
-/// on the value of std::memory_order. This will usually be a compile-time
-/// constant parameter so that after inlining the switch will disappear. Yet
-/// the code size of the switch may make some compilers avoid the inline.
-template<class T>
-class Atomic {
-public:
-  Atomic(): mValue() {}
-  Atomic(T value): mValue(value) {}
-
+}
+
+#ifdef MATHICGB_USE_CUSTOM_ATOMIC_X86_X64
+namespace AtomicInternal {
+  /// Custom Atomic class for x86 and x64. Uses special compiler instructions
+  /// for barriers. Only instantiate this for sizes where aligned reads and
+  /// writes are guaranteed to be atomic - this class only takes care of the
+  /// ordering constraints using CPU and compiler fences. Since the directives
+  /// to achieve this are coming from the compiler it is very strange that
+  /// any compiler ships with a std::atomic that is worse than this - but
+  /// that is very much the case.
+  ///
+  /// There are 5 kinds of reorderings that we are concerned with here. Let
+  /// S,S' be stores and let L,L' be loads. Note that these short-hands may
+  /// be idiosyncratic - feel free to find some standard terminology from
+  /// some prominent source and fix this to reflect that.
+  ///
+  ///   SS: Store-after-store: Reorder S,S' to S',S
+  ///   SL: Store-after-load: Reorder S,L to L,S
+  ///   LS: Load-after-store: Reorder L,S to S,L
+  ///   LL: Load-after-load: Reorder L,L' to L',L
+  ///   DLL: Dependent-load-after-load: As LL but L' depends on L. For example
+  ///     reordering the load of p->a to before the load of p is a DLL.
+  ///
+  /// The DEC Alpha processor will perform all of these reorderings in the
+  /// absence of memory barriers telling it not to do that, including DLL.
+  /// DLL can happen on DEC Alpha if p->a is cached locally while p is not.
+  /// Then p will be loaded from memory while p->a is loaded from the cache,
+  /// which is functionally identical to loading p->a before p since we may
+  /// see a value of p->a that was stored before the value of p. This happens
+  /// even if the processor that stored p did a full memory barrier between
+  /// storing p->a and storing p.
+  ///
+  /// Compilers will also perform all of these reorderings to optimize the
+  /// code - even including DLL. DLL happens if the compiler guesses what
+  /// the value of p is, loads p->a and then checks that the guess for p
+  /// was correct. This directly causes p->a to be actually loaded before p.
+  /// These kinds of optimizations turn up in profile-driven optimization,
+  /// but it is always allowed unless we tell the compiler not to do it.
+  ///
+  /// You can check this out here:
+  ///   http://en.wikipedia.org/wiki/Memory_ordering
+  ///
+  /// On x86 and x64 only SL is done by the CPU, so we need a CPU barrier to
+  /// prevent that and nothing else. The compiler is free to perform all of
+  /// these reorderings, so we need lots of compiler optimization barriers
+  /// to deal with all of these cases.
+  ///
+  /// Some of the quotes below are from
+  ///
+  ///   http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1525.htm
+  template<class T>
+  class CustomAtomicX86X64 {
+  public:
+    CustomAtomicX86X64(): mValue() {}
+    CustomAtomicX86X64(T value): mValue(value) {}
+
+    MATHICGB_INLINE
+    T load(const std::memory_order order) const {
+      switch (order) {
+      case std::memory_order_relaxed:
+        // The only constraint here is that if you read *p, then you will never
+        // after that read a value of *p that was stored before the value
+        // you just read, where "before" is in terms of either the same thread
+        // that did the writing or external synchronization of another thread
+        // with this thread. This is automatically guaranteed on this platform
+        // and the compiler cannot break this guarantee.
+        return mValue;
+
+      case std::memory_order_consume: {
+        // Loads in this thread that depend on the loaded value must not be
+        // reordered to before this load. So no DLL reorderings past this
+        // load from after to before (up). So we need a read barrier AFTER the
+        // load. It is a compiler only barrier since the CPU does not do DLL
+        // reorderings. 
+        const auto value = mValue;
+        MATHICGB_COMPILER_READ_MEMORY_BARRIER;
+        return value;
+      }
+
+      case std::memory_order_acquire: {
+        // Loads in this thread must not be reordered to before this load.
+        // So no LL reorderings past this load from after to before (up).
+        // So we need a barrier AFTER the load. It is a compiler only barrier
+        // since the CPU does not do LL reorderings.
+        const auto value = mValue;
+        MATHICGB_COMPILER_READ_MEMORY_BARRIER;
+        return value;
+      }
+
+      case std::memory_order_seq_cst:
+        // There must be some global order in which all sequentially consistent
+        // atomic operations are considered to have happened. This is automatic
+        // on x86, ARM, SPARC and x64 too for reads (but not writes) - see:
+        //   http://www.stdthread.co.uk/forum/index.php?topic=72.0
+        return MATHICGB_SEQ_CST_LOAD(mValue);
+
+      case std::memory_order_release: // not available for load
+      case std::memory_order_acq_rel: // not available for load
+      default:
+        MATHICGB_UNREACHABLE;
+      }
+    }
+
+    MATHICGB_INLINE
+    void store(const T value, const std::memory_order order) {
+      switch (order) {
+      case std::memory_order_relaxed:
+        // No ordering constraints here other than atomicity and as noted
+        // for relaxed load so we can just store directly.
+        mValue = value;
+        break;
+
+      case std::memory_order_release:
+        // Stores in this thread must not be reordered to after this store.
+        // So no SS reorderings past this store from before to after (down).
+        // So we need a barrier BEFORE the store. It is a compiler only barrier
+        // since the CPU does not do SS reorderings.
+        MATHICGB_COMPILER_WRITE_MEMORY_BARRIER;
+        mValue = value;
+        break;
+
+      case std::memory_order_acq_rel:
+        // Combine the guarantees for std::memory_order_acquire and
+        // std::memory_order_release. So no loads moved up past here (SL) and
+        // no stores moved down past here (SS). We need a compiler barrier
+        // BEFORE the store to avoid SS and a CPU barrier (which also implies a
+        // compiler barrier) AFTER the store to avoid SL, since the CPU can in
+        // fact do SL reordering.
+        MATHICGB_COMPILER_WRITE_MEMORY_BARRIER;
+        mValue = value;
+        MATHICGB_CPU_READ_WRITE_MEMORY_BARRIER;
+        break;
+
+      case std::memory_order_seq_cst:
+        // All operations happen in a globally consistent linear order. I am not
+        // sure if this can be achieved with barriers, but I know that it can be
+        // achieved with locked instructions, so I am using that.
+        MATHICGB_SEQ_CST_STORE(value, mValue);
+        break;
+
+      case std::memory_order_consume: // not available for store
+      case std::memory_order_acquire: // not available for store
+      default:
+        MATHICGB_UNREACHABLE;
+      }
+    }
+
+  private:
+    T mValue;
+  };
+
+#ifdef MATHICGB_USE_CUSTOM_ATOMIC_4BYTE
+  template<class T>
+  struct ChooseAtomic<T, 4> {
+    typedef CustomAtomicX86X64<T> type;
+  };
+#endif
+
+#ifdef MATHICGB_USE_CUSTOM_ATOMIC_8BYTE
+  template<class T>
+  struct ChooseAtomic<T, 8> {
+    typedef CustomAtomicX86X64<T> type;
+  };
+#endif
+}
+#endif
+
+/// This class is equivalent to std::atomic<T>. Some functions from the
+/// interface of std::atomic are missing - add them as necessary. Do not add
+/// operator= and operator T() --- it is better to make the code explicit
+/// about when and how loading and storing of atomic variables occurs.
+///
+/// The purpose of the class is that it performs far better than
+/// std::atomic for some implementations. For example the std::atomic in MSVC
+/// 2012 performs a compare-and-swap operation on a load even with the
+/// parameter std::memory_order_relaxed.
+///
+/// We force all the functions to be inline because they can contain switches
+/// on the value of std::memory_order. This will usually be a compile-time
+/// constant parameter so that after inlining the switch will disappear. Yet
+/// the code size of the switch may make some compilers avoid the inline.
+template<class T>
+class Atomic {
+public:
+  Atomic(): mValue() {}
+  Atomic(T value): mValue(value) {}
+
   MATHICGB_INLINE
   T load(const std::memory_order order = std::memory_order_seq_cst) const {
-    MATHICGB_ASSERT(debugAligned());
-    return mValue.load(order);
-  }
-
+    MATHICGB_ASSERT(debugAligned());
+    return mValue.load(order);
+  }
+
   MATHICGB_INLINE
   void store(
     const T value,
     const std::memory_order order = std::memory_order_seq_cst
   ) {
-    MATHICGB_ASSERT(debugAligned());
-    mValue.store(value, order);
-  }
-
-private:
-  Atomic(const Atomic<T>&); // not available
-  void operator=(const Atomic<T>&); // not available
-
-  bool debugAligned() const {
-    return reinterpret_cast<size_t>(&mValue) % sizeof(void*) == 0;
-  }
-
-  typename AtomicInternal::ChooseAtomic<T, sizeof(T)>::type mValue;
-};
-
-#endif
+    MATHICGB_ASSERT(debugAligned());
+    mValue.store(value, order);
+  }
+
+private:
+  Atomic(const Atomic<T>&); // not available
+  void operator=(const Atomic<T>&); // not available
+
+  bool debugAligned() const {
+    return reinterpret_cast<size_t>(&mValue) % sizeof(void*) == 0;
+  }
+
+  typename AtomicInternal::ChooseAtomic<T, sizeof(T)>::type mValue;
+};
+
+#endif
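
The long comment in Atomic.hpp above explains which reorderings (SS, SL, LS,
LL, DLL) the compiler and the CPU may perform and which barrier each memory
order therefore needs. As a quick illustration of how the Atomic<T> wrapper
declared above is meant to be used, here is a small publish/observe sketch.
It is not part of the commit; the writer/reader functions and the sharedData
and ready variables are made up for illustration, while the load/store
interface taking a std::memory_order is exactly as declared in the diff.

    int sharedData = 0;   // plain data, written before the flag is published
    Atomic<int> ready;    // value-initialized to 0 by the default constructor

    void writer() {
      sharedData = 42;                            // ordinary store
      // release: no earlier store may be moved below this store
      ready.store(1, std::memory_order_release);
    }

    void reader() {
      // acquire: no later load may be moved above this load
      if (ready.load(std::memory_order_acquire) == 1) {
        // if the flag is observed, sharedData == 42 is guaranteed to be visible
      }
    }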
diff --git a/src/mathicgb/F4MatrixReducer.cpp b/src/mathicgb/F4MatrixReducer.cpp
index aac24b4..574124d 100755
--- a/src/mathicgb/F4MatrixReducer.cpp
+++ b/src/mathicgb/F4MatrixReducer.cpp
@@ -75,7 +75,6 @@ namespace {
 
     void addRow(const SparseMatrix& matrix, SparseMatrix::RowIndex row) {
       MATHICGB_ASSERT(row < matrix.rowCount());
-      MATHICGB_ASSERT(matrix.colCount() == colCount());
       const auto end = matrix.rowEnd(row);
       for (auto it = matrix.rowBegin(row); it != end; ++it) {
         MATHICGB_ASSERT(it.index() < colCount());
@@ -141,7 +140,6 @@ namespace {
       const SparseMatrix& matrix,
       const SparseMatrix::Scalar modulus
     ) {
-      MATHICGB_ASSERT(pivotRow < matrix.rowCount());
       MATHICGB_ASSERT(matrix.rowBegin(pivotRow).scalar() == 1); // unitary
       MATHICGB_ASSERT(modulus > 1);
 
@@ -169,11 +167,13 @@ namespace {
     const SparseMatrix& reduceByLeft = qm.topLeft;
     const SparseMatrix& reduceByRight = qm.topRight;
 
-    MATHICGB_ASSERT(reduceByLeft.colCount() == reduceByLeft.rowCount());
-    const auto pivotCount = reduceByLeft.colCount();
+    const auto leftColCount =
+      static_cast<SparseMatrix::ColIndex>(qm.leftColumnMonomials.size());
+    const auto rightColCount =
+      static_cast<SparseMatrix::ColIndex>(qm.rightColumnMonomials.size());
+    MATHICGB_ASSERT(leftColCount == reduceByLeft.rowCount());
+    const auto pivotCount = leftColCount;
     const auto rowCount = toReduceLeft.rowCount();
-    const auto colCountLeft = toReduceLeft.colCount();
-    const auto colCountRight = toReduceRight.colCount();
 
     // ** pre-calculate what rows are pivots for what columns.
 
@@ -192,7 +192,7 @@ namespace {
       rowThatReducesCol[col] = pivot;
     }
 
-    SparseMatrix reduced(colCountRight, qm.topRight.memoryQuantum());
+    SparseMatrix reduced(qm.topRight.memoryQuantum());
 
 #ifdef _OPENMP
     std::vector<DenseRow<uint64> > denseRowPerThread(threadCount);
@@ -200,7 +200,7 @@ namespace {
     DenseRow<uint64> denseRow;
 #endif
 
-    SparseMatrix tmp(pivotCount, qm.topRight.memoryQuantum());
+    SparseMatrix tmp(qm.topRight.memoryQuantum());
 
     std::vector<SparseMatrix::RowIndex> rowOrder(rowCount);
 
@@ -211,9 +211,9 @@ namespace {
 #ifdef _OPENMP
       auto& denseRow = denseRowPerThread[omp_get_thread_num()];
 #endif
-      denseRow.clear(colCountLeft);
+      denseRow.clear(leftColCount);
       denseRow.addRow(toReduceLeft, row);
-      MATHICGB_ASSERT(colCountLeft == pivotCount);
+      MATHICGB_ASSERT(leftColCount == pivotCount);
 
       for (size_t pivot = 0; pivot < pivotCount; ++pivot) {
         if (denseRow[pivot] == 0)
@@ -253,7 +253,7 @@ namespace {
 #endif
       size_t row = rowOrder[i];
 
-      denseRow.clear(colCountRight);
+      denseRow.clear(rightColCount);
       denseRow.addRow(toReduceRight, row);
       auto it = tmp.rowBegin(i);
       const auto end = tmp.rowEnd(i);
@@ -266,7 +266,7 @@ namespace {
 #pragma omp critical
       {
         bool zero = true;
-	    for (SparseMatrix::ColIndex col = 0; col < colCountRight; ++col) {
+	    for (SparseMatrix::ColIndex col = 0; col < rightColCount; ++col) {
           const auto entry =
             static_cast<SparseMatrix::Scalar>(denseRow[col] % modulus);
           if (entry != 0) {
@@ -281,12 +281,15 @@ namespace {
     return std::move(reduced);
   }
 
-  void reduceToEchelonForm
-  (SparseMatrix& toReduce, SparseMatrix::Scalar modulus, int threadCount) {
+  void reduceToEchelonForm(
+    SparseMatrix& toReduce,
+    const SparseMatrix::ColIndex colCount,
+    const SparseMatrix::Scalar modulus,
+    int threadCount
+  ) {
     // making no assumptions on toReduce except no zero rows
 
     SparseMatrix::RowIndex const rowCount = toReduce.rowCount();
-    SparseMatrix::ColIndex const colCount = toReduce.colCount();
 
     // dense representation 
     std::vector<DenseRow<uint64> > dense(rowCount);
@@ -303,7 +306,7 @@ namespace {
     std::vector<SparseMatrix::ColIndex> leadCols(rowCount);
 
     // pivot rows get copied here before being used to reduce the matrix.
-    SparseMatrix reduced(colCount, toReduce.memoryQuantum());
+    SparseMatrix reduced(toReduce.memoryQuantum());
 
     // (col,row) in nextReducers, then use row as a pivot in column col
     // for the next iteration.
@@ -373,7 +376,6 @@ namespace {
       reduced.clear(colCount);
       std::sort(nextReducers.begin(), nextReducers.end());
       for (size_t i = 0; i < nextReducers.size(); ++i) {
-        MATHICGB_ASSERT(reduced.colCount() == colCount);
         size_t const row = nextReducers[i].second;
 
         MATHICGB_ASSERT(static_cast<bool>
@@ -409,8 +411,10 @@ SparseMatrix F4MatrixReducer::reduce(const QuadMatrix& matrix) {
   if (tracingLevel >= 3)
     matrix.printSizes(std::cerr);
 
+  const auto rightColCount =
+    static_cast<SparseMatrix::ColIndex>(matrix.rightColumnMonomials.size());
   SparseMatrix newPivots(::reduce(matrix, mModulus, mThreadCount));
-  ::reduceToEchelonForm(newPivots, mModulus, mThreadCount);
+  ::reduceToEchelonForm(newPivots, rightColCount, mModulus, mThreadCount);
   return std::move(newPivots);
 }
 
diff --git a/src/mathicgb/QuadMatrix.cpp b/src/mathicgb/QuadMatrix.cpp
index 68031d0..bd57bb5 100755
--- a/src/mathicgb/QuadMatrix.cpp
+++ b/src/mathicgb/QuadMatrix.cpp
@@ -9,13 +9,14 @@ bool QuadMatrix::debugAssertValid() const {
 #ifndef MATHICGB_DEBUG
   return true;
 #else
-  MATHICGB_ASSERT(topLeft.colCount() == bottomLeft.colCount());
+  MATHICGB_ASSERT(topLeft.computeColCount() <= leftColumnMonomials.size());
+  MATHICGB_ASSERT(bottomLeft.computeColCount() <= leftColumnMonomials.size());
   MATHICGB_ASSERT(topLeft.rowCount() == topRight.rowCount());
-  MATHICGB_ASSERT(topLeft.colCount() == leftColumnMonomials.size());
 
-  MATHICGB_ASSERT(bottomRight.colCount() == topRight.colCount());
+  MATHICGB_ASSERT(topRight.computeColCount() <= rightColumnMonomials.size());
+  MATHICGB_ASSERT(bottomRight.computeColCount() <=
+    rightColumnMonomials.size());
   MATHICGB_ASSERT(bottomRight.rowCount() == bottomLeft.rowCount());
-  MATHICGB_ASSERT(bottomRight.colCount() == rightColumnMonomials.size());   
   return true;
 #endif
 }
@@ -33,13 +34,15 @@ void QuadMatrix::print(std::ostream& out) const {
 
   // column monomials
   out << "Left columns:";
-  for (ColIndex leftCol = 0; leftCol < topLeft.colCount(); ++leftCol) {
+  const auto leftColCount = leftColumnMonomials.size();
+  for (ColIndex leftCol = 0; leftCol < leftColCount; ++leftCol) {
     out << ' ';
     ring->monomialDisplay(out, leftColumnMonomials[leftCol], false, true);
   }
 
   out << "\nRight columns:";
-  for (ColIndex rightCol = 0; rightCol < topRight.colCount(); ++rightCol) {
+  const auto rightColCount = rightColumnMonomials.size();
+  for (ColIndex rightCol = 0; rightCol < rightColCount; ++rightCol) {
     out << ' ';
     ring->monomialDisplay(out, rightColumnMonomials[rightCol], false, true);
   }
@@ -84,8 +87,8 @@ void QuadMatrix::printSizes(std::ostream& out) const {
   const char* const line = "----------";
 
   pr[0] << '\n';
-  pr[1] << ColPr::commafy(topLeft.colCount()) << "  \n";
-  pr[2] << ColPr::commafy(topRight.colCount()) << "  \n";
+  pr[1] << ColPr::commafy(leftColumnMonomials.size()) << "  \n";
+  pr[2] << ColPr::commafy(rightColumnMonomials.size()) << "  \n";
 
   pr[0] << "/\n";
   pr[1] << line << "|\n";
@@ -163,6 +166,9 @@ QuadMatrix QuadMatrix::toCanonical() const {
     const SparseMatrix& mMatrix;
   };
 
+  const auto leftColCount = leftColumnMonomials.size();
+  const auto rightColCount = rightColumnMonomials.size();
+
   // todo: eliminate left/right code duplication here
   QuadMatrix matrix;
   { // left side
@@ -174,8 +180,8 @@ QuadMatrix QuadMatrix::toCanonical() const {
       std::sort(rows.begin(), rows.end(), comparer);
     }
 
-    matrix.topLeft.clear(topLeft.colCount());
-    matrix.topRight.clear(topRight.colCount());
+    matrix.topLeft.clear();
+    matrix.topRight.clear();
     for (size_t i = 0; i < rows.size(); ++i) {
       matrix.topLeft.appendRow(topLeft, rows[i]);
       matrix.topRight.appendRow(topRight, rows[i]);
@@ -190,8 +196,8 @@ QuadMatrix QuadMatrix::toCanonical() const {
       std::sort(rows.begin(), rows.end(), comparer);
     }
 
-    matrix.bottomLeft.clear(bottomLeft.colCount());
-    matrix.bottomRight.clear(bottomRight.colCount());
+    matrix.bottomLeft.clear();
+    matrix.bottomRight.clear();
     for (size_t i = 0; i < rows.size(); ++i) {
       matrix.bottomLeft.appendRow(bottomLeft, rows[i]);
       matrix.bottomRight.appendRow(bottomRight, rows[i]);
diff --git a/src/mathicgb/QuadMatrixBuilder.cpp b/src/mathicgb/QuadMatrixBuilder.cpp
index a1ae117..5401221 100755
--- a/src/mathicgb/QuadMatrixBuilder.cpp
+++ b/src/mathicgb/QuadMatrixBuilder.cpp
@@ -11,10 +11,10 @@ QuadMatrixBuilder::QuadMatrixBuilder(
   const size_t memoryQuantum
 ):
   mMonomialToCol(ring),
-  mTopLeft(0, memoryQuantum),
-  mTopRight(0, memoryQuantum),
-  mBottomLeft(0, memoryQuantum),
-  mBottomRight(0, memoryQuantum)
+  mTopLeft(memoryQuantum),
+  mTopRight(memoryQuantum),
+  mBottomLeft(memoryQuantum),
+  mBottomRight(memoryQuantum)
 {}
 
 void QuadMatrixBuilder::takeRowsFrom(QuadMatrix&& matrix) {
@@ -25,14 +25,12 @@ void QuadMatrixBuilder::takeRowsFrom(QuadMatrix&& matrix) {
     !matrix.rightColumnMonomials.empty()
   ) {
     // check left column monomials are the same
-    MATHICGB_ASSERT(matrix.leftColumnMonomials.size() <= leftColCount());
     for (ColIndex col = 0; col < matrix.leftColumnMonomials.size(); ++col) {
       MATHICGB_ASSERT(ring().monomialEQ
         (matrix.leftColumnMonomials[col], monomialOfLeftCol(col)));
     }
 
     // check right column monomials are the same
-    MATHICGB_ASSERT(matrix.rightColumnMonomials.size() <= rightColCount());
     for (ColIndex col = 0; col < matrix.rightColumnMonomials.size(); ++col) {
       MATHICGB_ASSERT(ring().monomialEQ
         (matrix.rightColumnMonomials[col], monomialOfRightCol(col)));
@@ -67,11 +65,10 @@ namespace {
    const PolyRing& ring,
    const bool left)
   {
-    MATHICGB_ASSERT(top.colCount() == bottom.colCount());
-    MATHICGB_ASSERT(toMonomial.size() == bottom.colCount());
     MATHICGB_ASSERT(typename ToCol::Reader(toCol).find(mono) == 0);
 
-    const QuadMatrixBuilder::ColIndex colCount = top.colCount();
+    const auto colCount =
+      static_cast<QuadMatrixBuilder::ColIndex>(toMonomial.size());
     if (colCount == std::numeric_limits<QuadMatrixBuilder::ColIndex>::max())
       throw std::overflow_error("Too many columns in QuadMatrixBuilder");
 
@@ -89,10 +86,6 @@ namespace {
     }
     toMonomial.back() = copied;
 
-    top.appendColumn();
-    MATHICGB_ASSERT(top.colCount() == colCount + 1);
-    bottom.appendColumn();
-    MATHICGB_ASSERT(bottom.colCount() == colCount + 1);
     return QuadMatrixBuilder::LeftRightColIndex(colCount, left);
   }
 }
@@ -107,8 +100,6 @@ QuadMatrixBuilder::LeftRightColIndex QuadMatrixBuilder::createColumnLeft
      mMonomialToCol,
      ring(),
      true);
-  MATHICGB_ASSERT
-    (findColumn(monomialToBeCopied).leftIndex() == leftColCount() - 1);
 }
 
 QuadMatrixBuilder::LeftRightColIndex QuadMatrixBuilder::createColumnRight
@@ -121,8 +112,6 @@ QuadMatrixBuilder::LeftRightColIndex QuadMatrixBuilder::createColumnRight
      mMonomialToCol,
      ring(),
      false);
-  MATHICGB_ASSERT
-    (findColumn(monomialToBeCopied).rightIndex() == rightColCount() - 1);
 }
 
 namespace {
@@ -181,15 +170,14 @@ namespace {
 
   // The purpose of this function is to avoid code duplication for
   // left/right variants.
-  void sortColumns
-  (const FreeModuleOrder& order,
-   std::vector<monomial>& monomials,
-   SparseMatrix& topMatrix,
-   SparseMatrix& bottomMatrix)
-  {
+  void sortColumns(
+    const FreeModuleOrder& order,
+    std::vector<monomial>& monomials,
+    SparseMatrix& topMatrix,
+    SparseMatrix& bottomMatrix
+  ) {
     typedef SparseMatrix::ColIndex ColIndex;
-    MATHICGB_ASSERT(topMatrix.colCount() == bottomMatrix.colCount());
-    const ColIndex colCount = topMatrix.colCount();
+    const auto colCount = static_cast<ColIndex>(monomials.size());
 
     // Monomial needs to be non-const as we are going to put these
     // monomials back into the vector of monomials which is not const.
@@ -269,13 +257,15 @@ void QuadMatrixBuilder::print(std::ostream& out) const {
 
   // column monomials
   out << "Left columns:";
-  for (ColIndex leftCol = 0; leftCol < leftColCount(); ++leftCol) {
+  const auto leftColCount = static_cast<ColIndex>(mMonomialsLeft.size());
+  for (ColIndex leftCol = 0; leftCol < leftColCount; ++leftCol) {
     out << ' ';
     ring().monomialDisplay(out, monomialOfLeftCol(leftCol), false, true);
   }
 
   out << "\nRight columns:";
-  for (ColIndex rightCol = 0; rightCol < rightColCount(); ++rightCol) {
+  const auto rightColCount = static_cast<ColIndex>(mMonomialsRight.size());
+  for (ColIndex rightCol = 0; rightCol < rightColCount; ++rightCol) {
     out << ' ';
     ring().monomialDisplay(out, monomialOfRightCol(rightCol), false, true);
   }
diff --git a/src/mathicgb/QuadMatrixBuilder.hpp b/src/mathicgb/QuadMatrixBuilder.hpp
index 964da39..65451df 100755
--- a/src/mathicgb/QuadMatrixBuilder.hpp
+++ b/src/mathicgb/QuadMatrixBuilder.hpp
@@ -93,17 +93,24 @@ class QuadMatrixBuilder {
     bool mLeft;
   };
 
+  ColIndex leftColCount() const {
+    return static_cast<ColIndex>(mMonomialsLeft.size());
+  }
+
+  ColIndex rightColCount() const {
+    return static_cast<ColIndex>(mMonomialsRight.size());
+  }
+
+
   // **** Appending entries to top matrices.
   // Same interface as SparseMatrix except with two matrices and here
   // you have to create columns before you can use them.
 
   void appendEntryTopLeft(ColIndex col, Scalar scalar) {
-    MATHICGB_ASSERT(col < leftColCount());
     mTopLeft.appendEntry(col, scalar);
   }
 
   void appendEntryTopRight(ColIndex col, Scalar scalar) {
-    MATHICGB_ASSERT(col < rightColCount());
     mTopRight.appendEntry(col, scalar);
   }
 
@@ -125,12 +132,10 @@ class QuadMatrixBuilder {
   // you have to create columns before you can use them.
 
   void appendEntryBottomLeft(ColIndex col, Scalar scalar) {
-    MATHICGB_ASSERT(col < leftColCount());
     mBottomLeft.appendEntry(col, scalar);
   }
 
   void appendEntryBottomRight(ColIndex col, Scalar scalar) {
-    MATHICGB_ASSERT(col < rightColCount());
     mBottomRight.appendEntry(col, scalar);
   }
 
@@ -256,16 +261,6 @@ class QuadMatrixBuilder {
 
   const PolyRing& ring() const {return mMonomialToCol.ring();}
 
-  ColIndex leftColCount() const {
-    MATHICGB_ASSERT(topLeft().colCount() == bottomLeft().colCount());
-    return topLeft().colCount();
-  }
-
-  ColIndex rightColCount() const {
-    MATHICGB_ASSERT(topRight().colCount() == bottomRight().colCount());
-    return topRight().colCount();
-  }
-
   /// Returns the built matrix and sets the builder to a state
   /// with no columns and no rows.
   QuadMatrix buildMatrixAndClear();
diff --git a/src/mathicgb/SparseMatrix.cpp b/src/mathicgb/SparseMatrix.cpp
index 37e8dcd..3cef908 100755
--- a/src/mathicgb/SparseMatrix.cpp
+++ b/src/mathicgb/SparseMatrix.cpp
@@ -5,15 +5,11 @@
 #include <algorithm>
 
 void SparseMatrix::takeRowsFrom(SparseMatrix&& matrix) {
-  MATHICGB_ASSERT(matrix.colCount() <= colCount());
-
   if (matrix.mRows.empty())
     return;
 
   if (mRows.empty()) {
-    const auto savedColCount = colCount();
     *this = std::move(matrix);
-    mColCount = savedColCount;
     return;
   }
 
@@ -36,7 +32,6 @@ void SparseMatrix::rowToPolynomial(
   const std::vector<monomial>& colMonomials,
   Poly& poly
 ) {
-  MATHICGB_ASSERT(colMonomials.size() == colCount());
   poly.setToZero();
   poly.reserve(entryCountInRow(row));
   const auto end = rowEnd(row);
@@ -54,7 +49,7 @@ void SparseMatrix::sortRowsByIncreasingPivots() {
   // compute pairs (pivot column index, row)
   std::vector<std::pair<SparseMatrix::ColIndex, SparseMatrix::RowIndex> > order;
   const SparseMatrix::RowIndex lRowCount = rowCount();
-  const SparseMatrix::ColIndex lColCount = colCount();
+  const SparseMatrix::ColIndex lColCount = computeColCount();
   for (SparseMatrix::RowIndex row = 0; row < lRowCount; ++row) {
     if (entryCountInRow(row) == 0)
       order.push_back(std::make_pair(lColCount, row));
@@ -79,14 +74,12 @@ void SparseMatrix::sortRowsByIncreasingPivots() {
 }
 
 void SparseMatrix::applyColumnMap(const std::vector<ColIndex>& colMap) {
-  MATHICGB_ASSERT(colMap.size() >= colCount());
+  MATHICGB_ASSERT(colMap.size() >= computeColCount());
   Block* block = &mBlock;
   for (; block != 0; block = block->mPreviousBlock) {
     const auto end = block->mColIndices.end();
-    for (auto it = block->mColIndices.begin(); it != end; ++it) {
-      MATHICGB_ASSERT(*it < colCount());
+    for (auto it = block->mColIndices.begin(); it != end; ++it)
       *it = colMap[*it];
-    }
   }
 }
 
@@ -96,10 +89,8 @@ void SparseMatrix::print(std::ostream& out) const {
   for (RowIndex row = 0; row < rowCount(); ++row) {
     out << row << ':';
     const auto end = rowEnd(row);
-    for (auto it = rowBegin(row); it != end; ++it) {
-      MATHICGB_ASSERT(it.index() < colCount());
+    for (auto it = rowBegin(row); it != end; ++it)
       out << ' ' << it.index() << '#' << it.scalar();
-    }
     out << '\n';
   }
 }
@@ -145,7 +136,8 @@ void SparseMatrix::appendRow(const SparseMatrix& matrix, const RowIndex row) {
 }
   
 SparseMatrix& SparseMatrix::operator=(const SparseMatrix& matrix) {
-  clear(matrix.colCount());
+  // todo: use copy-swap or copy-move.
+  clear();
   // A version that works on each block would be faster, but this is not
   // used anywhere time-critical right now. Improve this if it turns
   // up in profiling at some point.
@@ -158,10 +150,21 @@ void SparseMatrix::swap(SparseMatrix& matrix) {
   mBlock.swap(matrix.mBlock);
   using std::swap;
   swap(mRows, matrix.mRows);
-  swap(mColCount, matrix.mColCount);
   swap(mMemoryQuantum, matrix.mMemoryQuantum);
 }
 
+SparseMatrix::ColIndex SparseMatrix::computeColCount() const {
+  // Obviously this can be done faster, but there has not been a need for that
+  // so far.
+  ColIndex colCount = 0;
+  for (size_t row = 0; row < rowCount(); ++row) {
+    const auto end = rowEnd(row);
+    for (auto it = rowBegin(row); it != end; ++it)
+      colCount = std::max(colCount, it.index() + 1);
+  }
+  return colCount;
+}
+
 void SparseMatrix::clear(const ColIndex newColCount) {
   Block* block = &mBlock;
   while (block != 0) {
@@ -175,15 +178,13 @@ void SparseMatrix::clear(const ColIndex newColCount) {
   mBlock.mPreviousBlock = 0;
   mBlock.mHasNoRows = true;
   mRows.clear();
-  mColCount = newColCount;
 }
 
 void SparseMatrix::appendRowWithModulus(
   std::vector<uint64> const& v,
   const Scalar modulus
 ) {
-  MATHICGB_ASSERT(v.size() == colCount());
-  const ColIndex count = colCount();
+  const auto count = static_cast<ColIndex>(v.size());
   for (ColIndex col = 0; col < count; ++col) {
     const Scalar scalar = static_cast<Scalar>(v[col] % modulus);
     if (scalar != 0)
@@ -196,14 +197,13 @@ void SparseMatrix::appendRow(
   std::vector<uint64> const& v,
   const ColIndex leadCol
 ) {
-  MATHICGB_ASSERT(v.size() == colCount());
 #ifdef MATHICGB_DEBUG
   for (ColIndex col = 0; col < leadCol; ++col) {
     MATHICGB_ASSERT(v[col] == 0);
   }
 #endif
 
-  const ColIndex count = colCount();
+  const auto count = static_cast<ColIndex>(v.size());
   for (ColIndex col = leadCol; col < count; ++col) {
 	MATHICGB_ASSERT(v[col] < std::numeric_limits<Scalar>::max());
     if (v[col] != 0)
@@ -216,10 +216,9 @@ void SparseMatrix::appendRowWithModulusNormalized(
   std::vector<uint64> const& v,
   const Scalar modulus
 ) {
-  MATHICGB_ASSERT(v.size() == colCount());
   uint16 multiply = 1; 
   bool first = true;
-  const ColIndex count = colCount();
+  const auto count = static_cast<ColIndex>(v.size());
   for (ColIndex col = 0; col < count; ++col) {
     Scalar scalar = static_cast<Scalar>(v[col] % modulus);
     if (scalar == 0)
@@ -251,7 +250,6 @@ bool SparseMatrix::appendRowWithModulusIfNonZero(
 }
 
 void SparseMatrix::trimLeadingZeroColumns(const ColIndex trimThisMany) {
-  MATHICGB_ASSERT(trimThisMany <= colCount());
   Block* block = &mBlock;
   for (; block != 0; block = block->mPreviousBlock) {
     const auto end = block->mColIndices.end();
@@ -260,7 +258,6 @@ void SparseMatrix::trimLeadingZeroColumns(const ColIndex trimThisMany) {
       *it -= trimThisMany;
     }
   }
-  mColCount -= trimThisMany;
 }
 
 void SparseMatrix::reserveFreeEntries(const size_t freeCount) {
diff --git a/src/mathicgb/SparseMatrix.hpp b/src/mathicgb/SparseMatrix.hpp
index d4e99d1..22557fd 100755
--- a/src/mathicgb/SparseMatrix.hpp
+++ b/src/mathicgb/SparseMatrix.hpp
@@ -48,15 +48,13 @@ public:
   class ConstRowIterator;
 
   /// Construct a matrix with no rows.
-  SparseMatrix(const ColIndex colCount = 0, const size_t memoryQuantum = 0):
-    mColCount(colCount),
+  SparseMatrix(const size_t memoryQuantum = 0):
     mMemoryQuantum(memoryQuantum)
   {}
 
   SparseMatrix(SparseMatrix&& matrix):
     mRows(std::move(matrix.mRows)),
     mBlock(std::move(matrix.mBlock)),
-    mColCount(matrix.mColCount),
     mMemoryQuantum(matrix.mMemoryQuantum)
   {
   }
@@ -83,7 +81,7 @@ public:
   void takeRowsFrom(SparseMatrix&& matrix);
 
   RowIndex rowCount() const {return mRows.size();}
-  ColIndex colCount() const {return mColCount;}
+  ColIndex computeColCount() const;
   size_t memoryQuantum() const {return mMemoryQuantum;}
 
   /// Returns the number of entries in the whole matrix. Is not constant time
@@ -180,7 +178,6 @@ public:
   /// after calling this method until rowDone has been called.
   inline void appendEntry(ColIndex colIndex, Scalar scalar) {
     MATHICGB_ASSERT(mBlock.mColIndices.size() == mBlock.mScalars.size());
-    MATHICGB_ASSERT(colIndex < colCount());
 
     MATHICGB_ASSERT(mBlock.mScalars.atCapacity() ==
       mBlock.mColIndices.atCapacity());
@@ -199,20 +196,6 @@ public:
   
   void appendRow(const SparseMatrix& matrix, RowIndex row);
 
-  void ensureAtLeastThisManyColumns(ColIndex count) {
-    if (count > colCount())
-      mColCount = count;
-  }
-
-  /// Adds one more column to the matrix and returns the index of the new
-  /// column.
-  ColIndex appendColumn() {
-    if (colCount() == std::numeric_limits<ColIndex>::max())
-      mathic::reportError("Too many columns in SparseMatrix.");
-    ++mColCount;
-    return mColCount - 1;
-  }
-
   void appendRowWithModulus(const std::vector<uint64>& v, Scalar modulus);
   
   void appendRow(const std::vector<uint64>& v, ColIndex leadCol = 0);
@@ -349,8 +332,6 @@ private:
     bool mHasNoRows; /// true if no rows have been made from this block yet
   };
   Block mBlock;
-
-  ColIndex mColCount;
   size_t mMemoryQuantum;
 };
 
diff --git a/src/test/F4MatrixBuilder.cpp b/src/test/F4MatrixBuilder.cpp
index c5d8191..623982a 100755
--- a/src/test/F4MatrixBuilder.cpp
+++ b/src/test/F4MatrixBuilder.cpp
@@ -61,8 +61,8 @@ TEST(F4MatrixBuilder, Empty) {
     builder.buildMatrixAndClear(matrix);
     ASSERT_EQ(0, matrix.topLeft.rowCount());
     ASSERT_EQ(0, matrix.bottomLeft.rowCount());
-    ASSERT_EQ(0, matrix.topLeft.colCount());
-    ASSERT_EQ(0, matrix.topRight.colCount());
+    ASSERT_EQ(0, matrix.topLeft.computeColCount());
+    ASSERT_EQ(0, matrix.topRight.computeColCount());
     ASSERT_EQ(0, matrix.leftColumnMonomials.size());
     ASSERT_EQ(0, matrix.rightColumnMonomials.size());
   }
diff --git a/src/test/SparseMatrix.cpp b/src/test/SparseMatrix.cpp
index 0eba726..97ba272 100755
--- a/src/test/SparseMatrix.cpp
+++ b/src/test/SparseMatrix.cpp
@@ -20,19 +20,18 @@ TEST(SparseMatrix, NoRows) {
   SparseMatrix mat; // test a matrix with no rows
   ASSERT_EQ(0, mat.entryCount());
   ASSERT_EQ(0, mat.rowCount());
-  ASSERT_EQ(0, mat.colCount());
+  ASSERT_EQ(0, mat.computeColCount());
   ASSERT_EQ("matrix with no rows\n", mat.toString()); 
 }
 
 TEST(SparseMatrix, Simple) {
-  SparseMatrix mat(2000);
-  mat.appendColumn();
+  SparseMatrix mat;
 
   mat.appendEntry(5, 101);
   mat.rowDone();
   ASSERT_EQ(1, mat.entryCount());
   ASSERT_EQ(1, mat.rowCount());
-  ASSERT_EQ(2001, mat.colCount());
+  ASSERT_EQ(6, mat.computeColCount());
   ASSERT_EQ(5, mat.leadCol(0));
   ASSERT_EQ(1, mat.entryCountInRow(0));
   ASSERT_EQ("0: 5#101\n", mat.toString()); 
@@ -41,19 +40,18 @@ TEST(SparseMatrix, Simple) {
   mat.rowDone(); // add a row with no entries
   ASSERT_EQ(1, mat.entryCount());
   ASSERT_EQ(2, mat.rowCount());
-  ASSERT_EQ(2001, mat.colCount());
+  ASSERT_EQ(6, mat.computeColCount());
   ASSERT_EQ(5, mat.leadCol(0));
   ASSERT_EQ(0, mat.entryCountInRow(1));
   ASSERT_EQ("0: 5#101\n1:\n", mat.toString()); 
   ASSERT_TRUE(mat.emptyRow(1));
 
   mat.appendEntry(5, 102);
-  mat.appendColumn();
   mat.appendEntry(2001, 0); // scalar zero
   mat.rowDone(); // add a row with two entries
   ASSERT_EQ(3, mat.entryCount());
   ASSERT_EQ(3, mat.rowCount());
-  ASSERT_EQ(2002, mat.colCount());
+  ASSERT_EQ(2002, mat.computeColCount());
   ASSERT_EQ(5, mat.leadCol(2));
   ASSERT_EQ(2, mat.entryCountInRow(2));
   ASSERT_EQ("0: 5#101\n1:\n2: 5#102 2001#0\n", mat.toString()); 

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/mathicgb.git


