rev 11990 - branches/kde4/packages/koffice/debian/patches

Ana Beatriz Guerrero López ana at alioth.debian.org
Wed Aug 27 09:45:12 UTC 2008


Author: ana
Date: 2008-08-27 09:45:11 +0000 (Wed, 27 Aug 2008)
New Revision: 11990

Added:
   branches/kde4/packages/koffice/debian/patches/embed_eigen_in_koffice2_alpha10.diff
Log:
this is the right one


Added: branches/kde4/packages/koffice/debian/patches/embed_eigen_in_koffice2_alpha10.diff
===================================================================
--- branches/kde4/packages/koffice/debian/patches/embed_eigen_in_koffice2_alpha10.diff	                        (rev 0)
+++ branches/kde4/packages/koffice/debian/patches/embed_eigen_in_koffice2_alpha10.diff	2008-08-27 09:45:11 UTC (rev 11990)
@@ -0,0 +1,19955 @@
+This patch adds eigen2 svn r850507 to koffice Alpha 10.
+Eigen2 is under the GPLv3 license; the license text can be found on Debian
+systems at /usr/share/common-licenses/GPL-3.
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/CMakeLists.txt koffice-1.9.96.0~that.is.really.1.9.95.10/CMakeLists.txt
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/CMakeLists.txt	2008-08-19 11:39:11.000000000 +0200
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/CMakeLists.txt	2008-08-27 11:42:03.000000000 +0200
+@@ -1,5 +1,6 @@
+ project(koffice)
+ 
++set(EIGEN2_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/Eigen/)
+ set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules )
+ 
+ # define the generic version of the KOffice libraries here
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Array koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Array
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Array	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Array	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,33 @@
++#ifndef EIGEN_ARRAY_MODULE_H
++#define EIGEN_ARRAY_MODULE_H
++
++#include "Core"
++
++namespace Eigen {
++
++/** \defgroup Array Array module
++  * This module provides several handy features to manipulate matrices as a simple array of values.
++  * In addition to the listed classes, it defines various methods of the Cwise interface
++  * (accessible from MatrixBase::cwise()), including:
++  *  - matrix-scalar sum,
++  *  - coeff-wise comparison operators,
++  *  - sin, cos, sqrt, pow, exp, log, square, cube, inverse (reciprocal).
++  *
++  * This module also provides various MatrixBase methods, including:
++  *  - \ref MatrixBase::all() "all", \ref MatrixBase::any() "any",
++  *  - \ref MatrixBase::Random() "random matrix initialization"
++  *
++  * \code
++  * #include <Eigen/Array>
++  * \endcode
++  */
++
++#include "src/Array/CwiseOperators.h"
++#include "src/Array/Functors.h"
++#include "src/Array/AllAndAny.h"
++#include "src/Array/PartialRedux.h"
++#include "src/Array/Random.h"
++
++} // namespace Eigen
++
++#endif // EIGEN_ARRAY_MODULE_H
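
For illustration, a minimal sketch of the Cwise interface described above. It
assumes the standard Eigen2 Vector3d typedef and that the embedded headers are
reachable on the include path as <Eigen/Array>:

#include <Eigen/Array>
#include <iostream>
using namespace Eigen;

int main()
{
  Vector3d v(1.0, 4.0, 9.0);
  Vector3d s = v.cwise().sqrt();   // coefficient-wise square root: (1, 2, 3)
  Vector3d t = v.cwise() + 1.0;    // matrix-scalar sum: (2, 5, 10)
  std::cout << s.transpose() << "\n" << t.transpose() << "\n";
  return 0;
}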
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Cholesky koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Cholesky
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Cholesky	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Cholesky	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,58 @@
++#ifndef EIGEN_CHOLESKY_MODULE_H
++#define EIGEN_CHOLESKY_MODULE_H
++
++#include "Core"
++
++// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
++#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
++  #ifndef EIGEN_HIDE_HEAVY_CODE
++  #define EIGEN_HIDE_HEAVY_CODE
++  #endif
++#elif defined EIGEN_HIDE_HEAVY_CODE
++  #undef EIGEN_HIDE_HEAVY_CODE
++#endif
++
++namespace Eigen {
++
++/** \defgroup Cholesky_Module Cholesky module
++  * This module provides two variants of the Cholesky decomposition for selfadjoint (hermitian) matrices.
++  * Those decompositions are accessible via the following MatrixBase methods:
++  *  - MatrixBase::cholesky(),
++  *  - MatrixBase::choleskyNoSqrt()
++  *
++  * \code
++  * #include <Eigen/Cholesky>
++  * \endcode
++  */
++
++#include "src/Array/CwiseOperators.h"
++#include "src/Array/Functors.h"
++#include "src/Cholesky/Cholesky.h"
++#include "src/Cholesky/CholeskyWithoutSquareRoot.h"
++
++} // namespace Eigen
++
++#define EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \
++  PREFIX template class Cholesky<MATRIXTYPE>; \
++  PREFIX template class CholeskyWithoutSquareRoot<MATRIXTYPE>
++
++#define EIGEN_CHOLESKY_MODULE_INSTANTIATE(PREFIX) \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX)
++
++#ifdef EIGEN_EXTERN_INSTANTIATIONS
++
++namespace Eigen {
++  EIGEN_CHOLESKY_MODULE_INSTANTIATE(extern);
++} // namespace Eigen
++#endif
++
++#endif // EIGEN_CHOLESKY_MODULE_H
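
A rough sketch of the two decompositions named above, using the Cholesky and
CholeskyWithoutSquareRoot classes listed in the instantiation macros; holding
the decomposition objects by value as shown is an assumption:

#include <Eigen/Cholesky>
using namespace Eigen;

int main()
{
  Matrix3d m;
  m <<  2, -1,  0,
       -1,  2, -1,
        0, -1,  2;                                              // symmetric positive definite
  Cholesky<Matrix3d> chol = m.cholesky();                       // L*L^T variant
  CholeskyWithoutSquareRoot<Matrix3d> ldlt = m.choleskyNoSqrt(); // square-root-free variant
  return 0;
}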
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Core koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Core
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Core	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Core	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,67 @@
++#ifndef EIGEN_CORE_H
++#define EIGEN_CORE_H
++
++#include "CoreDeclarations"
++#include <iostream>
++#include <cstring>
++
++#ifdef EIGEN_VECTORIZE
++// it seems we cannot assume posix_memalign is defined in the stdlib header
++extern "C" int posix_memalign (void **, size_t, size_t) throw ();
++#endif
++
++namespace Eigen {
++
++#include "src/Core/NumTraits.h"
++#include "src/Core/MathFunctions.h"
++#include "src/Core/DummyPacketMath.h"
++
++#if defined EIGEN_VECTORIZE_SSE
++#include "src/Core/arch/SSE/PacketMath.h"
++#elif defined EIGEN_VECTORIZE_ALTIVEC
++#include "src/Core/arch/AltiVec/PacketMath.h"
++#endif
++
++#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
++#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
++#endif
++
++#include "src/Core/Functors.h"
++#include "src/Core/MatrixBase.h"
++#include "src/Core/Coeffs.h"
++#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874
++                                // at least confirmed with Doxygen 1.5.5 and 1.5.6
++#include "src/Core/Assign.h"
++#endif
++#include "src/Core/MatrixStorage.h"
++#include "src/Core/NestByValue.h"
++#include "src/Core/Flagged.h"
++#include "src/Core/Matrix.h"
++#include "src/Core/Cwise.h"
++#include "src/Core/CwiseBinaryOp.h"
++#include "src/Core/CwiseUnaryOp.h"
++#include "src/Core/CwiseNullaryOp.h"
++#include "src/Core/Dot.h"
++#include "src/Core/Product.h"
++#include "src/Core/DiagonalProduct.h"
++#include "src/Core/SolveTriangular.h"
++#include "src/Core/MapBase.h"
++#include "src/Core/Map.h"
++#include "src/Core/Block.h"
++#include "src/Core/Minor.h"
++#include "src/Core/Transpose.h"
++#include "src/Core/DiagonalMatrix.h"
++#include "src/Core/DiagonalCoeffs.h"
++#include "src/Core/Sum.h"
++#include "src/Core/Redux.h"
++#include "src/Core/Visitor.h"
++#include "src/Core/Fuzzy.h"
++#include "src/Core/IO.h"
++#include "src/Core/Swap.h"
++#include "src/Core/CommaInitializer.h"
++#include "src/Core/Part.h"
++#include "src/Core/CacheFriendlyProduct.h"
++
++} // namespace Eigen
++
++#endif // EIGEN_CORE_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/CoreDeclarations koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/CoreDeclarations
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/CoreDeclarations	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/CoreDeclarations	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,50 @@
++#ifndef EIGEN_CORE_DECLARATIONS_H
++#define EIGEN_CORE_DECLARATIONS_H
++
++#ifdef __GNUC__
++#define EIGEN_GNUC_AT_LEAST(x,y) ((__GNUC__>=x && __GNUC_MINOR__>=y) || __GNUC__>x)
++#else
++#define EIGEN_GNUC_AT_LEAST(x,y) 0
++#endif
++
++#ifndef EIGEN_DONT_VECTORIZE
++  #if (defined __SSE2__) && ( (!defined __GNUC__) || EIGEN_GNUC_AT_LEAST(4,2) )
++    #define EIGEN_VECTORIZE
++    #define EIGEN_VECTORIZE_SSE
++    #include <emmintrin.h>
++    #include <xmmintrin.h>
++    #ifdef __SSE3__
++      #include <pmmintrin.h>
++    #endif
++    #ifdef __SSSE3__
++      #include <tmmintrin.h>
++    #endif
++
++/*** Disable AltiVec code for now as it's out of date
++  #elif (defined __ALTIVEC__)
++    #define EIGEN_VECTORIZE
++    #define EIGEN_VECTORIZE_ALTIVEC
++    #include <altivec.h>
++    // We _need_ to #undef bool as it's defined in <altivec.h> for some reason.
++    #undef bool
++***/
++  #endif
++#endif
++
++#include <cstdlib>
++#include <cmath>
++#include <complex>
++#include <cassert>
++#include <functional>
++
++namespace Eigen {
++
++#include "src/Core/util/Macros.h"
++#include "src/Core/util/Constants.h"
++#include "src/Core/util/ForwardDeclarations.h"
++#include "src/Core/util/Meta.h"
++#include "src/Core/util/StaticAssert.h"
++
++} // namespace Eigen
++
++#endif // EIGEN_CORE_DECLARATIONS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Geometry koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Geometry
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Geometry	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Geometry	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,37 @@
++#ifndef EIGEN_GEOMETRY_MODULE_H
++#define EIGEN_GEOMETRY_MODULE_H
++
++#include "Core"
++
++#ifndef M_PI
++#define M_PI 3.14159265358979323846
++#endif
++
++namespace Eigen {
++
++/** \defgroup Geometry Geometry module
++  * This module provides support for:
++  *  - fixed-size homogeneous transformations
++  *  - 2D and 3D rotations
++  *  - quaternions
++  *  - \ref MatrixBase::cross() "cross product"
++  *  - \ref MatrixBase::someOrthogonal() "orthogonal vector generation"
++  * 
++  * \code
++  * #include <Eigen/Geometry>
++  * \endcode
++  */
++
++#include "src/Geometry/OrthoMethods.h"
++#include "src/Geometry/Quaternion.h"
++#include "src/Geometry/AngleAxis.h"
++#include "src/Geometry/Rotation.h"
++#include "src/Geometry/Transform.h"
++
++// the Geometry module uses cwiseCos and cwiseSin, which are defined in the Array module
++#include "src/Array/CwiseOperators.h"
++#include "src/Array/Functors.h"
++
++} // namespace Eigen
++
++#endif // EIGEN_GEOMETRY_MODULE_H
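
A small sketch of the cross product listed above, assuming the standard
Vector3d typedef (the quaternion and transform classes come in through the same
include):

#include <Eigen/Geometry>
#include <iostream>
using namespace Eigen;

int main()
{
  Vector3d x(1, 0, 0), y(0, 1, 0);
  Vector3d z = x.cross(y);          // (0, 0, 1)
  std::cout << z.transpose() << "\n";
  return 0;
}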
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/LU koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/LU
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/LU	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/LU	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,25 @@
++#ifndef EIGEN_LU_MODULE_H
++#define EIGEN_LU_MODULE_H
++
++#include "Core"
++
++namespace Eigen {
++
++/** \defgroup LU_Module LU module
++  * This module includes %LU decomposition and related notions such as matrix inversion and determinant.
++  * This module defines the following MatrixBase methods:
++  *  - MatrixBase::inverse()
++  *  - MatrixBase::determinant()
++  *
++  * \code
++  * #include <Eigen/LU>
++  * \endcode
++  */
++
++#include "src/LU/LU.h"
++#include "src/LU/Determinant.h"
++#include "src/LU/Inverse.h"
++
++} // namespace Eigen
++
++#endif // EIGEN_LU_MODULE_H
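
A minimal sketch of the two MatrixBase methods listed above, assuming the
standard Matrix2d typedef:

#include <Eigen/LU>
#include <iostream>
using namespace Eigen;

int main()
{
  Matrix2d m;
  m << 1, 2,
       3, 4;
  std::cout << "det = " << m.determinant() << "\n";   // prints -2
  std::cout << m.inverse() << "\n";
  return 0;
}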
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/QR koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/QR
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/QR	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/QR	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,65 @@
++#ifndef EIGEN_QR_MODULE_H
++#define EIGEN_QR_MODULE_H
++
++#include "Core"
++#include "Cholesky"
++
++// Note that EIGEN_HIDE_HEAVY_CODE has to be defined per module
++#if (defined EIGEN_EXTERN_INSTANTIATIONS) && (EIGEN_EXTERN_INSTANTIATIONS>=2)
++  #ifndef EIGEN_HIDE_HEAVY_CODE
++  #define EIGEN_HIDE_HEAVY_CODE
++  #endif
++#elif defined EIGEN_HIDE_HEAVY_CODE
++  #undef EIGEN_HIDE_HEAVY_CODE
++#endif
++
++namespace Eigen {
++
++/** \defgroup QR_Module QR module
++  * This module mainly provides QR decomposition and an eigenvalue solver.
++  * This module also provides some MatrixBase methods, including:
++  *  - MatrixBase::qr(),
++  *  - MatrixBase::eigenvalues(),
++  *  - MatrixBase::operatorNorm()
++  *
++  * \code
++  * #include <Eigen/QR>
++  * \endcode
++  */
++
++#include "src/QR/QR.h"
++#include "src/QR/Tridiagonalization.h"
++#include "src/QR/EigenSolver.h"
++#include "src/QR/SelfAdjointEigenSolver.h"
++#include "src/QR/HessenbergDecomposition.h"
++
++// declare all classes for a given matrix type
++#define EIGEN_QR_MODULE_INSTANTIATE_TYPE(MATRIXTYPE,PREFIX) \
++  PREFIX template class QR<MATRIXTYPE>; \
++  PREFIX template class Tridiagonalization<MATRIXTYPE>; \
++  PREFIX template class HessenbergDecomposition<MATRIXTYPE>; \
++  PREFIX template class SelfAdjointEigenSolver<MATRIXTYPE>
++
++// removed because it does not support complex yet
++//  PREFIX template class EigenSolver<MATRIXTYPE>
++
++// declare all classes for all types
++#define EIGEN_QR_MODULE_INSTANTIATE(PREFIX) \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix2f,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix2d,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix3f,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix3d,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix4f,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(Matrix4d,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXf,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXd,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXcf,PREFIX); \
++  EIGEN_QR_MODULE_INSTANTIATE_TYPE(MatrixXcd,PREFIX)
++
++#ifdef EIGEN_EXTERN_INSTANTIATIONS
++  EIGEN_QR_MODULE_INSTANTIATE(extern);
++#endif // EIGEN_EXTERN_INSTANTIATIONS
++
++} // namespace Eigen
++
++#endif // EIGEN_QR_MODULE_H
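
A rough sketch of the qr() and operatorNorm() methods listed above; the QR
class name comes from the instantiation macro, and holding the decomposition
object by value is an assumption:

#include <Eigen/QR>
#include <iostream>
using namespace Eigen;

int main()
{
  Matrix3d m;
  m << 1, 2, 0,
       2, 5, 1,
       0, 1, 3;
  QR<Matrix3d> qr = m.qr();                            // QR decomposition of m
  std::cout << "operator norm: " << m.operatorNorm() << "\n";
  return 0;
}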
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Sparse koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Sparse
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/Sparse	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/Sparse	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,25 @@
++#ifndef EIGEN_SPARSE_MODULE_H
++#define EIGEN_SPARSE_MODULE_H
++
++#include "Core"
++#include <vector>
++#include <map>
++#include <stdlib.h>
++#include <string.h>
++
++namespace Eigen {
++
++#include "src/Sparse/SparseUtil.h"
++#include "src/Sparse/SparseMatrixBase.h"
++#include "src/Sparse/SparseArray.h"
++#include "src/Sparse/SparseMatrix.h"
++#include "src/Sparse/HashMatrix.h"
++#include "src/Sparse/LinkedVectorMatrix.h"
++#include "src/Sparse/CoreIterators.h"
++#include "src/Sparse/SparseSetter.h"
++#include "src/Sparse/SparseProduct.h"
++#include "src/Sparse/TriangularSolver.h"
++
++} // namespace Eigen
++
++#endif // EIGEN_SPARSE_MODULE_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/AllAndAny.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/AllAndAny.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/AllAndAny.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/AllAndAny.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,133 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_ALLANDANY_H
++#define EIGEN_ALLANDANY_H
++
++template<typename Derived, int UnrollCount>
++struct ei_all_unroller
++{
++  enum {
++    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
++    row = (UnrollCount-1) % Derived::RowsAtCompileTime
++  };
++
++  inline static bool run(const Derived &mat)
++  {
++    return ei_all_unroller<Derived, UnrollCount-1>::run(mat) && mat.coeff(row, col);
++  }
++};
++
++template<typename Derived>
++struct ei_all_unroller<Derived, 1>
++{
++  inline static bool run(const Derived &mat) { return mat.coeff(0, 0); }
++};
++
++template<typename Derived>
++struct ei_all_unroller<Derived, Dynamic>
++{
++  inline static bool run(const Derived &) { return false; }
++};
++
++template<typename Derived, int UnrollCount>
++struct ei_any_unroller
++{
++  enum {
++    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
++    row = (UnrollCount-1) % Derived::RowsAtCompileTime
++  };
++
++  inline static bool run(const Derived &mat)
++  {
++    return ei_any_unroller<Derived, UnrollCount-1>::run(mat) || mat.coeff(row, col);
++  }
++};
++
++template<typename Derived>
++struct ei_any_unroller<Derived, 1>
++{
++  inline static bool run(const Derived &mat) { return mat.coeff(0, 0); }
++};
++
++template<typename Derived>
++struct ei_any_unroller<Derived, Dynamic>
++{
++  inline static bool run(const Derived &) { return false; }
++};
++
++/** \array_module
++  * 
++  * \returns true if all coefficients are true
++  *
++  * \addexample CwiseAll \label How to check whether a point is inside a box (using operator< and all())
++  *
++  * Example: \include MatrixBase_all.cpp
++  * Output: \verbinclude MatrixBase_all.out
++  *
++  * \sa MatrixBase::any(), Cwise::operator<()
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::all(void) const
++{
++  const bool unroll = SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost)
++                      <= EIGEN_UNROLLING_LIMIT;
++  if(unroll)
++    return ei_all_unroller<Derived,
++                           unroll ? int(SizeAtCompileTime) : Dynamic
++     >::run(derived());
++  else
++  {
++    for(int j = 0; j < cols(); j++)
++      for(int i = 0; i < rows(); i++)
++        if (!coeff(i, j)) return false;
++    return true;
++  }
++}
++
++/** \array_module
++  * 
++  * \returns true if at least one coefficient is true
++  *
++  * \sa MatrixBase::all()
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::any(void) const
++{
++  const bool unroll = SizeAtCompileTime * (CoeffReadCost + NumTraits<Scalar>::AddCost)
++                      <= EIGEN_UNROLLING_LIMIT;
++  if(unroll)
++    return ei_any_unroller<Derived,
++                           unroll ? int(SizeAtCompileTime) : Dynamic
++           >::run(derived());
++  else
++  {
++    for(int j = 0; j < cols(); j++)
++      for(int i = 0; i < rows(); i++)
++        if (coeff(i, j)) return true;
++    return false;
++  }
++}
++
++#endif // EIGEN_ALLANDANY_H
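
A sketch of the point-in-box check mentioned in the \addexample note above,
combining the Array module's comparison operators with all(); the Vector3d
typedef and the coordinate values are only illustrative:

#include <Eigen/Array>
#include <iostream>
using namespace Eigen;

int main()
{
  Vector3d boxMin(0, 0, 0), boxMax(1, 1, 1), p(0.4, 0.7, 0.2);
  bool inside = (p.cwise() > boxMin).all() && (p.cwise() < boxMax).all();
  std::cout << (inside ? "inside" : "outside") << "\n";   // prints "inside"
  return 0;
}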
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/CwiseOperators.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/CwiseOperators.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/CwiseOperators.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/CwiseOperators.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,356 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_ARRAY_CWISE_OPERATORS_H
++#define EIGEN_ARRAY_CWISE_OPERATORS_H
++
++// -- unary operators --
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise square root of *this.
++  *
++  * Example: \include Cwise_sqrt.cpp
++  * Output: \verbinclude Cwise_sqrt.out
++  *
++  * \sa pow(), square()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op)
++Cwise<ExpressionType>::sqrt() const
++{
++  return _expression();
++}
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise exponential of *this.
++  *
++  * Example: \include Cwise_exp.cpp
++  * Output: \verbinclude Cwise_exp.out
++  *
++  * \sa pow(), log(), sin(), cos()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op)
++Cwise<ExpressionType>::exp() const
++{
++  return _expression();
++}
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise logarithm of *this.
++  *
++  * Example: \include Cwise_log.cpp
++  * Output: \verbinclude Cwise_log.out
++  *
++  * \sa exp()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op)
++Cwise<ExpressionType>::log() const
++{
++  return _expression();
++}
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise cosine of *this.
++  *
++  * Example: \include Cwise_cos.cpp
++  * Output: \verbinclude Cwise_cos.out
++  *
++  * \sa sin(), exp()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op)
++Cwise<ExpressionType>::cos() const
++{
++  return _expression();
++}
++
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise sine of *this.
++  *
++  * Example: \include Cwise_sin.cpp
++  * Output: \verbinclude Cwise_sin.out
++  *
++  * \sa cos(), exp()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op)
++Cwise<ExpressionType>::sin() const
++{
++  return _expression();
++}
++
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise power of *this to the given exponent.
++  *
++  * Example: \include Cwise_pow.cpp
++  * Output: \verbinclude Cwise_pow.out
++  *
++  * \sa exp(), log()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op)
++Cwise<ExpressionType>::pow(const Scalar& exponent) const
++{
++  return EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op)(_expression(), ei_scalar_pow_op<Scalar>(exponent));
++}
++
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise inverse of *this.
++  *
++  * Example: \include Cwise_inverse.cpp
++  * Output: \verbinclude Cwise_inverse.out
++  *
++  * \sa operator/(), operator*()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op)
++Cwise<ExpressionType>::inverse() const
++{
++  return _expression();
++}
++
++/** \array_module
++  *
++  * \returns an expression of the coefficient-wise square of *this.
++  *
++  * Example: \include Cwise_square.cpp
++  * Output: \verbinclude Cwise_square.out
++  *
++  * \sa operator/(), operator*(), abs2()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op)
++Cwise<ExpressionType>::square() const
++{
++  return _expression();
++}
++
++/** \array_module
++  *
++  * \returns an expression of the coefficient-wise cube of *this.
++  *
++  * Example: \include Cwise_cube.cpp
++  * Output: \verbinclude Cwise_cube.out
++  *
++  * \sa square(), pow()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op)
++Cwise<ExpressionType>::cube() const
++{
++  return _expression();
++}
++
++
++// -- binary operators --
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise \< operator of *this and \a other
++  *
++  * Example: \include Cwise_less.cpp
++  * Output: \verbinclude Cwise_less.out
++  *
++  * \sa MatrixBase::all(), MatrixBase::any(), operator>(), operator<=()
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)
++Cwise<ExpressionType>::operator<(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)(_expression(), other.derived());
++}
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise \<= operator of *this and \a other
++  *
++  * Example: \include Cwise_less_equal.cpp
++  * Output: \verbinclude Cwise_less_equal.out
++  *
++  * \sa MatrixBase::all(), MatrixBase::any(), operator>=(), operator<()
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)
++Cwise<ExpressionType>::operator<=(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)(_expression(), other.derived());
++}
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise \> operator of *this and \a other
++  *
++  * Example: \include Cwise_greater.cpp
++  * Output: \verbinclude Cwise_greater.out
++  *
++  * \sa MatrixBase::all(), MatrixBase::any(), operator>=(), operator<()
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)
++Cwise<ExpressionType>::operator>(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)(_expression(), other.derived());
++}
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise \>= operator of *this and \a other
++  *
++  * Example: \include Cwise_greater_equal.cpp
++  * Output: \verbinclude Cwise_greater_equal.out
++  *
++  * \sa MatrixBase::all(), MatrixBase::any(), operator>(), operator<=()
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)
++Cwise<ExpressionType>::operator>=(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)(_expression(), other.derived());
++}
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise == operator of *this and \a other
++  *
++  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
++  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
++  * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and
++  * MatrixBase::isMuchSmallerThan().
++  *
++  * Example: \include Cwise_equal_equal.cpp
++  * Output: \verbinclude Cwise_equal_equal.out
++  *
++  * \sa MatrixBase::all(), MatrixBase::any(), MatrixBase::isApprox(), MatrixBase::isMuchSmallerThan()
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)
++Cwise<ExpressionType>::operator==(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)(_expression(), other.derived());
++}
++
++/** \array_module
++  * 
++  * \returns an expression of the coefficient-wise != operator of *this and \a other
++  *
++  * \warning this performs an exact comparison, which is generally a bad idea with floating-point types.
++  * In order to check for equality between two vectors or matrices with floating-point coefficients, it is
++  * generally a far better idea to use a fuzzy comparison as provided by MatrixBase::isApprox() and
++  * MatrixBase::isMuchSmallerThan().
++  *
++  * Example: \include Cwise_not_equal.cpp
++  * Output: \verbinclude Cwise_not_equal.out
++  *
++  * \sa MatrixBase::all(), MatrixBase::any(), MatrixBase::isApprox(), MatrixBase::isMuchSmallerThan()
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)
++Cwise<ExpressionType>::operator!=(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)(_expression(), other.derived());
++}
++
++
++/** \array_module
++  *
++  * \returns an expression of \c *this with each coeff incremented by the constant \a scalar
++  *
++  * Example: \include Cwise_plus.cpp
++  * Output: \verbinclude Cwise_plus.out
++  *
++  * \sa operator+=(), operator-()
++  */
++template<typename ExpressionType>
++inline const typename Cwise<ExpressionType>::ScalarAddReturnType
++Cwise<ExpressionType>::operator+(const Scalar& scalar) const
++{
++  return typename Cwise<ExpressionType>::ScalarAddReturnType(m_matrix, ei_scalar_add_op<Scalar>(scalar));
++}
++
++/** \array_module
++  *
++  * Adds the given \a scalar to each coeff of this expression.
++  *
++  * Example: \include Cwise_plus_equal.cpp
++  * Output: \verbinclude Cwise_plus_equal.out
++  *
++  * \sa operator+(), operator-=()
++  */
++template<typename ExpressionType>
++inline ExpressionType& Cwise<ExpressionType>::operator+=(const Scalar& scalar)
++{
++  return m_matrix.const_cast_derived() = *this + scalar;
++}
++
++/** \array_module
++  *
++  * \returns an expression of \c *this with each coeff decremented by the constant \a scalar
++  *
++  * Example: \include Cwise_minus.cpp
++  * Output: \verbinclude Cwise_minus.out
++  *
++  * \sa operator+(), operator-=()
++  */
++template<typename ExpressionType>
++inline const typename Cwise<ExpressionType>::ScalarAddReturnType
++Cwise<ExpressionType>::operator-(const Scalar& scalar) const
++{
++  return *this + (-scalar);
++}
++
++/** \array_module
++  *
++  * Subtracts the given \a scalar from each coeff of this expression.
++  *
++  * Example: \include Cwise_minus_equal.cpp
++  * Output: \verbinclude Cwise_minus_equal.out
++  *
++  * \sa operator+=(), operator-()
++  */
++
++template<typename ExpressionType>
++inline ExpressionType& Cwise<ExpressionType>::operator-=(const Scalar& scalar)
++{
++  return m_matrix.const_cast_derived() = *this - scalar;
++}
++
++#endif // EIGEN_ARRAY_CWISE_OPERATORS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/Functors.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/Functors.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/Functors.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/Functors.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,302 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_ARRAY_FUNCTORS_H
++#define EIGEN_ARRAY_FUNCTORS_H
++
++/** \internal
++  * \array_module
++  *
++  * \brief Template functor to add a scalar to a fixed other one
++  *
++  * \sa class CwiseUnaryOp, Array::operator+
++  */
++/* If you wonder why doing the ei_pset1() in packetOp() is an optimization check ei_scalar_multiple_op */
++template<typename Scalar>
++struct ei_scalar_add_op {
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++  inline ei_scalar_add_op(const Scalar& other) : m_other(other) { }
++  inline Scalar operator() (const Scalar& a) const { return a + m_other; }
++  inline const PacketScalar packetOp(const PacketScalar& a) const
++  { return ei_padd(a, ei_pset1(m_other)); }
++  const Scalar m_other;
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_add_op<Scalar> >
++{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = ei_packet_traits<Scalar>::size>1 }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to compute the square root of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::sqrt()
++  */
++template<typename Scalar> struct ei_scalar_sqrt_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a) const { return ei_sqrt(a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_sqrt_op<Scalar> >
++{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to compute the exponential of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::exp()
++  */
++template<typename Scalar> struct ei_scalar_exp_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a) const { return ei_exp(a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_exp_op<Scalar> >
++{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to compute the logarithm of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::log()
++  */
++template<typename Scalar> struct ei_scalar_log_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a) const { return ei_log(a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_log_op<Scalar> >
++{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to compute the cosine of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::cos()
++  */
++template<typename Scalar> struct ei_scalar_cos_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a) const { return ei_cos(a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_cos_op<Scalar> >
++{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to compute the sine of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::sin()
++  */
++template<typename Scalar> struct ei_scalar_sin_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a) const { return ei_sin(a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_sin_op<Scalar> >
++{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to raise a scalar to a power
++  *
++  * \sa class CwiseUnaryOp, Cwise::pow
++  */
++template<typename Scalar>
++struct ei_scalar_pow_op {
++  inline ei_scalar_pow_op(const Scalar& exponent) : m_exponent(exponent) {}
++  inline Scalar operator() (const Scalar& a) const { return ei_pow(a, m_exponent); }
++  const Scalar m_exponent;
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_pow_op<Scalar> >
++{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to compute the inverse of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::inverse()
++  */
++template<typename Scalar>
++struct ei_scalar_inverse_op {
++  inline Scalar operator() (const Scalar& a) const { return Scalar(1)/a; }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a) const
++  { return ei_div(ei_pset1(Scalar(1)),a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_inverse_op<Scalar> >
++{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = int(ei_packet_traits<Scalar>::size)>1 }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to compute the square of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::square()
++  */
++template<typename Scalar>
++struct ei_scalar_square_op {
++  inline Scalar operator() (const Scalar& a) const { return a*a; }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a) const
++  { return ei_pmul(a,a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_square_op<Scalar> >
++{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = int(ei_packet_traits<Scalar>::size)>1 }; };
++
++/** \internal
++  *
++  * \array_module
++  *
++  * \brief Template functor to compute the cube of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::cube()
++  */
++template<typename Scalar>
++struct ei_scalar_cube_op {
++  inline Scalar operator() (const Scalar& a) const { return a*a*a; }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a) const
++  { return ei_pmul(a,ei_pmul(a,a)); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_cube_op<Scalar> >
++{ enum { Cost = 2*NumTraits<Scalar>::MulCost, PacketAccess = int(ei_packet_traits<Scalar>::size)>1 }; };
++
++
++// default ei_functor_traits for STL functors:
++
++template<typename T>
++struct ei_functor_traits<std::multiplies<T> >
++{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::divides<T> >
++{ enum { Cost = NumTraits<T>::MulCost, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::plus<T> >
++{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::minus<T> >
++{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::negate<T> >
++{ enum { Cost = NumTraits<T>::AddCost, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::logical_or<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::logical_and<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::logical_not<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::greater<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::less<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::greater_equal<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::less_equal<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::equal_to<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::not_equal_to<T> >
++{ enum { Cost = 1, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::binder2nd<T> >
++{ enum { Cost = ei_functor_traits<T>::Cost, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::binder1st<T> >
++{ enum { Cost = ei_functor_traits<T>::Cost, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::unary_negate<T> >
++{ enum { Cost = 1 + ei_functor_traits<T>::Cost, PacketAccess = false }; };
++
++template<typename T>
++struct ei_functor_traits<std::binary_negate<T> >
++{ enum { Cost = 1 + ei_functor_traits<T>::Cost, PacketAccess = false }; };
++
++#ifdef EIGEN_STDEXT_SUPPORT
++
++template<typename T0,typename T1>
++struct ei_functor_traits<std::project1st<T0,T1> >
++{ enum { Cost = 0, PacketAccess = false }; };
++
++template<typename T0,typename T1>
++struct ei_functor_traits<std::project2nd<T0,T1> >
++{ enum { Cost = 0, PacketAccess = false }; };
++
++template<typename T0,typename T1>
++struct ei_functor_traits<std::select2nd<std::pair<T0,T1> > >
++{ enum { Cost = 0, PacketAccess = false }; };
++
++template<typename T0,typename T1>
++struct ei_functor_traits<std::select1st<std::pair<T0,T1> > >
++{ enum { Cost = 0, PacketAccess = false }; };
++
++template<typename T0,typename T1>
++struct ei_functor_traits<std::unary_compose<T0,T1> >
++{ enum { Cost = ei_functor_traits<T0>::Cost + ei_functor_traits<T1>::Cost, PacketAccess = false }; };
++
++template<typename T0,typename T1,typename T2>
++struct ei_functor_traits<std::binary_compose<T0,T1,T2> >
++{ enum { Cost = ei_functor_traits<T0>::Cost + ei_functor_traits<T1>::Cost + ei_functor_traits<T2>::Cost, PacketAccess = false }; };
++
++#endif // EIGEN_STDEXT_SUPPORT
++
++#endif // EIGEN_ARRAY_FUNCTORS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/PartialRedux.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/PartialRedux.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/PartialRedux.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/PartialRedux.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,301 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_PARTIAL_REDUX_H
++#define EIGEN_PARTIAL_REDUX_H
++
++/** \array_module \ingroup Array
++  * 
++  * \class PartialReduxExpr
++  *
++  * \brief Generic expression of a partially reduxed matrix
++  *
++  * \param MatrixType the type of the matrix to which the redux operation is applied
++  * \param MemberOp type of the member functor
++  * \param Direction indicates the direction of the redux (Vertical or Horizontal)
++  *
++  * This class represents an expression of a partial redux operator of a matrix.
++  * It is the return type of PartialRedux functions,
++  * and most of the time this is the only way it is used.
++  *
++  * \sa class PartialRedux
++  */
++
++template< typename MatrixType, typename MemberOp, int Direction>
++class PartialReduxExpr;
++
++template<typename MatrixType, typename MemberOp, int Direction>
++struct ei_traits<PartialReduxExpr<MatrixType, MemberOp, Direction> >
++{
++  typedef typename MemberOp::result_type Scalar;
++  typedef typename MatrixType::Scalar InputScalar;
++  typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
++  typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
++  enum {
++    RowsAtCompileTime = Direction==Vertical   ? 1 : MatrixType::RowsAtCompileTime,
++    ColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::ColsAtCompileTime,
++    MaxRowsAtCompileTime = Direction==Vertical   ? 1 : MatrixType::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = Direction==Horizontal ? 1 : MatrixType::MaxColsAtCompileTime,
++    Flags = ((int(RowsAtCompileTime) == Dynamic || int(ColsAtCompileTime) == Dynamic)
++          ? (unsigned int)_MatrixTypeNested::Flags
++          : (unsigned int)_MatrixTypeNested::Flags & ~LargeBit) & HereditaryBits,
++    TraversalSize = Direction==Vertical ? RowsAtCompileTime : ColsAtCompileTime
++  };
++  typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
++  enum {
++    CoeffReadCost = TraversalSize * _MatrixTypeNested::CoeffReadCost + CostOpType::value
++  };
++};
++
++template< typename MatrixType, typename MemberOp, int Direction>
++class PartialReduxExpr : ei_no_assignment_operator,
++  public MatrixBase<PartialReduxExpr<MatrixType, MemberOp, Direction> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(PartialReduxExpr)
++    typedef typename ei_traits<PartialReduxExpr>::MatrixTypeNested MatrixTypeNested;
++    typedef typename ei_traits<PartialReduxExpr>::_MatrixTypeNested _MatrixTypeNested;
++
++    PartialReduxExpr(const MatrixType& mat, const MemberOp& func = MemberOp())
++      : m_matrix(mat), m_functor(func) {}
++
++    int rows() const { return (Direction==Vertical   ? 1 : m_matrix.rows()); }
++    int cols() const { return (Direction==Horizontal ? 1 : m_matrix.cols()); }
++
++    const Scalar coeff(int i, int j) const
++    {
++      if (Direction==Vertical)
++        return m_functor(m_matrix.col(j));
++      else
++        return m_functor(m_matrix.row(i));
++    }
++
++  protected:
++    const MatrixTypeNested m_matrix;
++    const MemberOp m_functor;
++};
++
++#define EIGEN_MEMBER_FUNCTOR(MEMBER,COST)                           \
++  template <typename ResultType>                                    \
++  struct ei_member_##MEMBER EIGEN_EMPTY_STRUCT {                    \
++    typedef ResultType result_type;                                 \
++    template<typename Scalar, int Size> struct Cost                 \
++    { enum { value = COST }; };                                     \
++    template<typename Derived>                                      \
++    inline ResultType operator()(const MatrixBase<Derived>& mat) const     \
++    { return mat.MEMBER(); }                                        \
++  }
++
++EIGEN_MEMBER_FUNCTOR(norm2, Size * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
++EIGEN_MEMBER_FUNCTOR(norm, (Size+5) * NumTraits<Scalar>::MulCost + (Size-1)*NumTraits<Scalar>::AddCost);
++EIGEN_MEMBER_FUNCTOR(sum, (Size-1)*NumTraits<Scalar>::AddCost);
++EIGEN_MEMBER_FUNCTOR(minCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
++EIGEN_MEMBER_FUNCTOR(maxCoeff, (Size-1)*NumTraits<Scalar>::AddCost);
++EIGEN_MEMBER_FUNCTOR(all, (Size-1)*NumTraits<Scalar>::AddCost);
++EIGEN_MEMBER_FUNCTOR(any, (Size-1)*NumTraits<Scalar>::AddCost);
++
++/** \internal */
++template <typename BinaryOp, typename Scalar>
++struct ei_member_redux {
++  typedef typename ei_result_of<
++                     BinaryOp(Scalar)
++                   >::type  result_type;
++  template<typename _Scalar, int Size> struct Cost
++  { enum { value = (Size-1) * ei_functor_traits<BinaryOp>::Cost }; };
++  ei_member_redux(const BinaryOp func) : m_functor(func) {}
++  template<typename Derived>
++  inline result_type operator()(const MatrixBase<Derived>& mat) const
++  { return mat.redux(m_functor); }
++  const BinaryOp m_functor;
++};
++
++/** \array_module \ingroup Array
++  *
++  * \class PartialRedux
++  *
++  * \brief Pseudo expression providing partial reduction operations
++  *
++  * \param ExpressionType the type of the object on which to do partial reductions
++  * \param Direction indicates the direction of the redux (Vertical or Horizontal)
++  *
++  * This class represents a pseudo expression with partial reduction features.
++  * It is the return type of MatrixBase::colwise() and MatrixBase::rowwise()
++  * and most of the time this is the only way it is used.
++  *
++  * Example: \include MatrixBase_colwise.cpp
++  * Output: \verbinclude MatrixBase_colwise.out
++  *
++  * \sa MatrixBase::colwise(), MatrixBase::rowwise(), class PartialReduxExpr
++  */
++template<typename ExpressionType, int Direction> class PartialRedux
++{
++  public:
++
++    typedef typename ei_traits<ExpressionType>::Scalar Scalar;
++    typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
++        ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
++
++    template<template<typename _Scalar> class Functor> struct ReturnType
++    {
++      typedef PartialReduxExpr<ExpressionType,
++                               Functor<typename ei_traits<ExpressionType>::Scalar>,
++                               Direction
++                              > Type;
++    };
++
++    template<typename BinaryOp> struct ReduxReturnType
++    {
++      typedef PartialReduxExpr<ExpressionType,
++                               ei_member_redux<BinaryOp,typename ei_traits<ExpressionType>::Scalar>,
++                               Direction
++                              > Type;
++    };
++
++    inline PartialRedux(const ExpressionType& matrix) : m_matrix(matrix) {}
++
++    /** \internal */
++    inline const ExpressionType& _expression() const { return m_matrix; }
++
++    template<typename BinaryOp>
++    const typename ReduxReturnType<BinaryOp>::Type
++    redux(const BinaryOp& func = BinaryOp()) const;
++
++    /** \returns a row (or column) vector expression of the smallest coefficient
++      * of each column (or row) of the referenced expression.
++      *
++      * Example: \include PartialRedux_minCoeff.cpp
++      * Output: \verbinclude PartialRedux_minCoeff.out
++      *      
++      * \sa MatrixBase::minCoeff() */
++    const typename ReturnType<ei_member_minCoeff>::Type minCoeff() const
++    { return _expression(); }
++
++    /** \returns a row (or column) vector expression of the largest coefficient
++      * of each column (or row) of the referenced expression.
++      *
++      * Example: \include PartialRedux_maxCoeff.cpp
++      * Output: \verbinclude PartialRedux_maxCoeff.out
++      *      
++      * \sa MatrixBase::maxCoeff() */
++    const typename ReturnType<ei_member_maxCoeff>::Type maxCoeff() const
++    { return _expression(); }
++
++    /** \returns a row (or column) vector expression of the squared norm
++      * of each column (or row) of the referenced expression.
++      *
++      * Example: \include PartialRedux_norm2.cpp
++      * Output: \verbinclude PartialRedux_norm2.out
++      *      
++      * \sa MatrixBase::norm2() */
++    const typename ReturnType<ei_member_norm2>::Type norm2() const
++    { return _expression(); }
++
++    /** \returns a row (or column) vector expression of the norm
++      * of each column (or row) of the referenced expression.
++      *
++      * Example: \include PartialRedux_norm.cpp
++      * Output: \verbinclude PartialRedux_norm.out
++      *
++      * \sa MatrixBase::norm() */
++    const typename ReturnType<ei_member_norm>::Type norm() const
++    { return _expression(); }
++
++    /** \returns a row (or column) vector expression of the sum
++      * of each column (or row) of the referenced expression.
++      *
++      * Example: \include PartialRedux_sum.cpp
++      * Output: \verbinclude PartialRedux_sum.out
++      *
++      * \sa MatrixBase::sum() */
++    const typename ReturnType<ei_member_sum>::Type sum() const
++    { return _expression(); }
++
++    /** \returns a row (or column) vector expression representing
++      * whether \b all coefficients of each respective column (or row) are \c true.
++      *
++      * \sa MatrixBase::all() */
++    const typename ReturnType<ei_member_all>::Type all() const
++    { return _expression(); }
++
++    /** \returns a row (or column) vector expression representing
++      * whether \b at \b least one coefficient of each respective column (or row) is \c true.
++      *
++      * \sa MatrixBase::any() */
++    const typename ReturnType<ei_member_any>::Type any() const
++    { return _expression(); }
++
++  protected:
++    ExpressionTypeNested m_matrix;
++};
++
++/** \array_module
++  *
++  * \returns a PartialRedux wrapper of *this providing additional partial reduction operations
++  *
++  * Example: \include MatrixBase_colwise.cpp
++  * Output: \verbinclude MatrixBase_colwise.out
++  *
++  * \sa rowwise(), class PartialRedux
++  */
++template<typename Derived>
++inline const PartialRedux<Derived,Vertical>
++MatrixBase<Derived>::colwise() const
++{
++  return derived();
++}
++
++/** \array_module
++  *
++  * \returns a PartialRedux wrapper of *this providing additional partial reduction operations
++  *
++  * Example: \include MatrixBase_rowwise.cpp
++  * Output: \verbinclude MatrixBase_rowwise.out
++  *
++  * \sa colwise(), class PartialRedux
++  */
++template<typename Derived>
++inline const PartialRedux<Derived,Horizontal>
++MatrixBase<Derived>::rowwise() const
++{
++  return derived();
++}
++
++/** \returns a row or column vector expression of \c *this reduxed by \a func
++  *
++  * The template parameter \a BinaryOp is the type of the functor
++  * of the custom redux operator. Note that func must be an associative operator.
++  *
++  * \sa class PartialRedux, MatrixBase::colwise(), MatrixBase::rowwise()
++  */
++template<typename ExpressionType, int Direction>
++template<typename BinaryOp>
++const typename PartialRedux<ExpressionType,Direction>::template ReduxReturnType<BinaryOp>::Type
++PartialRedux<ExpressionType,Direction>::redux(const BinaryOp& func) const
++{
++  return typename ReduxReturnType<BinaryOp>::Type(_expression(), func);
++}
++
++#endif // EIGEN_PARTIAL_REDUX_H
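
A short sketch of the colwise()/rowwise() wrappers described above, assuming
the dynamic-size MatrixXd typedef:

#include <Eigen/Array>
#include <iostream>
using namespace Eigen;

int main()
{
  MatrixXd m(2, 3);
  m << 1, 2, 3,
       4, 5, 6;
  std::cout << m.colwise().sum()      << "\n";  // 5 7 9  (one sum per column)
  std::cout << m.rowwise().sum()      << "\n";  // 6 and 15 (one sum per row)
  std::cout << m.colwise().maxCoeff() << "\n";  // 4 5 6
  return 0;
}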
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/Random.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/Random.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Array/Random.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Array/Random.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,121 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_RANDOM_H
++#define EIGEN_RANDOM_H
++
++template<typename Scalar> struct ei_scalar_random_op EIGEN_EMPTY_STRUCT {
++  inline ei_scalar_random_op(void) {}
++  inline const Scalar operator() (int, int) const { return ei_random<Scalar>(); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_random_op<Scalar> >
++{ enum { Cost = 5 * NumTraits<Scalar>::MulCost, PacketAccess = false, IsRepeatable = false }; };
++
++/** \array_module
++  * 
++  * \returns a random matrix (not an expression, the matrix is immediately evaluated).
++  *
++  * The parameters \a rows and \a cols are the number of rows and of columns of
++  * the returned matrix. Must be compatible with this MatrixBase type.
++  *
++  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
++  * it is redundant to pass \a rows and \a cols as arguments, so Random() should be used
++  * instead.
++  *
++  * \addexample RandomExample \label How to create a matrix with random coefficients
++  *
++  * Example: \include MatrixBase_random_int_int.cpp
++  * Output: \verbinclude MatrixBase_random_int_int.out
++  *
++  * \sa MatrixBase::setRandom(), MatrixBase::Random(int), MatrixBase::Random()
++  */
++template<typename Derived>
++inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
++MatrixBase<Derived>::Random(int rows, int cols)
++{
++  return NullaryExpr(rows, cols, ei_scalar_random_op<Scalar>());
++}
++
++/** \array_module
++  * 
++  * \returns a random vector (not an expression, the vector is immediately evaluated).
++  *
++  * The parameter \a size is the size of the returned vector.
++  * Must be compatible with this MatrixBase type.
++  *
++  * \only_for_vectors
++  *
++  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
++  * it is redundant to pass \a size as argument, so Random() should be used
++  * instead.
++  *
++  * Example: \include MatrixBase_random_int.cpp
++  * Output: \verbinclude MatrixBase_random_int.out
++  *
++  * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random()
++  */
++template<typename Derived>
++inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
++MatrixBase<Derived>::Random(int size)
++{
++  return NullaryExpr(size, ei_scalar_random_op<Scalar>());
++}
++
++/** \array_module
++  * 
++  * \returns a fixed-size random matrix or vector
++  * (not an expression, the matrix is immediately evaluated).
++  *
++  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
++  * need to use the variants taking size arguments.
++  *
++  * Example: \include MatrixBase_random.cpp
++  * Output: \verbinclude MatrixBase_random.out
++  *
++  * \sa MatrixBase::setRandom(), MatrixBase::Random(int,int), MatrixBase::Random(int)
++  */
++template<typename Derived>
++inline const CwiseNullaryOp<ei_scalar_random_op<typename ei_traits<Derived>::Scalar>, Derived>
++MatrixBase<Derived>::Random()
++{
++  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_random_op<Scalar>());
++}
++
++/** \array_module
++  * 
++  * Sets all coefficients in this expression to random values.
++  *
++  * Example: \include MatrixBase_setRandom.cpp
++  * Output: \verbinclude MatrixBase_setRandom.out
++  *
++  * \sa class CwiseNullaryOp, MatrixBase::setRandom(int,int)
++  */
++template<typename Derived>
++inline Derived& MatrixBase<Derived>::setRandom()
++{
++  return *this = Random(rows(), cols());
++}
++
++#endif // EIGEN_RANDOM_H
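
For reference, a small sketch (illustration only, not part of the patch) exercising the three Random() variants and setRandom() documented above; it assumes the usual dynamic- and fixed-size typedefs from Core:

  #include <Eigen/Array>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    MatrixXd a = MatrixXd::Random(3, 4); // dynamic size: pass rows and cols
    VectorXd v = VectorXd::Random(5);    // dynamic-size vector: pass only the size
    Matrix2f b = Matrix2f::Random();     // fixed size: no arguments needed
    b.setRandom();                       // overwrite an existing matrix in place
    std::cout << a << "\n\n" << v.transpose() << "\n\n" << b << std::endl;
    return 0;
  }
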
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Cholesky/Cholesky.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Cholesky/Cholesky.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Cholesky/Cholesky.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Cholesky/Cholesky.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,157 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_CHOLESKY_H
++#define EIGEN_CHOLESKY_H
++
++/** \ingroup Cholesky_Module
++  *
++  * \class Cholesky
++  *
++  * \brief Standard Cholesky decomposition of a matrix and associated features
++  *
++  * \param MatrixType the type of the matrix of which we are computing the Cholesky decomposition
++  *
++  * This class performs a standard Cholesky decomposition of a symmetric, positive definite
++  * matrix A such that A = LL^* = U^*U, where L is lower triangular.
++  *
++  * While the Cholesky decomposition is particularly useful to solve selfadjoint problems like D^*D x = b,
++  * for that purpose we recommend the Cholesky decomposition without square root, which is more stable
++  * and even faster. Nevertheless, this standard Cholesky decomposition remains useful in many other
++  * situations like generalised eigen problems with hermitian matrices.
++  *
++  * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
++  * the strict lower part does not have to store correct values.
++  *
++  * \sa MatrixBase::cholesky(), class CholeskyWithoutSquareRoot
++  */
++template<typename MatrixType> class Cholesky
++{
++  private:
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
++    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType;
++
++    enum {
++      PacketSize = ei_packet_traits<Scalar>::size,
++      AlignmentMask = int(PacketSize)-1
++    };
++
++  public:
++  
++    Cholesky(const MatrixType& matrix)
++      : m_matrix(matrix.rows(), matrix.cols())
++    {
++      compute(matrix);
++    }
++
++    inline Part<MatrixType, Lower> matrixL(void) const { return m_matrix; }
++
++    /** \returns true if the matrix is positive definite */
++    inline bool isPositiveDefinite(void) const { return m_isPositiveDefinite; }
++
++    template<typename Derived>
++    typename Derived::Eval solve(const MatrixBase<Derived> &b) const;
++
++    void compute(const MatrixType& matrix);
++
++  protected:
++    /** \internal
++      * Used to compute and store L
++      * The strict upper part is not used and even not initialized.
++      */
++    MatrixType m_matrix;
++    bool m_isPositiveDefinite;
++};
++
++/** Computes / recomputes the Cholesky decomposition A = LL^* = U^*U of \a matrix
++  */
++template<typename MatrixType>
++void Cholesky<MatrixType>::compute(const MatrixType& a)
++{
++  assert(a.rows()==a.cols());
++  const int size = a.rows();
++  m_matrix.resize(size, size);
++
++  RealScalar x;
++  x = ei_real(a.coeff(0,0));
++  m_isPositiveDefinite = x > precision<Scalar>() && ei_isMuchSmallerThan(ei_imag(a.coeff(0,0)), RealScalar(1));
++  m_matrix.coeffRef(0,0) = ei_sqrt(x);
++  m_matrix.col(0).end(size-1) = a.row(0).end(size-1).adjoint() / ei_real(m_matrix.coeff(0,0));
++  for (int j = 1; j < size; ++j)
++  {
++    Scalar tmp = ei_real(a.coeff(j,j)) - m_matrix.row(j).start(j).norm2();
++    x = ei_real(tmp);
++    if (x < precision<Scalar>() || (!ei_isMuchSmallerThan(ei_imag(tmp), RealScalar(1))))
++    {
++      m_isPositiveDefinite = false;
++      return;
++    }
++    m_matrix.coeffRef(j,j) = x = ei_sqrt(x);
++
++    int endSize = size-j-1;
++    if (endSize>0) {
++      // Note that when all matrix columns have good alignment, then the following
++      // product is guaranteed to be optimal with respect to alignment.
++      m_matrix.col(j).end(endSize) =
++        (m_matrix.block(j+1, 0, endSize, j) * m_matrix.row(j).start(j).adjoint()).lazy();
++
++      // FIXME could use a.col instead of a.row
++      m_matrix.col(j).end(endSize) = (a.row(j).end(endSize).adjoint()
++        - m_matrix.col(j).end(endSize) ) / x;
++    }
++  }
++}
++
++/** \returns the solution of \f$ A x = b \f$ using the current decomposition of A.
++  * In other words, it returns \f$ A^{-1} b \f$ computing
++  * \f$ {L^{*}}^{-1} L^{-1} b \f$ from right to left.
++  * \param b the column vector \f$ b \f$, which can also be a matrix.
++  *
++  * Example: \include Cholesky_solve.cpp
++  * Output: \verbinclude Cholesky_solve.out
++  *
++  * \sa MatrixBase::cholesky(), CholeskyWithoutSquareRoot::solve()
++  */
++template<typename MatrixType>
++template<typename Derived>
++typename Derived::Eval Cholesky<MatrixType>::solve(const MatrixBase<Derived> &b) const
++{
++  const int size = m_matrix.rows();
++  ei_assert(size==b.rows());
++
++  return m_matrix.adjoint().template part<Upper>().solveTriangular(matrixL().solveTriangular(b));
++}
++
++/** \cholesky_module
++  * \returns the Cholesky decomposition of \c *this
++  */
++template<typename Derived>
++inline const Cholesky<typename MatrixBase<Derived>::EvalType>
++MatrixBase<Derived>::cholesky() const
++{
++  return Cholesky<typename ei_eval<Derived>::type>(derived());
++}
++
++#endif // EIGEN_CHOLESKY_H
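
For reference, a minimal sketch (illustration only, not part of the patch) of solving a symmetric positive definite system with the Cholesky class above; Random() lives in the Array module of this snapshot, hence the extra include, and Identity() is assumed from Core:

  #include <Eigen/Array>
  #include <Eigen/Cholesky>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    MatrixXd m = MatrixXd::Random(4, 4);
    MatrixXd a = m * m.adjoint() + MatrixXd::Identity(4, 4); // symmetric positive definite
    VectorXd b = VectorXd::Random(4);

    Cholesky<MatrixXd> chol(a);        // the decomposition is computed in the constructor
    if (chol.isPositiveDefinite())
      std::cout << "x = " << chol.solve(b).transpose() << std::endl;
    return 0;
  }
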
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Cholesky/CholeskyWithoutSquareRoot.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Cholesky/CholeskyWithoutSquareRoot.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Cholesky/CholeskyWithoutSquareRoot.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Cholesky/CholeskyWithoutSquareRoot.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,167 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_CHOLESKY_WITHOUT_SQUARE_ROOT_H
++#define EIGEN_CHOLESKY_WITHOUT_SQUARE_ROOT_H
++
++/** \ingroup Cholesky_Module
++  *
++  * \class CholeskyWithoutSquareRoot
++  *
++  * \brief Robust Cholesky decomposition of a matrix and associated features
++  *
++  * \param MatrixType the type of the matrix of which we are computing the Cholesky decomposition
++  *
++  * This class performs a Cholesky decomposition without square root of a symmetric, positive definite
++  * matrix A such that A = L D L^* = U^* D U, where L is lower triangular with a unit diagonal
++  * and D is a diagonal matrix.
++  *
++  * Compared to a standard Cholesky decomposition, avoiding the square roots allows for faster and more
++  * stable computation.
++  *
++  * Note that during the decomposition, only the upper triangular part of A is considered. Therefore,
++  * the strict lower part does not have to store correct values.
++  *
++  * \sa MatrixBase::choleskyNoSqrt(), class Cholesky
++  */
++template<typename MatrixType> class CholeskyWithoutSquareRoot
++{
++  public:
++
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
++    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType;
++
++    CholeskyWithoutSquareRoot(const MatrixType& matrix)
++      : m_matrix(matrix.rows(), matrix.cols())
++    {
++      compute(matrix);
++    }
++
++    /** \returns the lower triangular matrix L */
++    inline Part<MatrixType, UnitLower> matrixL(void) const { return m_matrix; }
++
++    /** \returns the coefficients of the diagonal matrix D */
++    inline DiagonalCoeffs<MatrixType> vectorD(void) const { return m_matrix.diagonal(); }
++
++    /** \returns true if the matrix is positive definite */
++    inline bool isPositiveDefinite(void) const { return m_isPositiveDefinite; }
++
++    template<typename Derived>
++    typename Derived::Eval solve(const MatrixBase<Derived> &b) const;
++
++    void compute(const MatrixType& matrix);
++
++  protected:
++    /** \internal
++      * Used to compute and store the cholesky decomposition A = L D L^* = U^* D U.
++      * The strict upper part is used during the decomposition, the strict lower
++      * part corresponds to the coefficients of L (its diagonal is equal to 1 and
++      * is not stored), and the diagonal entries correspond to D.
++      */
++    MatrixType m_matrix;
++
++    bool m_isPositiveDefinite;
++};
++
++/** Compute / recompute the Cholesky decomposition A = L D L^* = U^* D U of \a matrix
++  */
++template<typename MatrixType>
++void CholeskyWithoutSquareRoot<MatrixType>::compute(const MatrixType& a)
++{
++  assert(a.rows()==a.cols());
++  const int size = a.rows();
++  m_matrix.resize(size, size);
++  m_isPositiveDefinite = true;
++
++  // Let's preallocate a temporary vector to evaluate the matrix-vector product into it.
++  // Unlike the standard Cholesky decomposition, here we cannot evaluate it into the destination
++  // matrix because it is a sub-row, which is not suitable for efficient packet evaluation.
++  // (at least if we assume the matrix is col-major)
++  Matrix<Scalar,MatrixType::RowsAtCompileTime,1> _temporary(size);
++
++  // Note that in this algorithm the rows of the strict upper part of m_matrix are used to store
++  // column vectors, hence the seemingly strange .conjugate() and .transpose() calls...
++
++  m_matrix.row(0) = a.row(0).conjugate();
++  m_matrix.col(0).end(size-1) = m_matrix.row(0).end(size-1) / m_matrix.coeff(0,0);
++  for (int j = 1; j < size; ++j)
++  {
++    RealScalar tmp = ei_real(a.coeff(j,j) - (m_matrix.row(j).start(j) * m_matrix.col(j).start(j).conjugate()).coeff(0,0));
++    m_matrix.coeffRef(j,j) = tmp;
++
++    if (ei_isMuchSmallerThan(tmp,RealScalar(1)))
++    {
++      m_isPositiveDefinite = false;
++      return;
++    }
++
++    int endSize = size-j-1;
++    if (endSize>0)
++    {
++      _temporary.end(endSize) = ( m_matrix.block(j+1,0, endSize, j)
++                                  * m_matrix.col(j).start(j).conjugate() ).lazy();
++
++      m_matrix.row(j).end(endSize) = a.row(j).end(endSize).conjugate()
++                                   - _temporary.end(endSize).transpose();
++
++      m_matrix.col(j).end(endSize) = m_matrix.row(j).end(endSize) / tmp;
++    }
++  }
++}
++
++/** \returns the solution of \f$ A x = b \f$ using the current decomposition of A.
++  * In other words, it returns \f$ A^{-1} b \f$ computing
++  * \f$ {L^{*}}^{-1} D^{-1} L^{-1} b \f$ from right to left.
++  * \param b the column vector \f$ b \f$, which can also be a matrix.
++  *
++  * See Cholesky::solve() for an example.
++  * 
++  * \sa MatrixBase::choleskyNoSqrt()
++  */
++template<typename MatrixType>
++template<typename Derived>
++typename Derived::Eval CholeskyWithoutSquareRoot<MatrixType>::solve(const MatrixBase<Derived> &b) const
++{
++  const int size = m_matrix.rows();
++  ei_assert(size==b.rows());
++
++  return m_matrix.adjoint().template part<UnitUpper>()
++    .solveTriangular(
++      (  m_matrix.cwise().inverse().template part<Diagonal>()
++       * matrixL().solveTriangular(b))
++     );
++}
++
++/** \cholesky_module
++  * \returns the Cholesky decomposition without square root of \c *this
++  */
++template<typename Derived>
++inline const CholeskyWithoutSquareRoot<typename MatrixBase<Derived>::EvalType>
++MatrixBase<Derived>::choleskyNoSqrt() const
++{
++  return derived();
++}
++
++#endif // EIGEN_CHOLESKY_WITHOUT_SQUARE_ROOT_H
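
A matching sketch (again illustration only, not part of the patch) for the no-square-root variant, which exposes the L and D factors separately; the test matrix is built as in the previous sketch:

  #include <Eigen/Array>
  #include <Eigen/Cholesky>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    MatrixXd m = MatrixXd::Random(4, 4);
    MatrixXd a = m * m.adjoint() + MatrixXd::Identity(4, 4); // symmetric positive definite
    VectorXd b = VectorXd::Random(4);

    CholeskyWithoutSquareRoot<MatrixXd> ldl(a);
    std::cout << "diag(D) = " << ldl.vectorD().transpose() << std::endl; // coefficients of D
    std::cout << "x       = " << ldl.solve(b).transpose() << std::endl;  // solves A x = b
    return 0;
  }
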
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/arch/AltiVec/PacketMath.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/arch/AltiVec/PacketMath.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/arch/AltiVec/PacketMath.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/arch/AltiVec/PacketMath.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,120 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Konstantinos Margaritis <markos at codex.gr>
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_PACKET_MATH_ALTIVEC_H
++#define EIGEN_PACKET_MATH_ALTIVEC_H
++
++#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
++#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
++#endif
++
++static const vector int   v0i   = vec_splat_u32(0);
++static const vector int   v16i_ = vec_splat_u32(-16);
++static const vector float v0f   = (vector float) v0i;
++
++template<> struct ei_packet_traits<float>  { typedef vector float type; enum {size=4}; };
++template<> struct ei_packet_traits<int>    { typedef vector int type; enum {size=4}; };
++
++inline vector float  ei_padd(const vector float   a, const vector float   b) { return vec_add(a,b); }
++inline vector int    ei_padd(const vector int     a, const vector int     b) { return vec_add(a,b); }
++
++inline vector float  ei_psub(const vector float   a, const vector float   b) { return vec_sub(a,b); }
++inline vector int    ei_psub(const vector int     a, const vector int     b) { return vec_sub(a,b); }
++
++inline vector float  ei_pmul(const vector float   a, const vector float   b) { return vec_madd(a,b, v0f); }
++inline vector int    ei_pmul(const vector int     a, const vector int     b)
++{
++  // Taken from http://developer.apple.com/hardwaredrivers/ve/algorithms.html#Multiply32
++
++  //Set up constants
++  vector int bswap, lowProduct, highProduct;
++
++  //Do real work
++  bswap = vec_rl( (vector unsigned int)b, (vector unsigned int)v16i_ );
++  lowProduct = vec_mulo( (vector short)a,(vector short)b );
++  highProduct = vec_msum((vector short)a,(vector short)bswap, v0i);
++  highProduct = vec_sl( (vector unsigned int)highProduct, (vector unsigned int)v16i_ );
++  return vec_add( lowProduct, highProduct );
++}
++
++inline vector float  ei_pdiv(const vector float   a, const vector float   b) { return vec_div(a,b); }
++
++inline vector float ei_pmadd(const vector float   a, const vector float   b, const vector float c) { return vec_madd(a, b, c); }
++
++inline vector float  ei_pmin(const vector float   a, const vector float   b) { return vec_min(a,b); }
++inline vector int    ei_pmin(const vector int     a, const vector int     b) { return vec_min(a,b); }
++
++inline vector float  ei_pmax(const vector float   a, const vector float   b) { return vec_max(a,b); }
++inline vector int    ei_pmax(const vector int     a, const vector int     b) { return vec_max(a,b); }
++
++inline vector float  ei_pload(const float*   from) { return vec_ld(0, from); }
++inline vector int    ei_pload(const int*     from) { return vec_ld(0, from); }
++
++inline vector float  ei_ploadu(const float*)
++{ EIGEN_STATIC_ASSERT(unaligned_load_and_store_operations_unimplemented_on_AltiVec) }
++inline vector int    ei_ploadu(const int*  )
++{ EIGEN_STATIC_ASSERT(unaligned_load_and_store_operations_unimplemented_on_AltiVec) }
++
++inline vector float  ei_pset1(const float&  from)
++{
++  static float __attribute__((aligned(16))) af[4];
++  af[0] = from;
++  vector float vc = vec_ld(0, af);
++  vc = vec_splat(vc, 0);
++  return vc;
++}
++
++inline vector int    ei_pset1(const int&    from)
++{
++  static int __attribute__((aligned(16))) ai[4];
++  ai[0] = from;
++  vector int vc = vec_ld(0, ai);
++  vc = vec_splat(vc, 0);
++  return vc;
++}
++
++inline void ei_pstore(float*   to, const vector float   from) { vec_st(from, 0, to); }
++inline void ei_pstore(int*     to, const vector int     from) { vec_st(from, 0, to); }
++
++inline void ei_pstoreu(float*, const vector float)
++{ EIGEN_STATIC_ASSERT(unaligned_load_and_store_operations_unimplemented_on_AltiVec) }
++inline void ei_pstoreu(int*  , const vector int  )
++{ EIGEN_STATIC_ASSERT(unaligned_load_and_store_operations_unimplemented_on_AltiVec) }
++
++inline float  ei_pfirst(const vector float  a)
++{
++  static float __attribute__((aligned(16))) af[4];
++  vec_st(a, 0, af);
++  return af[0];
++}
++
++inline int    ei_pfirst(const vector int    a)
++{
++  static int __attribute__((aligned(16))) ai[4];
++  vec_st(a, 0, ai);
++  return ai[0];
++}
++
++#endif // EIGEN_PACKET_MATH_ALTIVEC_H
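
For reference, a rough sketch (illustration only, not part of the patch) of how these internal AltiVec kernels compose; they are only compiled when AltiVec vectorization is enabled, and since the unaligned loads/stores above intentionally static-assert, the pointers are assumed 16-byte aligned:

  #include <Eigen/Core>
  using namespace Eigen;

  // One packet (4 floats) of y = a*x + y, using the broadcast (ei_pset1)
  // and multiply-add (ei_pmadd) kernels defined above.
  void axpy4(float a, const float* x, float* y) // x and y assumed 16-byte aligned
  {
    vector float pa = ei_pset1(a);
    ei_pstore(y, ei_pmadd(pa, ei_pload(x), ei_pload(y)));
  }
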
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/arch/SSE/PacketMath.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/arch/SSE/PacketMath.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/arch/SSE/PacketMath.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/arch/SSE/PacketMath.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,283 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_PACKET_MATH_SSE_H
++#define EIGEN_PACKET_MATH_SSE_H
++
++#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
++#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 16
++#endif
++
++template<> struct ei_packet_traits<float>  { typedef __m128  type; enum {size=4}; };
++template<> struct ei_packet_traits<double> { typedef __m128d type; enum {size=2}; };
++template<> struct ei_packet_traits<int>    { typedef __m128i type; enum {size=4}; };
++
++template<> struct ei_unpacket_traits<__m128>  { typedef float  type; enum {size=4}; };
++template<> struct ei_unpacket_traits<__m128d> { typedef double type; enum {size=2}; };
++template<> struct ei_unpacket_traits<__m128i> { typedef int    type; enum {size=4}; };
++
++template<> inline __m128  ei_padd(const __m128&  a, const __m128&  b) { return _mm_add_ps(a,b); }
++template<> inline __m128d ei_padd(const __m128d& a, const __m128d& b) { return _mm_add_pd(a,b); }
++template<> inline __m128i ei_padd(const __m128i& a, const __m128i& b) { return _mm_add_epi32(a,b); }
++
++template<> inline __m128  ei_psub(const __m128&  a, const __m128&  b) { return _mm_sub_ps(a,b); }
++template<> inline __m128d ei_psub(const __m128d& a, const __m128d& b) { return _mm_sub_pd(a,b); }
++template<> inline __m128i ei_psub(const __m128i& a, const __m128i& b) { return _mm_sub_epi32(a,b); }
++
++template<> inline __m128  ei_pmul(const __m128&  a, const __m128&  b) { return _mm_mul_ps(a,b); }
++template<> inline __m128d ei_pmul(const __m128d& a, const __m128d& b) { return _mm_mul_pd(a,b); }
++template<> inline __m128i ei_pmul(const __m128i& a, const __m128i& b)
++{
++  return _mm_or_si128(
++    _mm_and_si128(
++      _mm_mul_epu32(a,b),
++      _mm_setr_epi32(0xffffffff,0,0xffffffff,0)),
++    _mm_slli_si128(
++      _mm_and_si128(
++        _mm_mul_epu32(_mm_srli_si128(a,4),_mm_srli_si128(b,4)),
++        _mm_setr_epi32(0xffffffff,0,0xffffffff,0)), 4));
++}
++
++template<> inline __m128  ei_pdiv(const __m128&  a, const __m128&  b) { return _mm_div_ps(a,b); }
++template<> inline __m128d ei_pdiv(const __m128d& a, const __m128d& b) { return _mm_div_pd(a,b); }
++template<> inline __m128i ei_pdiv(const __m128i& /*a*/, const __m128i& /*b*/)
++{ ei_assert(false && "packet integer division are not supported by SSE"); }
++
++// for some weird raisons, it has to be overloaded for packet integer
++template<> inline __m128i ei_pmadd(const __m128i& a, const __m128i& b, const __m128i& c) { return ei_padd(ei_pmul(a,b), c); }
++
++template<> inline __m128  ei_pmin(const __m128&  a, const __m128&  b) { return _mm_min_ps(a,b); }
++template<> inline __m128d ei_pmin(const __m128d& a, const __m128d& b) { return _mm_min_pd(a,b); }
++// FIXME this vectorized min operator is likely to be slower than the standard one
++template<> inline __m128i ei_pmin(const __m128i& a, const __m128i& b)
++{
++  __m128i mask = _mm_cmplt_epi32(a,b);
++  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
++}
++
++template<> inline __m128  ei_pmax(const __m128&  a, const __m128&  b) { return _mm_max_ps(a,b); }
++template<> inline __m128d ei_pmax(const __m128d& a, const __m128d& b) { return _mm_max_pd(a,b); }
++// FIXME this vectorized max operator is likely to be slower than the standard one
++template<> inline __m128i ei_pmax(const __m128i& a, const __m128i& b)
++{
++  __m128i mask = _mm_cmpgt_epi32(a,b);
++  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
++}
++
++template<> inline __m128  ei_pload(const float*   from) { return _mm_load_ps(from); }
++template<> inline __m128d ei_pload(const double*  from) { return _mm_load_pd(from); }
++template<> inline __m128i ei_pload(const int* from) { return _mm_load_si128(reinterpret_cast<const __m128i*>(from)); }
++
++template<> inline __m128  ei_ploadu(const float*   from) { return _mm_loadu_ps(from); }
++// template<> inline __m128  ei_ploadu(const float*   from) {
++//   if (size_t(from)&0xF)
++//     return _mm_loadu_ps(from);
++//   else 
++//     return _mm_loadu_ps(from);
++// }
++template<> inline __m128d ei_ploadu(const double*  from) { return _mm_loadu_pd(from); }
++template<> inline __m128i ei_ploadu(const int* from) { return _mm_loadu_si128(reinterpret_cast<const __m128i*>(from)); }
++
++template<> inline __m128  ei_pset1(const float&  from) { return _mm_set1_ps(from); }
++template<> inline __m128d ei_pset1(const double& from) { return _mm_set1_pd(from); }
++template<> inline __m128i ei_pset1(const int&    from) { return _mm_set1_epi32(from); }
++
++template<> inline void ei_pstore(float*  to, const __m128&  from) { _mm_store_ps(to, from); }
++template<> inline void ei_pstore(double* to, const __m128d& from) { _mm_store_pd(to, from); }
++template<> inline void ei_pstore(int*    to, const __m128i& from) { _mm_store_si128(reinterpret_cast<__m128i*>(to), from); }
++
++template<> inline void ei_pstoreu(float*  to, const __m128&  from) { _mm_storeu_ps(to, from); }
++template<> inline void ei_pstoreu(double* to, const __m128d& from) { _mm_storeu_pd(to, from); }
++template<> inline void ei_pstoreu(int*    to, const __m128i& from) { _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from); }
++
++template<> inline float  ei_pfirst(const __m128&  a) { return _mm_cvtss_f32(a); }
++template<> inline double ei_pfirst(const __m128d& a) { return _mm_cvtsd_f64(a); }
++template<> inline int    ei_pfirst(const __m128i& a) { return _mm_cvtsi128_si32(a); }
++
++#ifdef __SSE3__
++// TODO implement SSE2 versions as well as integer versions
++inline __m128 ei_preduxp(const __m128* vecs)
++{
++  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
++}
++inline __m128d ei_preduxp(const __m128d* vecs)
++{
++  return _mm_hadd_pd(vecs[0], vecs[1]);
++}
++// SSSE3 version:
++// inline __m128i ei_preduxp(const __m128i* vecs)
++// {
++//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
++// }
++
++inline float ei_predux(const __m128& a)
++{
++  __m128 tmp0 = _mm_hadd_ps(a,a);
++  return ei_pfirst(_mm_hadd_ps(tmp0, tmp0));
++}
++
++inline double ei_predux(const __m128d& a) { return ei_pfirst(_mm_hadd_pd(a, a)); }
++
++// SSSE3 version:
++// inline float ei_predux(const __m128i& a)
++// {
++//   __m128i tmp0 = _mm_hadd_epi32(a,a);
++//   return ei_pfirst(_mm_hadd_epi32(tmp0, tmp0));
++// }
++#else
++// SSE2 versions
++inline float ei_predux(const __m128& a)
++{
++  __m128 tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
++  return ei_pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
++}
++inline double ei_predux(const __m128d& a)
++{
++  return ei_pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
++}
++
++inline __m128 ei_preduxp(const __m128* vecs)
++{
++  __m128 tmp0, tmp1, tmp2;
++  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
++  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
++  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
++  tmp0 = _mm_add_ps(tmp0, tmp1);
++  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
++  tmp1 = _mm_add_ps(tmp1, tmp2);
++  tmp2 = _mm_movehl_ps(tmp1, tmp0);
++  tmp0 = _mm_movelh_ps(tmp0, tmp1);
++  return _mm_add_ps(tmp0, tmp2);
++}
++
++inline __m128d ei_preduxp(const __m128d* vecs)
++{
++  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
++}
++#endif  // SSE3
++
++inline int ei_predux(const __m128i& a)
++{
++  __m128i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
++  return ei_pfirst(tmp) + ei_pfirst(_mm_shuffle_epi32(tmp, 1));
++}
++
++inline __m128i ei_preduxp(const __m128i* vecs)
++{
++  __m128i tmp0, tmp1, tmp2;
++  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
++  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
++  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
++  tmp0 = _mm_add_epi32(tmp0, tmp1);
++  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
++  tmp1 = _mm_add_epi32(tmp1, tmp2);
++  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
++  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
++  return _mm_add_epi32(tmp0, tmp2);
++}
++
++#if (defined __GNUC__)
++// template <> inline __m128 ei_pmadd(const __m128&  a, const __m128&  b, const __m128&  c)
++// {
++//   __m128 res = b;
++//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
++//   return res;
++// }
++// inline __m128i _mm_alignr_epi8(const __m128i&  a, const __m128i&  b, const int i)
++// {
++//   __m128i res = a;
++//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
++//   return res;
++// }
++#endif
++
++#ifdef __SSSE3__
++// SSSE3 versions
++template<int Offset>
++struct ei_palign_impl<Offset,__m128>
++{
++  inline static void run(__m128& first, const __m128& second)
++  {
++    first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(first), _mm_castps_si128(second), (4-Offset)*4));
++  }
++};
++
++template<int Offset>
++struct ei_palign_impl<Offset,__m128i>
++{
++  inline static void run(__m128i& first, const __m128i& second)
++  {
++    first = _mm_alignr_epi8(first, second, (4-Offset)*4);
++  }
++};
++#else
++// SSE2 versions
++template<int Offset>
++struct ei_palign_impl<Offset,__m128>
++{
++  inline static void run(__m128& first, const __m128& second)
++  {
++    if (Offset==1)
++    {
++      first = _mm_move_ss(first,second);
++      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
++    }
++    else if (Offset==2)
++    {
++      first = _mm_movehl_ps(first,first);
++      first = _mm_movelh_ps(first,second);
++    }
++    else if (Offset==3)
++    {
++      first = _mm_move_ss(first,second);
++      first = _mm_shuffle_ps(first,second,0x93);
++    }
++  }
++};
++
++template<int Offset>
++struct ei_palign_impl<Offset,__m128i>
++{
++  inline static void run(__m128i& first, const __m128i& second)
++  {
++    if (Offset==1)
++    {
++      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
++      first = _mm_shuffle_epi32(first,0x39);
++    }
++    else if (Offset==2)
++    {
++      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
++      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
++    }
++    else if (Offset==3)
++    {
++      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
++      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
++    }
++  }
++};
++#endif
++
++#endif // EIGEN_PACKET_MATH_SSE_H
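
For reference, a rough sketch (illustration only, not part of the patch) of the SSE kernels above; this is internal API that only exists when SSE vectorization is enabled:

  #include <Eigen/Core>
  using namespace Eigen;

  // Sums four floats with the kernels above: an unaligned load (ei_ploadu)
  // followed by the horizontal reduction ei_predux().
  float sum4(const float* data)
  {
    ei_packet_traits<float>::type p = ei_ploadu(data); // __m128 holding 4 floats
    return ei_predux(p);
  }
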
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Assign.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Assign.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Assign.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Assign.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,442 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2007 Michael Olbrich <michael.olbrich at gmx.net>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_ASSIGN_H
++#define EIGEN_ASSIGN_H
++
++/***************************************************************************
++* Part 1 : the logic deciding a strategy for vectorization and unrolling
++***************************************************************************/
++
++template <typename Derived, typename OtherDerived>
++struct ei_assign_traits
++{
++public:
++  enum {
++    DstIsAligned = Derived::Flags & AlignedBit,
++    SrcIsAligned = OtherDerived::Flags & AlignedBit,
++    SrcAlignment = DstIsAligned && SrcIsAligned ? Aligned : Unaligned
++  };
++
++private:
++  enum {
++    InnerSize = int(Derived::Flags)&RowMajorBit
++              ? Derived::ColsAtCompileTime
++              : Derived::RowsAtCompileTime,
++    InnerMaxSize = int(Derived::Flags)&RowMajorBit
++              ? Derived::MaxColsAtCompileTime
++              : Derived::MaxRowsAtCompileTime,
++    PacketSize = ei_packet_traits<typename Derived::Scalar>::size
++  };
++
++  enum {
++    MightVectorize = (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit)
++                  && ((int(Derived::Flags)&RowMajorBit)==(int(OtherDerived::Flags)&RowMajorBit)),
++    MayInnerVectorize  = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
++                       && int(DstIsAligned) && int(SrcIsAligned),
++    MayLinearVectorize = MightVectorize && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit),
++    MaySliceVectorize  = MightVectorize && int(InnerMaxSize)>=3*PacketSize /* slice vectorization can be slow, so we only
++      want it if the slices are big, which is indicated by InnerMaxSize rather than InnerSize; think of the case
++      of a dynamic block in a fixed-size matrix */
++  };
++
++public:
++  enum {
++    Vectorization = int(MayInnerVectorize)  ? int(InnerVectorization)
++                  : int(MayLinearVectorize) ? int(LinearVectorization)
++                  : int(MaySliceVectorize)  ? int(SliceVectorization)
++                                            : int(NoVectorization)
++  };
++
++private:
++  enum {
++    UnrollingLimit      = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize)),
++    MayUnrollCompletely = int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit),
++    MayUnrollInner      = int(InnerSize * OtherDerived::CoeffReadCost) <= int(UnrollingLimit)
++  };
++
++public:
++  enum {
++    Unrolling = (int(Vectorization) == int(InnerVectorization) || int(Vectorization) == int(NoVectorization))
++              ? (
++                   int(MayUnrollCompletely) ? int(CompleteUnrolling)
++                 : int(MayUnrollInner)      ? int(InnerUnrolling)
++                                            : int(NoUnrolling)
++                )
++              : int(Vectorization) == int(LinearVectorization)
++              ? ( int(MayUnrollCompletely) && int(DstIsAligned) ? int(CompleteUnrolling) : int(NoUnrolling) )
++              : int(NoUnrolling)
++  };
++};
++
++/***************************************************************************
++* Part 2 : meta-unrollers
++***************************************************************************/
++
++/***********************
++*** No vectorization ***
++***********************/
++
++template<typename Derived1, typename Derived2, int Index, int Stop>
++struct ei_assign_novec_CompleteUnrolling
++{
++  enum {
++    row = int(Derived1::Flags)&RowMajorBit
++        ? Index / int(Derived1::ColsAtCompileTime)
++        : Index % Derived1::RowsAtCompileTime,
++    col = int(Derived1::Flags)&RowMajorBit
++        ? Index % int(Derived1::ColsAtCompileTime)
++        : Index / Derived1::RowsAtCompileTime
++  };
++
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    dst.copyCoeff(row, col, src);
++    ei_assign_novec_CompleteUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src);
++  }
++};
++
++template<typename Derived1, typename Derived2, int Stop>
++struct ei_assign_novec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
++{
++  inline static void run(Derived1 &, const Derived2 &) {}
++};
++
++template<typename Derived1, typename Derived2, int Index, int Stop>
++struct ei_assign_novec_InnerUnrolling
++{
++  inline static void run(Derived1 &dst, const Derived2 &src, int row_or_col)
++  {
++    const bool rowMajor = int(Derived1::Flags)&RowMajorBit;
++    const int row = rowMajor ? row_or_col : Index;
++    const int col = rowMajor ? Index : row_or_col;
++    dst.copyCoeff(row, col, src);
++    ei_assign_novec_InnerUnrolling<Derived1, Derived2, Index+1, Stop>::run(dst, src, row_or_col);
++  }
++};
++
++template<typename Derived1, typename Derived2, int Stop>
++struct ei_assign_novec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
++{
++  inline static void run(Derived1 &, const Derived2 &, int) {}
++};
++
++/**************************
++*** Inner vectorization ***
++**************************/
++
++template<typename Derived1, typename Derived2, int Index, int Stop>
++struct ei_assign_innervec_CompleteUnrolling
++{
++  enum {
++    row = int(Derived1::Flags)&RowMajorBit
++        ? Index / int(Derived1::ColsAtCompileTime)
++        : Index % Derived1::RowsAtCompileTime,
++    col = int(Derived1::Flags)&RowMajorBit
++        ? Index % int(Derived1::ColsAtCompileTime)
++        : Index / Derived1::RowsAtCompileTime
++  };
++
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    dst.template copyPacket<Derived2, Aligned, Aligned>(row, col, src);
++    ei_assign_innervec_CompleteUnrolling<Derived1, Derived2,
++      Index+ei_packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src);
++  }
++};
++
++template<typename Derived1, typename Derived2, int Stop>
++struct ei_assign_innervec_CompleteUnrolling<Derived1, Derived2, Stop, Stop>
++{
++  inline static void run(Derived1 &, const Derived2 &) {}
++};
++
++template<typename Derived1, typename Derived2, int Index, int Stop>
++struct ei_assign_innervec_InnerUnrolling
++{
++  inline static void run(Derived1 &dst, const Derived2 &src, int row_or_col)
++  {
++    const int row = int(Derived1::Flags)&RowMajorBit ? row_or_col : Index;
++    const int col = int(Derived1::Flags)&RowMajorBit ? Index : row_or_col;
++    dst.template copyPacket<Derived2, Aligned, Aligned>(row, col, src);
++    ei_assign_innervec_InnerUnrolling<Derived1, Derived2,
++      Index+ei_packet_traits<typename Derived1::Scalar>::size, Stop>::run(dst, src, row_or_col);
++  }
++};
++
++template<typename Derived1, typename Derived2, int Stop>
++struct ei_assign_innervec_InnerUnrolling<Derived1, Derived2, Stop, Stop>
++{
++  inline static void run(Derived1 &, const Derived2 &, int) {}
++};
++
++/***************************************************************************
++* Part 3 : implementation of all cases
++***************************************************************************/
++
++template<typename Derived1, typename Derived2,
++         int Vectorization = ei_assign_traits<Derived1, Derived2>::Vectorization,
++         int Unrolling = ei_assign_traits<Derived1, Derived2>::Unrolling>
++struct ei_assign_impl;
++
++/***********************
++*** No vectorization ***
++***********************/
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, NoVectorization, NoUnrolling>
++{
++  static void run(Derived1 &dst, const Derived2 &src)
++  {
++    const int innerSize = dst.innerSize();
++    const int outerSize = dst.outerSize();
++    for(int j = 0; j < outerSize; j++)
++      for(int i = 0; i < innerSize; i++)
++      {
++        if(int(Derived1::Flags)&RowMajorBit)
++          dst.copyCoeff(j, i, src);
++        else
++          dst.copyCoeff(i, j, src);
++      }
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, NoVectorization, CompleteUnrolling>
++{
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    ei_assign_novec_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
++      ::run(dst, src);
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, NoVectorization, InnerUnrolling>
++{
++  static void run(Derived1 &dst, const Derived2 &src)
++  {
++    const bool rowMajor = int(Derived1::Flags)&RowMajorBit;
++    const int innerSize = rowMajor ? Derived1::ColsAtCompileTime : Derived1::RowsAtCompileTime;
++    const int outerSize = dst.outerSize();
++    for(int j = 0; j < outerSize; j++)
++      ei_assign_novec_InnerUnrolling<Derived1, Derived2, 0, innerSize>
++        ::run(dst, src, j);
++  }
++};
++
++/**************************
++*** Inner vectorization ***
++**************************/
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, InnerVectorization, NoUnrolling>
++{
++  static void run(Derived1 &dst, const Derived2 &src)
++  {
++    const int innerSize = dst.innerSize();
++    const int outerSize = dst.outerSize();
++    const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
++    for(int j = 0; j < outerSize; j++)
++      for(int i = 0; i < innerSize; i+=packetSize)
++      {
++        if(int(Derived1::Flags)&RowMajorBit)
++          dst.template copyPacket<Derived2, Aligned, Aligned>(j, i, src);
++        else
++          dst.template copyPacket<Derived2, Aligned, Aligned>(i, j, src);
++      }
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, InnerVectorization, CompleteUnrolling>
++{
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    ei_assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
++      ::run(dst, src);
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, InnerVectorization, InnerUnrolling>
++{
++  static void run(Derived1 &dst, const Derived2 &src)
++  {
++    const bool rowMajor = int(Derived1::Flags)&RowMajorBit;
++    const int innerSize = rowMajor ? Derived1::ColsAtCompileTime : Derived1::RowsAtCompileTime;
++    const int outerSize = dst.outerSize();
++    for(int j = 0; j < outerSize; j++)
++      ei_assign_innervec_InnerUnrolling<Derived1, Derived2, 0, innerSize>
++        ::run(dst, src, j);
++  }
++};
++
++/***************************
++*** Linear vectorization ***
++***************************/
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
++{
++  static void run(Derived1 &dst, const Derived2 &src)
++  {
++    const int size = dst.size();
++    const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
++    const int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
++                           : ei_alignmentOffset(&dst.coeffRef(0), size);
++    const int alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
++
++    for(int index = 0; index < alignedStart; index++)
++      dst.copyCoeff(index, src);
++
++    for(int index = alignedStart; index < alignedEnd; index += packetSize)
++    {
++      dst.template copyPacket<Derived2, Aligned, ei_assign_traits<Derived1,Derived2>::SrcAlignment>(index, src);
++    }
++
++    for(int index = alignedEnd; index < size; index++)
++      dst.copyCoeff(index, src);
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, LinearVectorization, CompleteUnrolling>
++{
++  static void run(Derived1 &dst, const Derived2 &src)
++  {
++    const int size = Derived1::SizeAtCompileTime;
++    const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
++    const int alignedSize = (size/packetSize)*packetSize;
++
++    ei_assign_innervec_CompleteUnrolling<Derived1, Derived2, 0, alignedSize>::run(dst, src);
++    ei_assign_novec_CompleteUnrolling<Derived1, Derived2, alignedSize, size>::run(dst, src);
++  }
++};
++
++/**************************
++*** Slice vectorization ***
++***************************/
++
++template<typename Derived1, typename Derived2>
++struct ei_assign_impl<Derived1, Derived2, SliceVectorization, NoUnrolling>
++{
++  static void run(Derived1 &dst, const Derived2 &src)
++  {
++    const int packetSize = ei_packet_traits<typename Derived1::Scalar>::size;
++    const int packetAlignedMask = packetSize - 1;
++    const int innerSize = dst.innerSize();
++    const int outerSize = dst.outerSize();
++    const int alignedStep = (packetSize - dst.stride() % packetSize) & packetAlignedMask;
++    int alignedStart = ei_assign_traits<Derived1,Derived2>::DstIsAligned ? 0
++                     : ei_alignmentOffset(&dst.coeffRef(0), innerSize);
++
++    for(int i = 0; i < outerSize; i++)
++    {
++      const int alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
++
++      // do the non-vectorizable part of the assignment
++      for (int index = 0; index<alignedStart ; index++)
++      {
++        if(Derived1::Flags&RowMajorBit)
++          dst.copyCoeff(i, index, src);
++        else
++          dst.copyCoeff(index, i, src);
++      }
++
++      // do the vectorizable part of the assignment
++      for (int index = alignedStart; index<alignedEnd; index+=packetSize)
++      {
++        if(Derived1::Flags&RowMajorBit)
++          dst.template copyPacket<Derived2, Aligned, Unaligned>(i, index, src);
++        else
++          dst.template copyPacket<Derived2, Aligned, Unaligned>(index, i, src);
++      }
++
++      // do the non-vectorizable part of the assignment
++      for (int index = alignedEnd; index<innerSize ; index++)
++      {
++        if(Derived1::Flags&RowMajorBit)
++          dst.copyCoeff(i, index, src);
++        else
++          dst.copyCoeff(index, i, src);
++      }
++
++      alignedStart = std::min<int>((alignedStart+alignedStep)%packetSize, innerSize);
++    }
++  }
++};
++
++/***************************************************************************
++* Part 4 : implementation of MatrixBase methods
++***************************************************************************/
++
++template<typename Derived>
++template<typename OtherDerived>
++inline Derived& MatrixBase<Derived>
++  ::lazyAssign(const MatrixBase<OtherDerived>& other)
++{
++  EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived);
++  ei_assert(rows() == other.rows() && cols() == other.cols());
++  ei_assign_impl<Derived, OtherDerived>::run(derived(),other.derived());
++  return derived();
++}
++
++template<typename Derived, typename OtherDerived,
++         bool EvalBeforeAssigning = int(OtherDerived::Flags) & EvalBeforeAssigningBit,
++         bool NeedToTranspose = Derived::IsVectorAtCompileTime
++                && OtherDerived::IsVectorAtCompileTime
++                && int(Derived::RowsAtCompileTime) == int(OtherDerived::ColsAtCompileTime)
++                && int(Derived::ColsAtCompileTime) == int(OtherDerived::RowsAtCompileTime)
++                && int(Derived::SizeAtCompileTime) != 1>
++struct ei_assign_selector;
++
++template<typename Derived, typename OtherDerived>
++struct ei_assign_selector<Derived,OtherDerived,false,false> {
++  static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.derived()); }
++};
++template<typename Derived, typename OtherDerived>
++struct ei_assign_selector<Derived,OtherDerived,true,false> {
++  static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.eval()); }
++};
++template<typename Derived, typename OtherDerived>
++struct ei_assign_selector<Derived,OtherDerived,false,true> {
++  static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose()); }
++};
++template<typename Derived, typename OtherDerived>
++struct ei_assign_selector<Derived,OtherDerived,true,true> {
++  static Derived& run(Derived& dst, const OtherDerived& other) { return dst.lazyAssign(other.transpose().eval()); }
++};
++
++template<typename Derived>
++template<typename OtherDerived>
++inline Derived& MatrixBase<Derived>
++  ::operator=(const MatrixBase<OtherDerived>& other)
++{
++  return ei_assign_selector<Derived,OtherDerived>::run(derived(), other.derived());
++}
++
++#endif // EIGEN_ASSIGN_H
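
The strategy selection above is entirely a compile-time affair: for a small fixed-size, aligned type, ei_assign_traits typically selects inner vectorization with complete unrolling, so a plain assignment compiles down to a few packet loads and stores with no loop. A rough sketch (illustration only, not part of the patch) of the entry point being exercised:

  #include <Eigen/Core>
  using namespace Eigen;

  int main()
  {
    Vector4f b(1, 2, 3, 4);
    Vector4f c(4, 3, 2, 1);
    Vector4f a;
    // MatrixBase::operator= -> ei_assign_selector -> lazyAssign() -> ei_assign_impl<...>::run()
    a = b + c;
    return a.x() == 5.0f ? 0 : 1;
  }
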
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Block.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Block.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Block.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Block.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,717 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_BLOCK_H
++#define EIGEN_BLOCK_H
++
++/** \class Block
++  *
++  * \brief Expression of a fixed-size or dynamic-size block
++  *
++  * \param MatrixType the type of the object in which we are taking a block
++  * \param BlockRows the number of rows of the block we are taking at compile time (optional)
++  * \param BlockCols the number of columns of the block we are taking at compile time (optional)
++  * \param _PacketAccess allows enforcing aligned loads and stores if set to ForceAligned.
++  *                      The default is AsRequested. This parameter is internally used by Eigen
++  *                      in expressions such as \code mat.block() += other; \endcode and most of
++  *                      the time this is the only way it is used.
++  * \param _DirectAccessStatus \internal used for partial specialization
++  *
++  * This class represents an expression of either a fixed-size or dynamic-size block. It is the return
++  * type of MatrixBase::block(int,int,int,int) and MatrixBase::block<int,int>(int,int) and
++  * most of the time this is the only way it is used.
++  *
++  * However, if you want to directly manipulate block expressions,
++  * for instance if you want to write a function returning such an expression, you
++  * will need to use this class.
++  *
++  * Here is an example illustrating the dynamic case:
++  * \include class_Block.cpp
++  * Output: \verbinclude class_Block.out
++  *
++  * \note Even though this expression has dynamic size, in the case where \a MatrixType
++  * has fixed size, this expression inherits a fixed maximal size which means that evaluating
++  * it does not cause a dynamic memory allocation.
++  *
++  * Here is an example illustrating the fixed-size case:
++  * \include class_FixedBlock.cpp
++  * Output: \verbinclude class_FixedBlock.out
++  *
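++  * As an illustration (hypothetical helper, not part of Eigen), a function
++  * returning the top-left 2x2 corner of any matrix as a Block expression
++  * could be written as:
++  * \code
++  * template<typename Derived>
++  * Block<Derived, 2, 2> topLeft2x2(MatrixBase<Derived>& m)
++  * { return Block<Derived, 2, 2>(m.derived(), 0, 0); }
++  * \endcode
++  *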
++  * \sa MatrixBase::block(int,int,int,int), MatrixBase::block(int,int), class VectorBlock
++  */
++template<typename MatrixType, int BlockRows, int BlockCols, int _PacketAccess, int _DirectAccessStatus>
++struct ei_traits<Block<MatrixType, BlockRows, BlockCols, _PacketAccess, _DirectAccessStatus> >
++{
++  typedef typename MatrixType::Scalar Scalar;
++  enum{
++    RowsAtCompileTime = MatrixType::RowsAtCompileTime == 1 ? 1 : BlockRows,
++    ColsAtCompileTime = MatrixType::ColsAtCompileTime == 1 ? 1 : BlockCols,
++    MaxRowsAtCompileTime = RowsAtCompileTime == 1 ? 1
++      : (BlockRows==Dynamic ? MatrixType::MaxRowsAtCompileTime : BlockRows),
++    MaxColsAtCompileTime = ColsAtCompileTime == 1 ? 1
++      : (BlockCols==Dynamic ? MatrixType::MaxColsAtCompileTime : BlockCols),
++    MaskLargeBit = ((RowsAtCompileTime != Dynamic && MatrixType::RowsAtCompileTime == Dynamic)
++                  || (ColsAtCompileTime != Dynamic && MatrixType::ColsAtCompileTime == Dynamic))
++                   ? ~LargeBit
++                   : ~(unsigned int)0,
++    RowMajor = int(MatrixType::Flags)&RowMajorBit,
++    InnerSize = RowMajor ? ColsAtCompileTime : RowsAtCompileTime,
++    InnerMaxSize = RowMajor ? MaxColsAtCompileTime : MaxRowsAtCompileTime,
++    MaskPacketAccessBit = (InnerMaxSize == Dynamic || (InnerSize >= ei_packet_traits<Scalar>::size))
++                        ? PacketAccessBit : 0,
++    FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
++    Flags = (MatrixType::Flags & (HereditaryBits | MaskPacketAccessBit | DirectAccessBit) & MaskLargeBit)
++          | FlagsLinearAccessBit,
++    CoeffReadCost = MatrixType::CoeffReadCost,
++    PacketAccess = _PacketAccess
++  };
++  typedef typename ei_meta_if<int(PacketAccess)==ForceAligned,
++                 Block<MatrixType, BlockRows, BlockCols, _PacketAccess, _DirectAccessStatus>&,
++                 Block<MatrixType, BlockRows, BlockCols, ForceAligned, _DirectAccessStatus> >::ret AlignedDerivedType;
++};
++
++template<typename MatrixType, int BlockRows, int BlockCols, int PacketAccess, int _DirectAccessStatus> class Block
++  : public MatrixBase<Block<MatrixType, BlockRows, BlockCols, PacketAccess, _DirectAccessStatus> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Block)
++
++    /** Column or Row constructor
++      */
++    inline Block(const MatrixType& matrix, int i)
++      : m_matrix(matrix),
++        // It is a row if and only if BlockRows==1 and BlockCols==MatrixType::ColsAtCompileTime,
++        // and it is a column if and only if BlockRows==MatrixType::RowsAtCompileTime and BlockCols==1,
++        // all other cases are invalid.
++        // The case of a 1x1 matrix seems ambiguous, but the result is the same anyway.
++        m_startRow( (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0),
++        m_startCol( (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
++        m_blockRows(matrix.rows()), // if it is a row, then m_blockRows has a fixed size of 1, so there is no problem in overwriting it
++        m_blockCols(matrix.cols())  // same for m_blockCols
++    {
++      ei_assert( (i>=0) && (
++          ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i<matrix.rows())
++        ||((BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) && i<matrix.cols())));
++    }
++
++    /** Fixed-size constructor
++      */
++    inline Block(const MatrixType& matrix, int startRow, int startCol)
++      : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
++        m_blockRows(matrix.rows()), m_blockCols(matrix.cols())
++    {
++      EIGEN_STATIC_ASSERT(RowsAtCompileTime!=Dynamic && ColsAtCompileTime!=Dynamic,this_method_is_only_for_fixed_size);
++      ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows()
++          && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols());
++    }
++
++    /** Dynamic-size constructor
++      */
++    inline Block(const MatrixType& matrix,
++          int startRow, int startCol,
++          int blockRows, int blockCols)
++      : m_matrix(matrix), m_startRow(startRow), m_startCol(startCol),
++                          m_blockRows(blockRows), m_blockCols(blockCols)
++    {
++      ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
++          && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
++      ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows()
++          && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols());
++    }
++
++    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
++
++    inline int rows() const { return m_blockRows.value(); }
++    inline int cols() const { return m_blockCols.value(); }
++
++    inline int stride(void) const { return m_matrix.stride(); }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      return m_matrix.const_cast_derived()
++               .coeffRef(row + m_startRow.value(), col + m_startCol.value());
++    }
++
++    inline const Scalar coeff(int row, int col) const
++    {
++      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
++    }
++
++    inline Scalar& coeffRef(int index)
++    {
++      return m_matrix.const_cast_derived()
++             .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
++                       m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
++    }
++
++    inline const Scalar coeff(int index) const
++    {
++      return m_matrix
++             .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
++                    m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int row, int col) const
++    {
++      return m_matrix.template packet<Unaligned>
++              (row + m_startRow.value(), col + m_startCol.value());
++    }
++
++    template<int LoadMode>
++    inline void writePacket(int row, int col, const PacketScalar& x)
++    {
++      m_matrix.const_cast_derived().template writePacket<Unaligned>
++              (row + m_startRow.value(), col + m_startCol.value(), x);
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int index) const
++    {
++      return m_matrix.template packet<Unaligned>
++              (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
++               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
++    }
++
++    template<int LoadMode>
++    inline void writePacket(int index, const PacketScalar& x)
++    {
++      m_matrix.const_cast_derived().template writePacket<Unaligned>
++         (m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
++          m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0), x);
++    }
++
++  protected:
++
++    const typename MatrixType::Nested m_matrix;
++    const ei_int_if_dynamic<MatrixType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
++    const ei_int_if_dynamic<MatrixType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
++    const ei_int_if_dynamic<RowsAtCompileTime> m_blockRows;
++    const ei_int_if_dynamic<ColsAtCompileTime> m_blockCols;
++};
++
++/** \internal */
++template<typename MatrixType, int BlockRows, int BlockCols, int PacketAccess>
++class Block<MatrixType,BlockRows,BlockCols,PacketAccess,HasDirectAccess>
++  : public MapBase<Block<MatrixType, BlockRows, BlockCols,PacketAccess,HasDirectAccess> >
++{
++  public:
++
++    _EIGEN_GENERIC_PUBLIC_INTERFACE(Block, MapBase<Block>)
++
++    typedef typename ei_traits<Block>::AlignedDerivedType AlignedDerivedType;
++
++    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Block)
++
++    AlignedDerivedType forceAligned()
++    {
++      if (PacketAccess==ForceAligned)
++        return *this;
++      else
++        return Block<MatrixType,BlockRows,BlockCols,ForceAligned,HasDirectAccess>
++                    (m_matrix, Base::m_data, Base::m_rows.value(), Base::m_cols.value());
++    }
++
++    /** Column or Row constructor
++      */
++    inline Block(const MatrixType& matrix, int i)
++      : Base(&matrix.const_cast_derived().coeffRef(
++              (BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) ? i : 0,
++              (BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
++             BlockRows==1 ? 1 : matrix.rows(),
++             BlockCols==1 ? 1 : matrix.cols()),
++        m_matrix(matrix)
++    {
++      ei_assert( (i>=0) && (
++          ((BlockRows==1) && (BlockCols==MatrixType::ColsAtCompileTime) && i<matrix.rows())
++        ||((BlockRows==MatrixType::RowsAtCompileTime) && (BlockCols==1) && i<matrix.cols())));
++    }
++
++    /** Fixed-size constructor
++      */
++    inline Block(const MatrixType& matrix, int startRow, int startCol)
++      : Base(&matrix.const_cast_derived().coeffRef(startRow,startCol)), m_matrix(matrix)
++    {
++      ei_assert(startRow >= 0 && BlockRows >= 1 && startRow + BlockRows <= matrix.rows()
++             && startCol >= 0 && BlockCols >= 1 && startCol + BlockCols <= matrix.cols());
++    }
++
++    /** Dynamic-size constructor
++      */
++    inline Block(const MatrixType& matrix,
++          int startRow, int startCol,
++          int blockRows, int blockCols)
++      : Base(&matrix.const_cast_derived().coeffRef(startRow,startCol), blockRows, blockCols),
++        m_matrix(matrix)
++    {
++      ei_assert((RowsAtCompileTime==Dynamic || RowsAtCompileTime==blockRows)
++             && (ColsAtCompileTime==Dynamic || ColsAtCompileTime==blockCols));
++      ei_assert(startRow >= 0 && blockRows >= 1 && startRow + blockRows <= matrix.rows()
++             && startCol >= 0 && blockCols >= 1 && startCol + blockCols <= matrix.cols());
++    }
++
++    inline int stride(void) const { return m_matrix.stride(); }
++
++  protected:
++
++    /** \internal used by forceAligned() */
++    inline Block(const MatrixType& matrix, const Scalar* data, int blockRows, int blockCols)
++      : Base(data, blockRows, blockCols), m_matrix(matrix)
++    {}
++
++    const typename MatrixType::Nested m_matrix;
++};
++
++/** \returns a dynamic-size expression of a block in *this.
++  *
++  * \param startRow the first row in the block
++  * \param startCol the first column in the block
++  * \param blockRows the number of rows in the block
++  * \param blockCols the number of columns in the block
++  *
++  * \addexample BlockIntIntIntInt \label How to reference a sub-matrix (dynamic-size)
++  *
++  * Example: \include MatrixBase_block_int_int_int_int.cpp
++  * Output: \verbinclude MatrixBase_block_int_int_int_int.out
++  *
++  * \note Even though the returned expression has dynamic size, in the case
++  * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
++  * which means that evaluating it does not cause a dynamic memory allocation.
++  *
++  * \sa class Block, block(int,int)
++  */
++template<typename Derived>
++inline typename BlockReturnType<Derived>::Type MatrixBase<Derived>
++  ::block(int startRow, int startCol, int blockRows, int blockCols)
++{
++  return typename BlockReturnType<Derived>::Type(derived(), startRow, startCol, blockRows, blockCols);
++}
++
++/** This is the const version of block(int,int,int,int). */
++template<typename Derived>
++inline const typename BlockReturnType<Derived>::Type MatrixBase<Derived>
++  ::block(int startRow, int startCol, int blockRows, int blockCols) const
++{
++  return typename BlockReturnType<Derived>::Type(derived(), startRow, startCol, blockRows, blockCols);
++}
++
++/** \returns a dynamic-size expression of a block in *this.
++  *
++  * \only_for_vectors
++  *
++  * \addexample BlockIntInt \label How to reference a sub-vector (dynamic size)
++  *
++  * \param start the first coefficient in the block
++  * \param size the number of coefficients in the block
++  *
++  * Example: \include MatrixBase_block_int_int.cpp
++  * Output: \verbinclude MatrixBase_block_int_int.out
++  *
++  * \note Even though the returned expression has dynamic size, in the case
++  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
++  * which means that evaluating it does not cause a dynamic memory allocation.
++  *
++  * \sa class Block, block(int)
++  */
++template<typename Derived>
++inline typename BlockReturnType<Derived>::SubVectorType MatrixBase<Derived>
++  ::block(int start, int size)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return typename BlockReturnType<Derived>::SubVectorType(derived(), RowsAtCompileTime == 1 ? 0 : start,
++                                   ColsAtCompileTime == 1 ? 0 : start,
++                                   RowsAtCompileTime == 1 ? 1 : size,
++                                   ColsAtCompileTime == 1 ? 1 : size);
++}
++
++/** This is the const version of block(int,int).*/
++template<typename Derived>
++inline const typename BlockReturnType<Derived>::SubVectorType
++MatrixBase<Derived>::block(int start, int size) const
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return typename BlockReturnType<Derived>::SubVectorType(derived(), RowsAtCompileTime == 1 ? 0 : start,
++                                   ColsAtCompileTime == 1 ? 0 : start,
++                                   RowsAtCompileTime == 1 ? 1 : size,
++                                   ColsAtCompileTime == 1 ? 1 : size);
++}
++
++/** \returns a dynamic-size expression of the first coefficients of *this.
++  *
++  * \only_for_vectors
++  *
++  * \param size the number of coefficients in the block
++  *
++  * \addexample BlockInt \label How to reference the start of a vector (dynamic-size)
++  *
++  * Example: \include MatrixBase_start_int.cpp
++  * Output: \verbinclude MatrixBase_start_int.out
++  *
++  * \note Even though the returned expression has dynamic size, in the case
++  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
++  * which means that evaluating it does not cause a dynamic memory allocation.
++  *
++  * \sa class Block, block(int,int)
++  */
++template<typename Derived>
++inline typename BlockReturnType<Derived,Dynamic>::SubVectorType
++MatrixBase<Derived>::start(int size)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return Block<Derived,
++               RowsAtCompileTime == 1 ? 1 : Dynamic,
++               ColsAtCompileTime == 1 ? 1 : Dynamic>
++              (derived(), 0, 0,
++               RowsAtCompileTime == 1 ? 1 : size,
++               ColsAtCompileTime == 1 ? 1 : size);
++}
++
++/** This is the const version of start(int).*/
++template<typename Derived>
++inline const typename BlockReturnType<Derived,Dynamic>::SubVectorType
++MatrixBase<Derived>::start(int size) const
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return Block<Derived,
++               RowsAtCompileTime == 1 ? 1 : Dynamic,
++               ColsAtCompileTime == 1 ? 1 : Dynamic>
++              (derived(), 0, 0,
++               RowsAtCompileTime == 1 ? 1 : size,
++               ColsAtCompileTime == 1 ? 1 : size);
++}
++
++/** \returns a dynamic-size expression of the last coefficients of *this.
++  *
++  * \only_for_vectors
++  *
++  * \param size the number of coefficients in the block
++  *
++  * \addexample BlockEnd \label How to reference the end of a vector (dynamic-size)
++  *
++  * Example: \include MatrixBase_end_int.cpp
++  * Output: \verbinclude MatrixBase_end_int.out
++  *
++  * \note Even though the returned expression has dynamic size, in the case
++  * when it is applied to a fixed-size vector, it inherits a fixed maximal size,
++  * which means that evaluating it does not cause a dynamic memory allocation.
++  *
++  * \sa class Block, block(int,int)
++  */
++template<typename Derived>
++inline typename BlockReturnType<Derived,Dynamic>::SubVectorType
++MatrixBase<Derived>::end(int size)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return Block<Derived,
++               RowsAtCompileTime == 1 ? 1 : Dynamic,
++               ColsAtCompileTime == 1 ? 1 : Dynamic>
++              (derived(),
++               RowsAtCompileTime == 1 ? 0 : rows() - size,
++               ColsAtCompileTime == 1 ? 0 : cols() - size,
++               RowsAtCompileTime == 1 ? 1 : size,
++               ColsAtCompileTime == 1 ? 1 : size);
++}
++
++/** This is the const version of end(int).*/
++template<typename Derived>
++inline const typename BlockReturnType<Derived,Dynamic>::SubVectorType
++MatrixBase<Derived>::end(int size) const
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return Block<Derived,
++               RowsAtCompileTime == 1 ? 1 : Dynamic,
++               ColsAtCompileTime == 1 ? 1 : Dynamic>
++              (derived(),
++               RowsAtCompileTime == 1 ? 0 : rows() - size,
++               ColsAtCompileTime == 1 ? 0 : cols() - size,
++               RowsAtCompileTime == 1 ? 1 : size,
++               ColsAtCompileTime == 1 ? 1 : size);
++}
++
++/** \returns a fixed-size expression of the first coefficients of *this.
++  *
++  * \only_for_vectors
++  *
++  * The template parameter \a Size is the number of coefficients in the block
++  *
++  * \addexample BlockStart \label How to reference the start of a vector (fixed-size)
++  *
++  * Example: \include MatrixBase_template_int_start.cpp
++  * Output: \verbinclude MatrixBase_template_int_start.out
++  *
++  * \sa class Block
++  */
++template<typename Derived>
++template<int Size>
++inline typename BlockReturnType<Derived,Size>::SubVectorType
++MatrixBase<Derived>::start()
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return Block<Derived, (RowsAtCompileTime == 1 ? 1 : Size),
++                        (ColsAtCompileTime == 1 ? 1 : Size)>(derived(), 0, 0);
++}
++
++/** This is the const version of start<int>().*/
++template<typename Derived>
++template<int Size>
++inline const typename BlockReturnType<Derived,Size>::SubVectorType
++MatrixBase<Derived>::start() const
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return Block<Derived, (RowsAtCompileTime == 1 ? 1 : Size),
++                        (ColsAtCompileTime == 1 ? 1 : Size)>(derived(), 0, 0);
++}
++
++/** \returns a fixed-size expression of the last coefficients of *this.
++  *
++  * \only_for_vectors
++  *
++  * The template parameter \a Size is the number of coefficients in the block
++  *
++  * Example: \include MatrixBase_template_int_end.cpp
++  * Output: \verbinclude MatrixBase_template_int_end.out
++  *
++  * \sa class Block
++  */
++template<typename Derived>
++template<int Size>
++inline typename BlockReturnType<Derived,Size>::SubVectorType
++MatrixBase<Derived>::end()
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return Block<Derived, RowsAtCompileTime == 1 ? 1 : Size,
++                        ColsAtCompileTime == 1 ? 1 : Size>
++           (derived(),
++            RowsAtCompileTime == 1 ? 0 : rows() - Size,
++            ColsAtCompileTime == 1 ? 0 : cols() - Size);
++}
++
++/** This is the const version of end<int>.*/
++template<typename Derived>
++template<int Size>
++inline const typename BlockReturnType<Derived,Size>::SubVectorType
++MatrixBase<Derived>::end() const
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return Block<Derived, RowsAtCompileTime == 1 ? 1 : Size,
++                        ColsAtCompileTime == 1 ? 1 : Size>
++           (derived(),
++            RowsAtCompileTime == 1 ? 0 : rows() - Size,
++            ColsAtCompileTime == 1 ? 0 : cols() - Size);
++}
++
++/** \returns a dynamic-size expression of a corner of *this.
++  *
++  * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight,
++  * \a Eigen::BottomLeft, \a Eigen::BottomRight.
++  * \param cRows the number of rows in the corner
++  * \param cCols the number of columns in the corner
++  *
++  * \addexample BlockCornerDynamicSize \label How to reference a sub-corner of a matrix
++  *
++  * Example: \include MatrixBase_corner_enum_int_int.cpp
++  * Output: \verbinclude MatrixBase_corner_enum_int_int.out
++  *
++  * \note Even though the returned expression has dynamic size, in the case
++  * when it is applied to a fixed-size matrix, it inherits a fixed maximal size,
++  * which means that evaluating it does not cause a dynamic memory allocation.
++  *
++  * \sa class Block, block(int,int,int,int)
++  */
++template<typename Derived>
++inline typename BlockReturnType<Derived>::Type MatrixBase<Derived>
++  ::corner(CornerType type, int cRows, int cCols)
++{
++  switch(type)
++  {
++    default:
++      ei_assert(false && "Bad corner type.");
++    case TopLeft:
++      return typename BlockReturnType<Derived>::Type(derived(), 0, 0, cRows, cCols);
++    case TopRight:
++      return typename BlockReturnType<Derived>::Type(derived(), 0, cols() - cCols, cRows, cCols);
++    case BottomLeft:
++      return typename BlockReturnType<Derived>::Type(derived(), rows() - cRows, 0, cRows, cCols);
++    case BottomRight:
++      return typename BlockReturnType<Derived>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
++  }
++}
++
++/** This is the const version of corner(CornerType, int, int).*/
++template<typename Derived>
++inline const typename BlockReturnType<Derived>::Type
++MatrixBase<Derived>::corner(CornerType type, int cRows, int cCols) const
++{
++  switch(type)
++  {
++    default:
++      ei_assert(false && "Bad corner type.");
++    case TopLeft:
++      return typename BlockReturnType<Derived>::Type(derived(), 0, 0, cRows, cCols);
++    case TopRight:
++      return typename BlockReturnType<Derived>::Type(derived(), 0, cols() - cCols, cRows, cCols);
++    case BottomLeft:
++      return typename BlockReturnType<Derived>::Type(derived(), rows() - cRows, 0, cRows, cCols);
++    case BottomRight:
++      return typename BlockReturnType<Derived>::Type(derived(), rows() - cRows, cols() - cCols, cRows, cCols);
++  }
++}
++
++/** \returns a fixed-size expression of a corner of *this.
++  *
++  * \param type the type of corner. Can be \a Eigen::TopLeft, \a Eigen::TopRight,
++  * \a Eigen::BottomLeft, \a Eigen::BottomRight.
++  *
++  * The template parameters CRows and CCols are the number of rows and columns in the corner.
++  *
++  * Example: \include MatrixBase_template_int_int_corner_enum.cpp
++  * Output: \verbinclude MatrixBase_template_int_int_corner_enum.out
++  *
++  * \sa class Block, block(int,int,int,int)
++  */
++template<typename Derived>
++template<int CRows, int CCols>
++inline typename BlockReturnType<Derived, CRows, CCols>::Type
++MatrixBase<Derived>::corner(CornerType type)
++{
++  switch(type)
++  {
++    default:
++      ei_assert(false && "Bad corner type.");
++    case TopLeft:
++      return Block<Derived, CRows, CCols>(derived(), 0, 0);
++    case TopRight:
++      return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
++    case BottomLeft:
++      return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
++    case BottomRight:
++      return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
++  }
++}
++
++/** This is the const version of corner<int, int>(CornerType).*/
++template<typename Derived>
++template<int CRows, int CCols>
++inline const typename BlockReturnType<Derived, CRows, CCols>::Type
++MatrixBase<Derived>::corner(CornerType type) const
++{
++  switch(type)
++  {
++    default:
++      ei_assert(false && "Bad corner type.");
++    case TopLeft:
++      return Block<Derived, CRows, CCols>(derived(), 0, 0);
++    case TopRight:
++      return Block<Derived, CRows, CCols>(derived(), 0, cols() - CCols);
++    case BottomLeft:
++      return Block<Derived, CRows, CCols>(derived(), rows() - CRows, 0);
++    case BottomRight:
++      return Block<Derived, CRows, CCols>(derived(), rows() - CRows, cols() - CCols);
++  }
++}
++
++/** \returns a fixed-size expression of a block in *this.
++  *
++  * The template parameters \a BlockRows and \a BlockCols are the number of
++  * rows and columns in the block.
++  *
++  * \param startRow the first row in the block
++  * \param startCol the first column in the block
++  *
++  * \addexample BlockSubMatrixFixedSize \label How to reference a sub-matrix (fixed-size)
++  *
++  * Example: \include MatrixBase_block_int_int.cpp
++  * Output: \verbinclude MatrixBase_block_int_int.out
++  *
++  * \note since block is a templated member, the keyword template has to be used
++  * if the matrix type is also a template parameter: \code m.template block<3,3>(1,1); \endcode
++  *
++  * \sa class Block, block(int,int,int,int)
++  */
++template<typename Derived>
++template<int BlockRows, int BlockCols>
++inline typename BlockReturnType<Derived, BlockRows, BlockCols>::Type
++MatrixBase<Derived>::block(int startRow, int startCol)
++{
++  return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
++}
++
++/** This is the const version of block<>(int, int). */
++template<typename Derived>
++template<int BlockRows, int BlockCols>
++inline const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type
++MatrixBase<Derived>::block(int startRow, int startCol) const
++{
++  return Block<Derived, BlockRows, BlockCols>(derived(), startRow, startCol);
++}
++
++/** \returns an expression of the \a i-th column of *this. Note that the numbering starts at 0.
++  *
++  * \addexample BlockColumn \label How to reference a single column of a matrix
++  *
++  * Example: \include MatrixBase_col.cpp
++  * Output: \verbinclude MatrixBase_col.out
++  *
++  * \sa row(), class Block */
++template<typename Derived>
++inline typename MatrixBase<Derived>::ColXpr
++MatrixBase<Derived>::col(int i)
++{
++  return ColXpr(derived(), i);
++}
++
++/** This is the const version of col(). */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::ColXpr
++MatrixBase<Derived>::col(int i) const
++{
++  return ColXpr(derived(), i);
++}
++
++/** \returns an expression of the \a i-th row of *this. Note that the numbering starts at 0.
++  *
++  * \addexample BlockRow \label How to reference a single row of a matrix
++  *
++  * Example: \include MatrixBase_row.cpp
++  * Output: \verbinclude MatrixBase_row.out
++  *
++  * \sa col(), class Block */
++template<typename Derived>
++inline typename MatrixBase<Derived>::RowXpr
++MatrixBase<Derived>::row(int i)
++{
++  return RowXpr(derived(), i);
++}
++
++/** This is the const version of row(). */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::RowXpr
++MatrixBase<Derived>::row(int i) const
++{
++  return RowXpr(derived(), i);
++}
++
++#endif // EIGEN_BLOCK_H
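
For reviewers unfamiliar with the block API added above, here is a minimal usage
sketch. It is not part of the patch; it assumes the MatrixXf/Vector4f typedefs and
the stream output operator from the Eigen Core module that this patch also adds,
and the include path to wherever the embedded headers end up in the koffice tree.

  #include <Eigen/Core>
  #include <iostream>

  int main()
  {
    // Fill a 4x4 matrix through operator()(int,int) (see Coeffs.h below).
    Eigen::MatrixXf m(4, 4);
    for (int i = 0; i < 4; ++i)
      for (int j = 0; j < 4; ++j)
        m(i, j) = float(4 * i + j);

    // Dynamic-size block: 2x2 sub-matrix starting at row 1, column 1.
    std::cout << m.block(1, 1, 2, 2) << "\n\n";

    // Fixed-size 2x2 block starting at (0,2); the 'template' keyword is only
    // needed when the type of m is itself a template parameter.
    std::cout << m.block<2, 2>(0, 2) << "\n\n";

    // Corners, single rows and single columns.
    std::cout << m.corner(Eigen::BottomRight, 2, 3) << "\n\n";
    std::cout << m.row(0) << "\n" << m.col(3) << "\n\n";

    // Vector-only helpers: dynamic-size start() and fixed-size end<N>().
    Eigen::Vector4f v;
    for (int i = 0; i < 4; ++i)
      v(i) = float(i + 1);
    std::cout << v.start(2) << "\n" << v.end<3>() << "\n";

    // Blocks are lvalue expressions: writing through them modifies m.
    m.block(0, 0, 2, 2) = m.block(2, 2, 2, 2);
    return 0;
  }
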
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CacheFriendlyProduct.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CacheFriendlyProduct.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CacheFriendlyProduct.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CacheFriendlyProduct.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,747 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_CACHE_FRIENDLY_PRODUCT_H
++#define EIGEN_CACHE_FRIENDLY_PRODUCT_H
++
++#ifndef EIGEN_EXTERN_INSTANTIATIONS
++
++template<typename Scalar>
++static void ei_cache_friendly_product(
++  int _rows, int _cols, int depth,
++  bool _lhsRowMajor, const Scalar* _lhs, int _lhsStride,
++  bool _rhsRowMajor, const Scalar* _rhs, int _rhsStride,
++  bool resRowMajor, Scalar* res, int resStride)
++{
++  const Scalar* EIGEN_RESTRICT lhs;
++  const Scalar* EIGEN_RESTRICT rhs;
++  int lhsStride, rhsStride, rows, cols;
++  bool lhsRowMajor;
++
++  if (resRowMajor)
++  {
++    lhs = _rhs;
++    rhs = _lhs;
++    lhsStride = _rhsStride;
++    rhsStride = _lhsStride;
++    cols = _rows;
++    rows = _cols;
++    lhsRowMajor = !_rhsRowMajor;
++    ei_assert(_lhsRowMajor);
++  }
++  else
++  {
++    lhs = _lhs;
++    rhs = _rhs;
++    lhsStride = _lhsStride;
++    rhsStride = _rhsStride;
++    rows = _rows;
++    cols = _cols;
++    lhsRowMajor = _lhsRowMajor;
++    ei_assert(!_rhsRowMajor);
++  }
++
++  typedef typename ei_packet_traits<Scalar>::type PacketType;
++
++  enum {
++    PacketSize = sizeof(PacketType)/sizeof(Scalar),
++    #if (defined __i386__)
++    // i386 architecture provides only 8 xmm registers,
++    // so let's reduce the max number of rows processed at once.
++    MaxBlockRows = 4,
++    MaxBlockRows_ClampingMask = 0xFFFFFC,
++    #else
++    MaxBlockRows = 8,
++    MaxBlockRows_ClampingMask = 0xFFFFF8,
++    #endif
++    // maximal size of the blocks fitted in L2 cache
++    MaxL2BlockSize = EIGEN_TUNE_FOR_L2_CACHE_SIZE / sizeof(Scalar)
++  };
++
++  const bool resIsAligned = (PacketSize==1) || (((resStride%PacketSize) == 0) && (size_t(res)%16==0));
++
++  const int remainingSize = depth % PacketSize;
++  const int size = depth - remainingSize; // third dimension of the product clamped to packet boundaries
++  const int l2BlockRows = MaxL2BlockSize > rows ? rows : MaxL2BlockSize;
++  const int l2BlockCols = MaxL2BlockSize > cols ? cols : MaxL2BlockSize;
++  const int l2BlockSize = MaxL2BlockSize > size ? size : MaxL2BlockSize;
++  const int l2BlockSizeAligned = (1 + std::max(l2BlockSize,l2BlockCols)/PacketSize)*PacketSize;
++  const bool needRhsCopy = (PacketSize>1) && ((rhsStride%PacketSize!=0) || (size_t(rhs)%16!=0));
++  Scalar* EIGEN_RESTRICT block = 0;
++  const int allocBlockSize = l2BlockRows*size;
++  block = ei_alloc_stack(Scalar, allocBlockSize);
++  Scalar* EIGEN_RESTRICT rhsCopy
++    = ei_alloc_stack(Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
++
++  // loops on each L2 cache friendly blocks of the result
++  for(int l2i=0; l2i<rows; l2i+=l2BlockRows)
++  {
++    const int l2blockRowEnd = std::min(l2i+l2BlockRows, rows);
++    const int l2blockRowEndBW = l2blockRowEnd & MaxBlockRows_ClampingMask;    // end of the rows aligned to bw
++    const int l2blockRemainingRows = l2blockRowEnd - l2blockRowEndBW;         // number of remaining rows
++    //const int l2blockRowEndBWPlusOne = l2blockRowEndBW + (l2blockRemainingRows?0:MaxBlockRows);
++
++    // build a cache friendly blocky matrix
++    int count = 0;
++
++    // copy l2blocksize rows of m_lhs to blocks of ps x bw
++    for(int l2k=0; l2k<size; l2k+=l2BlockSize)
++    {
++      const int l2blockSizeEnd = std::min(l2k+l2BlockSize, size);
++
++      for (int i = l2i; i<l2blockRowEndBW/*PlusOne*/; i+=MaxBlockRows)
++      {
++        // TODO merge the "if l2blockRemainingRows" using something like:
++        // const int blockRows = std::min(i+MaxBlockRows, rows) - i;
++
++        for (int k=l2k; k<l2blockSizeEnd; k+=PacketSize)
++        {
++          // TODO write these loops using meta unrolling
++          // negligible for large matrices but useful for small ones
++          if (lhsRowMajor)
++          {
++            for (int w=0; w<MaxBlockRows; ++w)
++              for (int s=0; s<PacketSize; ++s)
++                block[count++] = lhs[(i+w)*lhsStride + (k+s)];
++          }
++          else
++          {
++            for (int w=0; w<MaxBlockRows; ++w)
++              for (int s=0; s<PacketSize; ++s)
++                block[count++] = lhs[(i+w) + (k+s)*lhsStride];
++          }
++        }
++      }
++      if (l2blockRemainingRows>0)
++      {
++        for (int k=l2k; k<l2blockSizeEnd; k+=PacketSize)
++        {
++          if (lhsRowMajor)
++          {
++            for (int w=0; w<l2blockRemainingRows; ++w)
++              for (int s=0; s<PacketSize; ++s)
++                block[count++] = lhs[(l2blockRowEndBW+w)*lhsStride + (k+s)];
++          }
++          else
++          {
++            for (int w=0; w<l2blockRemainingRows; ++w)
++              for (int s=0; s<PacketSize; ++s)
++                block[count++] = lhs[(l2blockRowEndBW+w) + (k+s)*lhsStride];
++          }
++        }
++      }
++    }
++
++    for(int l2j=0; l2j<cols; l2j+=l2BlockCols)
++    {
++      int l2blockColEnd = std::min(l2j+l2BlockCols, cols);
++
++      for(int l2k=0; l2k<size; l2k+=l2BlockSize)
++      {
++        // accumulate bw rows of lhs times a single column of rhs into a bw x 1 block of res
++        int l2blockSizeEnd = std::min(l2k+l2BlockSize, size);
++
++        // if not aligned, copy the rhs block
++        if (needRhsCopy)
++          for(int l1j=l2j; l1j<l2blockColEnd; l1j+=1)
++          {
++            ei_internal_assert(l2BlockSizeAligned*(l1j-l2j)+(l2blockSizeEnd-l2k) < l2BlockSizeAligned*l2BlockSizeAligned);
++            memcpy(rhsCopy+l2BlockSizeAligned*(l1j-l2j),&(rhs[l1j*rhsStride+l2k]),(l2blockSizeEnd-l2k)*sizeof(Scalar));
++          }
++
++        // for each bw x 1 block of the result
++        for(int l1i=l2i; l1i<l2blockRowEndBW; l1i+=MaxBlockRows)
++        {
++          int offsetblock = l2k * (l2blockRowEnd-l2i) + (l1i-l2i)*(l2blockSizeEnd-l2k) - l2k*MaxBlockRows;
++          const Scalar* EIGEN_RESTRICT localB = &block[offsetblock];
++          
++          for(int l1j=l2j; l1j<l2blockColEnd; l1j+=1)
++          {
++            const Scalar* EIGEN_RESTRICT rhsColumn;
++            if (needRhsCopy)
++              rhsColumn = &(rhsCopy[l2BlockSizeAligned*(l1j-l2j)-l2k]);
++            else
++              rhsColumn = &(rhs[l1j*rhsStride]);
++
++            PacketType dst[MaxBlockRows];
++            dst[3] = dst[2] = dst[1] = dst[0] = ei_pset1(Scalar(0.));
++            if (MaxBlockRows==8)
++              dst[7] = dst[6] = dst[5] = dst[4] = dst[0];
++
++            PacketType tmp;
++
++            for(int k=l2k; k<l2blockSizeEnd; k+=PacketSize)
++            {
++              tmp = ei_ploadu(&rhsColumn[k]);
++              PacketType A0, A1, A2, A3, A4, A5;
++              A0 = ei_pload(localB + k*MaxBlockRows);
++              A1 = ei_pload(localB + k*MaxBlockRows+1*PacketSize);
++              A2 = ei_pload(localB + k*MaxBlockRows+2*PacketSize);
++              A3 = ei_pload(localB + k*MaxBlockRows+3*PacketSize);
++              if (MaxBlockRows==8) A4 = ei_pload(localB + k*MaxBlockRows+4*PacketSize);
++              if (MaxBlockRows==8) A5 = ei_pload(localB + k*MaxBlockRows+5*PacketSize);
++              dst[0] = ei_pmadd(tmp, A0, dst[0]);
++              if (MaxBlockRows==8) A0 = ei_pload(localB + k*MaxBlockRows+6*PacketSize);
++              dst[1] = ei_pmadd(tmp, A1, dst[1]);
++              if (MaxBlockRows==8) A1 = ei_pload(localB + k*MaxBlockRows+7*PacketSize);
++              dst[2] = ei_pmadd(tmp, A2, dst[2]);
++              dst[3] = ei_pmadd(tmp, A3, dst[3]);
++              if (MaxBlockRows==8)
++              {
++                dst[4] = ei_pmadd(tmp, A4, dst[4]);
++                dst[5] = ei_pmadd(tmp, A5, dst[5]);
++                dst[6] = ei_pmadd(tmp, A0, dst[6]);
++                dst[7] = ei_pmadd(tmp, A1, dst[7]);
++              }
++            }
++
++            Scalar* EIGEN_RESTRICT localRes = &(res[l1i + l1j*resStride]);
++
++            if (PacketSize>1 && resIsAligned)
++            {
++              // the result is aligned: let's do packet reduction
++              ei_pstore(&(localRes[0]), ei_padd(ei_pload(&(localRes[0])), ei_preduxp(&dst[0])));
++              if (PacketSize==2)
++                ei_pstore(&(localRes[2]), ei_padd(ei_pload(&(localRes[2])), ei_preduxp(&(dst[2]))));
++              if (MaxBlockRows==8)
++              {
++                ei_pstore(&(localRes[4]), ei_padd(ei_pload(&(localRes[4])), ei_preduxp(&(dst[4]))));
++                if (PacketSize==2)
++                  ei_pstore(&(localRes[6]), ei_padd(ei_pload(&(localRes[6])), ei_preduxp(&(dst[6]))));
++              }
++            }
++            else
++            {
++              // not aligned => per coeff packet reduction
++              localRes[0] += ei_predux(dst[0]);
++              localRes[1] += ei_predux(dst[1]);
++              localRes[2] += ei_predux(dst[2]);
++              localRes[3] += ei_predux(dst[3]);
++              if (MaxBlockRows==8)
++              {
++                localRes[4] += ei_predux(dst[4]);
++                localRes[5] += ei_predux(dst[5]);
++                localRes[6] += ei_predux(dst[6]);
++                localRes[7] += ei_predux(dst[7]);
++              }
++            }
++          }
++        }
++        if (l2blockRemainingRows>0)
++        {
++          int offsetblock = l2k * (l2blockRowEnd-l2i) + (l2blockRowEndBW-l2i)*(l2blockSizeEnd-l2k) - l2k*l2blockRemainingRows;
++          const Scalar* localB = &block[offsetblock];
++
++          for(int l1j=l2j; l1j<l2blockColEnd; l1j+=1)
++          {
++            const Scalar* EIGEN_RESTRICT rhsColumn;
++            if (needRhsCopy)
++              rhsColumn = &(rhsCopy[l2BlockSizeAligned*(l1j-l2j)-l2k]);
++            else
++              rhsColumn = &(rhs[l1j*rhsStride]);
++
++            PacketType dst[MaxBlockRows];
++            dst[3] = dst[2] = dst[1] = dst[0] = ei_pset1(Scalar(0.));
++            if (MaxBlockRows==8)
++              dst[7] = dst[6] = dst[5] = dst[4] = dst[0];
++
++            // let's declare a few other temporary registers
++            PacketType tmp;
++
++            for(int k=l2k; k<l2blockSizeEnd; k+=PacketSize)
++            {
++              tmp = ei_pload(&rhsColumn[k]);
++
++                                           dst[0] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows             ])), dst[0]);
++              if (l2blockRemainingRows>=2) dst[1] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+  PacketSize])), dst[1]);
++              if (l2blockRemainingRows>=3) dst[2] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+2*PacketSize])), dst[2]);
++              if (l2blockRemainingRows>=4) dst[3] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+3*PacketSize])), dst[3]);
++              if (MaxBlockRows==8)
++              {
++                if (l2blockRemainingRows>=5) dst[4] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+4*PacketSize])), dst[4]);
++                if (l2blockRemainingRows>=6) dst[5] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+5*PacketSize])), dst[5]);
++                if (l2blockRemainingRows>=7) dst[6] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+6*PacketSize])), dst[6]);
++                if (l2blockRemainingRows>=8) dst[7] = ei_pmadd(tmp, ei_pload(&(localB[k*l2blockRemainingRows+7*PacketSize])), dst[7]);
++              }
++            }
++
++            Scalar* EIGEN_RESTRICT localRes = &(res[l2blockRowEndBW + l1j*resStride]);
++
++            // process the remaining rows one at a time
++                                         localRes[0] += ei_predux(dst[0]);
++            if (l2blockRemainingRows>=2) localRes[1] += ei_predux(dst[1]);
++            if (l2blockRemainingRows>=3) localRes[2] += ei_predux(dst[2]);
++            if (l2blockRemainingRows>=4) localRes[3] += ei_predux(dst[3]);
++            if (MaxBlockRows==8)
++            {
++              if (l2blockRemainingRows>=5) localRes[4] += ei_predux(dst[4]);
++              if (l2blockRemainingRows>=6) localRes[5] += ei_predux(dst[5]);
++              if (l2blockRemainingRows>=7) localRes[6] += ei_predux(dst[6]);
++              if (l2blockRemainingRows>=8) localRes[7] += ei_predux(dst[7]);
++            }
++
++          }
++        }
++      }
++    }
++  }
++  if (PacketSize>1 && remainingSize)
++  {
++    if (lhsRowMajor)
++    {
++      for (int j=0; j<cols; ++j)
++        for (int i=0; i<rows; ++i)
++        {
++          Scalar tmp = lhs[i*lhsStride+size] * rhs[j*rhsStride+size];
++          // FIXME this loop gets vectorized by the compiler!
++          for (int k=1; k<remainingSize; ++k)
++            tmp += lhs[i*lhsStride+size+k] * rhs[j*rhsStride+size+k];
++          res[i+j*resStride] += tmp;
++        }
++    }
++    else
++    {
++      for (int j=0; j<cols; ++j)
++        for (int i=0; i<rows; ++i)
++        {
++          Scalar tmp = lhs[i+size*lhsStride] * rhs[j*rhsStride+size];
++          for (int k=1; k<remainingSize; ++k)
++            tmp += lhs[i+(size+k)*lhsStride] * rhs[j*rhsStride+size+k];
++          res[i+j*resStride] += tmp;
++        }
++    }
++  }
++
++  ei_free_stack(block, Scalar, allocBlockSize);
++  ei_free_stack(rhsCopy, Scalar, l2BlockSizeAligned*l2BlockSizeAligned);
++}
++
++#endif // EIGEN_EXTERN_INSTANTIATIONS
++
++/* Optimized col-major matrix * vector product:
++ * This algorithm processes 4 columns at once, which both reduces the number
++ * of loads/stores of the result by a factor of 4 and reduces the
++ * instruction dependency. Moreover, we know that all bands have the
++ * same alignment pattern.
++ * TODO: since rhs gets evaluated only once, no need to evaluate it
++ */
++template<typename Scalar, typename RhsType>
++EIGEN_DONT_INLINE static void ei_cache_friendly_product_colmajor_times_vector(
++  int size,
++  const Scalar* lhs, int lhsStride,
++  const RhsType& rhs,
++  Scalar* res)
++{
++  #ifdef _EIGEN_ACCUMULATE_PACKETS
++  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
++  #endif
++
++  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2,OFFSET) \
++    ei_pstore(&res[j OFFSET], \
++      ei_padd(ei_pload(&res[j OFFSET]), \
++        ei_padd( \
++          ei_padd(ei_pmul(ptmp0,ei_pload ## A0(&lhs0[j OFFSET])),ei_pmul(ptmp1,ei_pload ## A13(&lhs1[j OFFSET]))), \
++          ei_padd(ei_pmul(ptmp2,ei_pload ## A2(&lhs2[j OFFSET])),ei_pmul(ptmp3,ei_pload ## A13(&lhs3[j OFFSET]))) )))
++
++  typedef typename ei_packet_traits<Scalar>::type Packet;
++  const int PacketSize = sizeof(Packet)/sizeof(Scalar);
++
++  enum { AllAligned = 0, EvenAligned, FirstAligned, NoneAligned };
++  const int columnsAtOnce = 4;
++  const int peels = 2;
++  const int PacketAlignedMask = PacketSize-1;
++  const int PeelAlignedMask = PacketSize*peels-1;
++
++  // How many coeffs of the result do we have to skip to be aligned.
++  // Here we assume the data is at least aligned on the base scalar type, which is mandatory anyway.
++  const int alignedStart = ei_alignmentOffset(res,size);
++  const int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
++  const int peeledSize  = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
++
++  const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
++  int alignmentPattern = alignmentStep==0 ? AllAligned
++                       : alignmentStep==(PacketSize/2) ? EvenAligned
++                       : FirstAligned;
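++  // Worked example (explanatory note, not in upstream Eigen): with float data and
++  // SSE packets (PacketSize==4), lhsStride%4==0 gives alignmentStep==0 -> AllAligned,
++  // lhsStride%4==2 gives alignmentStep==2==PacketSize/2 -> EvenAligned, and any other
++  // stride gives alignmentStep 1 or 3 -> FirstAligned.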
++
++  // we cannot assume the first element is aligned because of sub-matrices
++  const int lhsAlignmentOffset = ei_alignmentOffset(lhs,size);
++
++  // find how many columns we have to skip to be aligned with the result (if possible)
++  int skipColumns = 0;
++  if (PacketSize>1)
++  {
++    ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0 || size<PacketSize);
++    
++    while (skipColumns<PacketSize &&
++           alignedStart != ((lhsAlignmentOffset + alignmentStep*skipColumns)%PacketSize))
++      ++skipColumns;
++    if (skipColumns==PacketSize)
++    {
++      // nothing can be aligned, no need to skip any column
++      alignmentPattern = NoneAligned;
++      skipColumns = 0;
++    }
++    else
++    {
++      skipColumns = std::min(skipColumns,rhs.size());
++      // note that the skipped columns are processed later.
++    }
++
++    ei_internal_assert((alignmentPattern==NoneAligned) || (size_t(lhs+alignedStart+lhsStride*skipColumns)%sizeof(Packet))==0);
++  }
++
++  int offset1 = (FirstAligned && alignmentStep==1?3:1);
++  int offset3 = (FirstAligned && alignmentStep==1?1:3);
++  
++  int columnBound = ((rhs.size()-skipColumns)/columnsAtOnce)*columnsAtOnce + skipColumns;
++  for (int i=skipColumns; i<columnBound; i+=columnsAtOnce)
++  {
++    Packet ptmp0 = ei_pset1(rhs[i]),   ptmp1 = ei_pset1(rhs[i+offset1]),
++           ptmp2 = ei_pset1(rhs[i+2]), ptmp3 = ei_pset1(rhs[i+offset3]);
++
++    // this helps the compiler a lot in generating better binary code
++    const Scalar *lhs0 = lhs + i*lhsStride, *lhs1 = lhs + (i+offset1)*lhsStride,
++                 *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
++
++    if (PacketSize>1)
++    {
++      /* explicit vectorization */
++      // process initial unaligned coeffs
++      for (int j=0; j<alignedStart; j++)
++        res[j] += ei_pfirst(ptmp0)*lhs0[j] + ei_pfirst(ptmp1)*lhs1[j] + ei_pfirst(ptmp2)*lhs2[j] + ei_pfirst(ptmp3)*lhs3[j];
++
++      if (alignedSize>alignedStart)
++      {
++        switch(alignmentPattern)
++        {
++          case AllAligned:
++            for (int j = alignedStart; j<alignedSize; j+=PacketSize)
++              _EIGEN_ACCUMULATE_PACKETS(,,,);
++            break;
++          case EvenAligned:
++            for (int j = alignedStart; j<alignedSize; j+=PacketSize)
++              _EIGEN_ACCUMULATE_PACKETS(,u,,);
++            break;
++          case FirstAligned:
++            if(peels>1)
++            {
++              Packet A00, A01, A02, A03, A10, A11, A12, A13;
++
++              A01 = ei_pload(&lhs1[alignedStart-1]);
++              A02 = ei_pload(&lhs2[alignedStart-2]);
++              A03 = ei_pload(&lhs3[alignedStart-3]);
++
++              for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize)
++              {
++                A11 = ei_pload(&lhs1[j-1+PacketSize]);  ei_palign<1>(A01,A11);
++                A12 = ei_pload(&lhs2[j-2+PacketSize]);  ei_palign<2>(A02,A12);
++                A13 = ei_pload(&lhs3[j-3+PacketSize]);  ei_palign<3>(A03,A13);
++
++                A00 = ei_pload (&lhs0[j]);
++                A10 = ei_pload (&lhs0[j+PacketSize]);
++                A00 = ei_pmadd(ptmp0, A00, ei_pload(&res[j]));
++                A10 = ei_pmadd(ptmp0, A10, ei_pload(&res[j+PacketSize]));
++
++                A00 = ei_pmadd(ptmp1, A01, A00);
++                A01 = ei_pload(&lhs1[j-1+2*PacketSize]);  ei_palign<1>(A11,A01);
++                A00 = ei_pmadd(ptmp2, A02, A00);
++                A02 = ei_pload(&lhs2[j-2+2*PacketSize]);  ei_palign<2>(A12,A02);
++                A00 = ei_pmadd(ptmp3, A03, A00);
++                ei_pstore(&res[j],A00);
++                A03 = ei_pload(&lhs3[j-3+2*PacketSize]);  ei_palign<3>(A13,A03);
++                A10 = ei_pmadd(ptmp1, A11, A10);
++                A10 = ei_pmadd(ptmp2, A12, A10);
++                A10 = ei_pmadd(ptmp3, A13, A10);
++                ei_pstore(&res[j+PacketSize],A10);
++              }
++            }
++            for (int j = peeledSize; j<alignedSize; j+=PacketSize)
++              _EIGEN_ACCUMULATE_PACKETS(,u,u,);
++            break;
++          default:
++            for (int j = alignedStart; j<alignedSize; j+=PacketSize)
++              _EIGEN_ACCUMULATE_PACKETS(u,u,u,);
++            break;
++        }
++      }
++    } // end explicit vectorization
++
++    /* process remaining coeffs (or all if there is no explicit vectorization) */
++    for (int j=alignedSize; j<size; j++)
++      res[j] += ei_pfirst(ptmp0)*lhs0[j] + ei_pfirst(ptmp1)*lhs1[j] + ei_pfirst(ptmp2)*lhs2[j] + ei_pfirst(ptmp3)*lhs3[j];
++  }
++
++  // process remaining first and last columns (at most columnsAtOnce-1)
++  int end = rhs.size();
++  int start = columnBound;
++  do
++  {
++    for (int i=start; i<end; i++)
++    {
++      Packet ptmp0 = ei_pset1(rhs[i]);
++      const Scalar* lhs0 = lhs + i*lhsStride;
++
++      if (PacketSize>1)
++      {
++        /* explicit vectorization */
++        // process first unaligned result's coeffs
++        for (int j=0; j<alignedStart; j++)
++          res[j] += ei_pfirst(ptmp0) * lhs0[j];
++
++        // process aligned result's coeffs
++        if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
++          for (int j = alignedStart;j<alignedSize;j+=PacketSize)
++            ei_pstore(&res[j], ei_pmadd(ptmp0,ei_pload(&lhs0[j]),ei_pload(&res[j])));
++        else
++          for (int j = alignedStart;j<alignedSize;j+=PacketSize)
++            ei_pstore(&res[j], ei_pmadd(ptmp0,ei_ploadu(&lhs0[j]),ei_pload(&res[j])));
++      }
++
++      // process remaining scalars (or all if no explicit vectorization)
++      for (int j=alignedSize; j<size; j++)
++        res[j] += ei_pfirst(ptmp0) * lhs0[j];
++    }
++    if (skipColumns)
++    {
++      start = 0;
++      end = skipColumns;
++      skipColumns = 0;
++    }
++    else
++      break;
++  } while(PacketSize>1);
++  #undef _EIGEN_ACCUMULATE_PACKETS
++}
++
++// TODO add peeling to mask unaligned load/stores
++template<typename Scalar, typename ResType>
++EIGEN_DONT_INLINE static void ei_cache_friendly_product_rowmajor_times_vector(
++  const Scalar* lhs, int lhsStride,
++  const Scalar* rhs, int rhsSize,
++  ResType& res)
++{
++  #ifdef _EIGEN_ACCUMULATE_PACKETS
++  #error _EIGEN_ACCUMULATE_PACKETS has already been defined
++  #endif
++
++  #define _EIGEN_ACCUMULATE_PACKETS(A0,A13,A2,OFFSET) {\
++    Packet b = ei_pload(&rhs[j]); \
++    ptmp0 = ei_pmadd(b, ei_pload##A0 (&lhs0[j]), ptmp0); \
++    ptmp1 = ei_pmadd(b, ei_pload##A13(&lhs1[j]), ptmp1); \
++    ptmp2 = ei_pmadd(b, ei_pload##A2 (&lhs2[j]), ptmp2); \
++    ptmp3 = ei_pmadd(b, ei_pload##A13(&lhs3[j]), ptmp3); }
++
++  typedef typename ei_packet_traits<Scalar>::type Packet;
++  const int PacketSize = sizeof(Packet)/sizeof(Scalar);
++
++  enum { AllAligned=0, EvenAligned=1, FirstAligned=2, NoneAligned=3 };
++  const int rowsAtOnce = 4;
++  const int peels = 2;
++  const int PacketAlignedMask = PacketSize-1;
++  const int PeelAlignedMask = PacketSize*peels-1;
++  const int size = rhsSize;
++
++  // How many coeffs of the result do we have to skip to be aligned.
++  // Here we assume the data is at least aligned on the base scalar type, which is mandatory anyway.
++  const int alignedStart = ei_alignmentOffset(rhs, size);
++  const int alignedSize = PacketSize>1 ? alignedStart + ((size-alignedStart) & ~PacketAlignedMask) : 0;
++  const int peeledSize  = peels>1 ? alignedStart + ((alignedSize-alignedStart) & ~PeelAlignedMask) : alignedStart;
++
++  const int alignmentStep = PacketSize>1 ? (PacketSize - lhsStride % PacketSize) & PacketAlignedMask : 0;
++  int alignmentPattern = alignmentStep==0 ? AllAligned
++                       : alignmentStep==(PacketSize/2) ? EvenAligned
++                       : FirstAligned;
++
++  // we cannot assume the first element is aligned because of sub-matrices
++  const int lhsAlignmentOffset = ei_alignmentOffset(lhs,size);
++  
++  // find how many rows we have to skip to be aligned with rhs (if possible)
++  int skipRows = 0;
++  if (PacketSize>1)
++  {
++    ei_internal_assert(size_t(lhs+lhsAlignmentOffset)%sizeof(Packet)==0  || size<PacketSize);
++    
++    while (skipRows<PacketSize &&
++           alignedStart != ((lhsAlignmentOffset + alignmentStep*skipRows)%PacketSize))
++      ++skipRows;
++    if (skipRows==PacketSize)
++    {
++      // nothing can be aligned, no need to skip any row
++      alignmentPattern = NoneAligned;
++      skipRows = 0;
++    }
++    else
++    {
++      skipRows = std::min(skipRows,res.size());
++      // note that the skipped rows are processed later.
++    }
++    ei_internal_assert((alignmentPattern==NoneAligned) || PacketSize==1
++      || (size_t(lhs+alignedStart+lhsStride*skipRows)%sizeof(Packet))==0);
++  }
++
++  int offset1 = (FirstAligned && alignmentStep==1?3:1);
++  int offset3 = (FirstAligned && alignmentStep==1?1:3);
++  
++  int rowBound = ((res.size()-skipRows)/rowsAtOnce)*rowsAtOnce + skipRows;
++  for (int i=skipRows; i<rowBound; i+=rowsAtOnce)
++  {
++    Scalar tmp0 = Scalar(0), tmp1 = Scalar(0), tmp2 = Scalar(0), tmp3 = Scalar(0);
++
++    // this helps the compiler generate good binary code
++    const Scalar *lhs0 = lhs + i*lhsStride,     *lhs1 = lhs + (i+offset1)*lhsStride,
++                 *lhs2 = lhs + (i+2)*lhsStride, *lhs3 = lhs + (i+offset3)*lhsStride;
++
++    if (PacketSize>1)
++    {
++      /* explicit vectorization */
++      Packet ptmp0 = ei_pset1(Scalar(0)), ptmp1 = ei_pset1(Scalar(0)), ptmp2 = ei_pset1(Scalar(0)), ptmp3 = ei_pset1(Scalar(0));
++      
++      // process initial unaligned coeffs
++      // FIXME this loop gets vectorized by the compiler!
++      for (int j=0; j<alignedStart; j++)
++      {
++        Scalar b = rhs[j];
++        tmp0 += b*lhs0[j]; tmp1 += b*lhs1[j]; tmp2 += b*lhs2[j]; tmp3 += b*lhs3[j];
++      }
++
++      if (alignedSize>alignedStart)
++      {
++        switch(alignmentPattern)
++        {
++          case AllAligned:
++            for (int j = alignedStart; j<alignedSize; j+=PacketSize)
++              _EIGEN_ACCUMULATE_PACKETS(,,,);
++            break;
++          case EvenAligned:
++            for (int j = alignedStart; j<alignedSize; j+=PacketSize)
++              _EIGEN_ACCUMULATE_PACKETS(,u,,);
++            break;
++          case FirstAligned:
++            if (peels>1)
++            {
++              /* Here we process 4 rows with two peeled iterations to hide
++               * the overhead of unaligned loads. Moreover, unaligned loads are handled
++               * using special shift/move operations between the two aligned packets
++               * overlapping the desired unaligned packet. This is *much* more efficient
++               * than basic unaligned loads.
++               */
++              Packet A01, A02, A03, b, A11, A12, A13;
++              A01 = ei_pload(&lhs1[alignedStart-1]);
++              A02 = ei_pload(&lhs2[alignedStart-2]);
++              A03 = ei_pload(&lhs3[alignedStart-3]);
++
++              for (int j = alignedStart; j<peeledSize; j+=peels*PacketSize)
++              {
++                b = ei_pload(&rhs[j]);
++                A11 = ei_pload(&lhs1[j-1+PacketSize]);  ei_palign<1>(A01,A11);
++                A12 = ei_pload(&lhs2[j-2+PacketSize]);  ei_palign<2>(A02,A12);
++                A13 = ei_pload(&lhs3[j-3+PacketSize]);  ei_palign<3>(A03,A13);
++
++                ptmp0 = ei_pmadd(b, ei_pload (&lhs0[j]), ptmp0);
++                ptmp1 = ei_pmadd(b, A01, ptmp1);
++                A01 = ei_pload(&lhs1[j-1+2*PacketSize]);  ei_palign<1>(A11,A01);
++                ptmp2 = ei_pmadd(b, A02, ptmp2);
++                A02 = ei_pload(&lhs2[j-2+2*PacketSize]);  ei_palign<2>(A12,A02);
++                ptmp3 = ei_pmadd(b, A03, ptmp3);
++                A03 = ei_pload(&lhs3[j-3+2*PacketSize]);  ei_palign<3>(A13,A03);
++
++                b = ei_pload(&rhs[j+PacketSize]);
++                ptmp0 = ei_pmadd(b, ei_pload (&lhs0[j+PacketSize]), ptmp0);
++                ptmp1 = ei_pmadd(b, A11, ptmp1);
++                ptmp2 = ei_pmadd(b, A12, ptmp2);
++                ptmp3 = ei_pmadd(b, A13, ptmp3);
++              }
++            }
++            for (int j = peeledSize; j<alignedSize; j+=PacketSize)
++              _EIGEN_ACCUMULATE_PACKETS(,u,u,);
++            break;
++          default:
++            for (int j = alignedStart; j<alignedSize; j+=PacketSize)
++              _EIGEN_ACCUMULATE_PACKETS(u,u,u,);
++            break;
++        }
++        tmp0 += ei_predux(ptmp0);
++        tmp1 += ei_predux(ptmp1);
++        tmp2 += ei_predux(ptmp2);
++        tmp3 += ei_predux(ptmp3);
++      }
++    } // end explicit vectorization
++
++    // process remaining coeffs (or all if no explicit vectorization)
++    // FIXME this loop gets vectorized by the compiler!
++    for (int j=alignedSize; j<size; j++)
++    {
++      Scalar b = rhs[j];
++      tmp0 += b*lhs0[j]; tmp1 += b*lhs1[j]; tmp2 += b*lhs2[j]; tmp3 += b*lhs3[j];
++    }
++    res[i] += tmp0; res[i+offset1] += tmp1; res[i+2] += tmp2; res[i+offset3] += tmp3;
++  }
++
++  // process remaining first and last rows (at most rowsAtOnce-1)
++  int end = res.size();
++  int start = rowBound;
++  do
++  {
++    for (int i=start; i<end; i++)
++    {
++      Scalar tmp0 = Scalar(0);
++      Packet ptmp0 = ei_pset1(tmp0);
++      const Scalar* lhs0 = lhs + i*lhsStride;
++      // process first unaligned result's coeffs
++      // FIXME this loop gets vectorized by the compiler!
++      for (int j=0; j<alignedStart; j++)
++        tmp0 += rhs[j] * lhs0[j];
++
++      if (alignedSize>alignedStart)
++      {
++        // process aligned rhs coeffs
++        if ((size_t(lhs0+alignedStart)%sizeof(Packet))==0)
++          for (int j = alignedStart;j<alignedSize;j+=PacketSize)
++            ptmp0 = ei_pmadd(ei_pload(&rhs[j]), ei_pload(&lhs0[j]), ptmp0);
++        else
++          for (int j = alignedStart;j<alignedSize;j+=PacketSize)
++            ptmp0 = ei_pmadd(ei_pload(&rhs[j]), ei_ploadu(&lhs0[j]), ptmp0);
++        tmp0 += ei_predux(ptmp0);
++      }
++
++      // process remaining scalars
++      // FIXME this loop gets vectorized by the compiler!
++      for (int j=alignedSize; j<size; j++)
++        tmp0 += rhs[j] * lhs0[j];
++      res[i] += tmp0;
++    }
++    if (skipRows)
++    {
++      start = 0;
++      end = skipRows;
++      skipRows = 0;
++    }
++    else
++      break;
++  } while(PacketSize>1);
++
++  #undef _EIGEN_ACCUMULATE_PACKETS
++}
++
++#endif // EIGEN_CACHE_FRIENDLY_PRODUCT_H
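The shift/move peeling trick described in the block comment inside the hunk above can be hard to picture from the packet intrinsics alone. Below is a rough, stand-alone sketch of the idea in plain scalar C++, using a hypothetical 4-wide Packet4 type and a shift_merge helper; Eigen's actual ei_pload/ei_palign primitives are internal and are not reproduced here.

#include <array>
#include <cstddef>

using Packet4 = std::array<float, 4>;   // stand-in for a 4-wide SIMD register

// Load a packet from an address that is a multiple of the packet width.
Packet4 load_aligned(const float* p) { return { p[0], p[1], p[2], p[3] }; }

// Emulate an unaligned load at (aligned base + Offset) by merging two
// consecutive aligned packets -- conceptually what the shift/move trick
// in the loop above does with real SIMD shuffle instructions.
template <int Offset>
Packet4 shift_merge(const Packet4& lo, const Packet4& hi)
{
  Packet4 r;
  for (std::size_t k = 0; k < 4; ++k)
    r[k] = (k + Offset < 4) ? lo[k + Offset] : hi[k + Offset - 4];
  return r;
}

int main()
{
  float data[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
  Packet4 a = load_aligned(data);        // data[0..3]
  Packet4 b = load_aligned(data + 4);    // data[4..7]
  Packet4 u = shift_merge<1>(a, b);      // emulates a load at data+1: {1,2,3,4}
  return (u[0] == 1 && u[3] == 4) ? 0 : 1;
}

Two aligned loads plus a shift cover any unaligned position inside them, which is why the loop above keeps the previously loaded aligned packet around instead of issuing fresh unaligned loads.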
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Coeffs.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Coeffs.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Coeffs.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Coeffs.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,352 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_COEFFS_H
++#define EIGEN_COEFFS_H
++
++/** Short version: don't use this function, use
++  * \link operator()(int,int) const \endlink instead.
++  *
++  * Long version: this function is similar to
++  * \link operator()(int,int) const \endlink, but without the assertion.
++  * Use this for limiting the performance cost of debugging code when doing
++  * repeated coefficient access. Only use this when it is guaranteed that the
++  * parameters \a row and \a col are in range.
++  *
++  * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
++  * function equivalent to \link operator()(int,int) const \endlink.
++  *
++  * \sa operator()(int,int) const, coeffRef(int,int), coeff(int) const
++  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::coeff(int row, int col) const
++{
++  ei_internal_assert(row >= 0 && row < rows()
++                     && col >= 0 && col < cols());
++  return derived().coeff(row, col);
++}
++
++/** \returns the coefficient at the given row and column.
++  *
++  * \sa operator()(int,int), operator[](int) const
++  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::operator()(int row, int col) const
++{
++  ei_assert(row >= 0 && row < rows()
++      && col >= 0 && col < cols());
++  return derived().coeff(row, col);
++}
++
++/** Short version: don't use this function, use
++  * \link operator()(int,int) \endlink instead.
++  *
++  * Long version: this function is similar to
++  * \link operator()(int,int) \endlink, but without the assertion.
++  * Use this for limiting the performance cost of debugging code when doing
++  * repeated coefficient access. Only use this when it is guaranteed that the
++  * parameters \a row and \a col are in range.
++  *
++  * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
++  * function equivalent to \link operator()(int,int) \endlink.
++  *
++  * \sa operator()(int,int), coeff(int, int) const, coeffRef(int)
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::coeffRef(int row, int col)
++{
++  ei_internal_assert(row >= 0 && row < rows()
++                     && col >= 0 && col < cols());
++  return derived().coeffRef(row, col);
++}
++
++/** \returns a reference to the coefficient at the given row and column.
++  *
++  * \sa operator()(int,int) const, operator[](int)
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::operator()(int row, int col)
++{
++  ei_assert(row >= 0 && row < rows()
++      && col >= 0 && col < cols());
++  return derived().coeffRef(row, col);
++}
++
++/** Short version: don't use this function, use
++  * \link operator[](int) const \endlink instead.
++  *
++  * Long version: this function is similar to
++  * \link operator[](int) const \endlink, but without the assertion.
++  * Use this for limiting the performance cost of debugging code when doing
++  * repeated coefficient access. Only use this when it is guaranteed that the
++  * parameter \a index is in range.
++  *
++  * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
++  * function equivalent to \link operator[](int) const \endlink.
++  *
++  * \sa operator[](int) const, coeffRef(int), coeff(int,int) const
++  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::coeff(int index) const
++{
++  ei_internal_assert(index >= 0 && index < size());
++  return derived().coeff(index);
++}
++
++/** \returns the coefficient at the given index.
++  *
++  * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
++  *
++  * \sa operator[](int), operator()(int,int) const, x() const, y() const,
++  * z() const, w() const
++  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::operator[](int index) const
++{
++  ei_assert(index >= 0 && index < size());
++  return derived().coeff(index);
++}
++
++/** \returns the coefficient at the given index.
++  *
++  * This is synonymous to operator[](int) const.
++  *
++  * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
++  *
++  * \sa operator[](int), operator()(int,int) const, x() const, y() const,
++  * z() const, w() const
++  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::operator()(int index) const
++{
++  ei_assert(index >= 0 && index < size());
++  return derived().coeff(index);
++}
++
++/** Short version: don't use this function, use
++  * \link operator[](int) \endlink instead.
++  *
++  * Long version: this function is similar to
++  * \link operator[](int) \endlink, but without the assertion.
++  * Use this for limiting the performance cost of debugging code when doing
++  * repeated coefficient access. Only use this when it is guaranteed that the
++  * parameter \a index is in range.
++  *
++  * If EIGEN_INTERNAL_DEBUGGING is defined, an assertion will be made, making this
++  * function equivalent to \link operator[](int) \endlink.
++  *
++  * \sa operator[](int), coeff(int) const, coeffRef(int,int)
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::coeffRef(int index)
++{
++  ei_internal_assert(index >= 0 && index < size());
++  return derived().coeffRef(index);
++}
++
++/** \returns a reference to the coefficient at the given index.
++  *
++  * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
++  *
++  * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w()
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::operator[](int index)
++{
++  ei_assert(index >= 0 && index < size());
++  return derived().coeffRef(index);
++}
++
++/** \returns a reference to the coefficient at the given index.
++  *
++  * This is synonymous to operator[](int).
++  *
++  * This method is allowed only for vector expressions, and for matrix expressions having the LinearAccessBit.
++  *
++  * \sa operator[](int) const, operator()(int,int), x(), y(), z(), w()
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::operator()(int index)
++{
++  ei_assert(index >= 0 && index < size());
++  return derived().coeffRef(index);
++}
++
++/** equivalent to operator[](0).  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::x() const { return (*this)[0]; }
++
++/** equivalent to operator[](1).  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::y() const { return (*this)[1]; }
++
++/** equivalent to operator[](2).  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::z() const { return (*this)[2]; }
++
++/** equivalent to operator[](3).  */
++template<typename Derived>
++inline const typename ei_traits<Derived>::Scalar MatrixBase<Derived>
++  ::w() const { return (*this)[3]; }
++
++/** equivalent to operator[](0).  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::x() { return (*this)[0]; }
++
++/** equivalent to operator[](1).  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::y() { return (*this)[1]; }
++
++/** equivalent to operator[](2).  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::z() { return (*this)[2]; }
++
++/** equivalent to operator[](3).  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar& MatrixBase<Derived>
++  ::w() { return (*this)[3]; }
++
++/** \returns the packet of coefficients starting at the given row and column. It is your responsibility
++  * to ensure that a packet really starts there. This method is only available on expressions having the
++  * PacketAccessBit.
++  *
++  * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
++  * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
++  * starting at an address which is a multiple of the packet size.
++  */
++template<typename Derived>
++template<int LoadMode>
++inline typename ei_packet_traits<typename ei_traits<Derived>::Scalar>::type
++MatrixBase<Derived>::packet(int row, int col) const
++{
++  ei_internal_assert(row >= 0 && row < rows()
++                     && col >= 0 && col < cols());
++  return derived().template packet<LoadMode>(row,col);
++}
++
++/** Stores the given packet of coefficients, at the given row and column of this expression. It is your responsibility
++  * to ensure that a packet really starts there. This method is only available on expressions having the
++  * PacketAccessBit.
++  *
++  * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
++  * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
++  * starting at an address which is a multiple of the packet size.
++  */
++template<typename Derived>
++template<int StoreMode>
++inline void MatrixBase<Derived>::writePacket
++(int row, int col, const typename ei_packet_traits<typename ei_traits<Derived>::Scalar>::type& x)
++{
++  ei_internal_assert(row >= 0 && row < rows()
++                     && col >= 0 && col < cols());
++  derived().template writePacket<StoreMode>(row,col,x);
++}
++
++/** \returns the packet of coefficients starting at the given index. It is your responsibility
++  * to ensure that a packet really starts there. This method is only available on expressions having the
++  * PacketAccessBit and the LinearAccessBit.
++  *
++  * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
++  * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
++  * starting at an address which is a multiple of the packet size.
++  */
++template<typename Derived>
++template<int LoadMode>
++inline typename ei_packet_traits<typename ei_traits<Derived>::Scalar>::type
++MatrixBase<Derived>::packet(int index) const
++{
++  ei_internal_assert(index >= 0 && index < size());
++  return derived().template packet<LoadMode>(index);
++}
++
++/** Stores the given packet of coefficients, at the given index in this expression. It is your responsibility
++  * to ensure that a packet really starts there. This method is only available on expressions having the
++  * PacketAccessBit and the LinearAccessBit.
++  *
++  * The \a LoadMode parameter may have the value \a Aligned or \a Unaligned. Its effect is to select
++  * the appropriate vectorization instruction. Aligned access is faster, but is only possible for packets
++  * starting at an address which is a multiple of the packet size.
++  */
++template<typename Derived>
++template<int StoreMode>
++inline void MatrixBase<Derived>::writePacket
++(int index, const typename ei_packet_traits<typename ei_traits<Derived>::Scalar>::type& x)
++{
++  ei_internal_assert(index >= 0 && index < size());
++  derived().template writePacket<StoreMode>(index,x);
++}
++
++template<typename Derived>
++template<typename OtherDerived>
++inline void MatrixBase<Derived>::copyCoeff(int row, int col, const MatrixBase<OtherDerived>& other)
++{
++  ei_internal_assert(row >= 0 && row < rows()
++                     && col >= 0 && col < cols());
++  derived().coeffRef(row, col) = other.derived().coeff(row, col);
++}
++
++template<typename Derived>
++template<typename OtherDerived>
++inline void MatrixBase<Derived>::copyCoeff(int index, const MatrixBase<OtherDerived>& other)
++{
++  ei_internal_assert(index >= 0 && index < size());
++  derived().coeffRef(index) = other.derived().coeff(index);
++}
++
++template<typename Derived>
++template<typename OtherDerived, int StoreMode, int LoadMode>
++inline void MatrixBase<Derived>::copyPacket(int row, int col, const MatrixBase<OtherDerived>& other)
++{
++  ei_internal_assert(row >= 0 && row < rows()
++                     && col >= 0 && col < cols());
++  derived().template writePacket<StoreMode>(row, col,
++    other.derived().template packet<LoadMode>(row, col));
++}
++
++template<typename Derived>
++template<typename OtherDerived, int StoreMode, int LoadMode>
++inline void MatrixBase<Derived>::copyPacket(int index, const MatrixBase<OtherDerived>& other)
++{
++  ei_internal_assert(index >= 0 && index < size());
++  derived().template writePacket<StoreMode>(index,
++    other.derived().template packet<LoadMode>(index));
++}
++
++#endif // EIGEN_COEFFS_H
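As a quick orientation for the accessors added above, here is a minimal usage sketch. It assumes the Matrix3f/Vector3f typedefs and the <Eigen/Core> header shipped with this embedded copy of Eigen2.

#include <Eigen/Core>

int main()
{
  Eigen::Matrix3f m;
  m.setZero();
  m(0, 2) = 1.0f;            // checked access via operator()
  float a = m.coeff(0, 2);   // unchecked access; the caller guarantees the indices are in range

  Eigen::Vector3f v;
  v << 1.0f, 2.0f, 3.0f;
  float b = v[1] + v.y();    // operator[] and y() both read coefficient 1
  return (a == 1.0f && b == 4.0f) ? 0 : 1;
}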
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CommaInitializer.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CommaInitializer.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CommaInitializer.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CommaInitializer.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,136 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_COMMAINITIALIZER_H
++#define EIGEN_COMMAINITIALIZER_H
++
++/** \internal
++  * Helper class to define the MatrixBase::operator<<
++  */
++template<typename Derived>
++struct MatrixBase<Derived>::CommaInitializer
++{
++  inline CommaInitializer(Derived& mat, const Scalar& s)
++    : m_matrix(mat), m_row(0), m_col(1), m_currentBlockRows(1)
++  {
++    m_matrix.coeffRef(0,0) = s;
++  }
++
++  template<typename OtherDerived>
++  inline CommaInitializer(Derived& mat, const MatrixBase<OtherDerived>& other)
++    : m_matrix(mat), m_row(0), m_col(other.cols()), m_currentBlockRows(other.rows())
++  {
++    m_matrix.block(0, 0, other.rows(), other.cols()) = other;
++  }
++
++  CommaInitializer& operator,(const Scalar& s)
++  {
++    if (m_col==m_matrix.cols())
++    {
++      m_row+=m_currentBlockRows;
++      m_col = 0;
++      m_currentBlockRows = 1;
++      ei_assert(m_row<m_matrix.rows()
++        && "Too many rows passed to comma initializer (operator<<)");
++    }
++    ei_assert(m_col<m_matrix.cols()
++      && "Too many coefficients passed to comma initializer (operator<<)");
++    ei_assert(m_currentBlockRows==1);
++    m_matrix.coeffRef(m_row, m_col++) = s;
++    return *this;
++  }
++
++  template<typename OtherDerived>
++  CommaInitializer& operator,(const MatrixBase<OtherDerived>& other)
++  {
++    if (m_col==m_matrix.cols())
++    {
++      m_row+=m_currentBlockRows;
++      m_col = 0;
++      m_currentBlockRows = other.rows();
++      ei_assert(m_row+m_currentBlockRows<=m_matrix.rows()
++        && "Too many rows passed to comma initializer (operator<<)");
++    }
++    ei_assert(m_col<m_matrix.cols()
++      && "Too many coefficients passed to comma initializer (operator<<)");
++    ei_assert(m_currentBlockRows==other.rows());
++    if (OtherDerived::SizeAtCompileTime != Dynamic)
++      m_matrix.block<OtherDerived::RowsAtCompileTime != Dynamic ? OtherDerived::RowsAtCompileTime : 1,
++                     OtherDerived::ColsAtCompileTime != Dynamic ? OtherDerived::ColsAtCompileTime : 1>
++                    (m_row, m_col) = other;
++    else
++      m_matrix.block(m_row, m_col, other.rows(), other.cols()) = other;
++    m_col += other.cols();
++    return *this;
++  }
++
++  inline ~CommaInitializer()
++  {
++    ei_assert((m_row+m_currentBlockRows) == m_matrix.rows()
++         && m_col == m_matrix.cols()
++         && "Too few coefficients passed to comma initializer (operator<<)");
++  }
++
++  /** \returns the built matrix once all its coefficients have been set.
++    * Calling finished is 100% optional. Its purpose is to write expressions
++    * like this:
++    * \code
++    * quaternion.fromRotationMatrix((Matrix3f() << axis0, axis1, axis2).finished());
++    * \endcode
++    */
++  inline Derived& finished() { return m_matrix; }
++
++  Derived& m_matrix;
++  int m_row; // current row id
++  int m_col; // current col id
++  int m_currentBlockRows; // current block height
++};
++
++/** Convenient operator to set the coefficients of a matrix.
++  *
++  * The coefficients must be provided in a row major order and exactly match
++  * the size of the matrix. Otherwise an assertion is raised.
++  *
++  * \addexample CommaInit \label How to easily set all the coefficients of a matrix
++  *
++  * Example: \include MatrixBase_set.cpp
++  * Output: \verbinclude MatrixBase_set.out
++  */
++template<typename Derived>
++inline typename MatrixBase<Derived>::CommaInitializer MatrixBase<Derived>::operator<< (const Scalar& s)
++{
++  return CommaInitializer(*static_cast<Derived*>(this), s);
++}
++
++/** \sa operator<<(const Scalar&) */
++template<typename Derived>
++template<typename OtherDerived>
++inline typename MatrixBase<Derived>::CommaInitializer
++MatrixBase<Derived>::operator<<(const MatrixBase<OtherDerived>& other)
++{
++  return CommaInitializer(*static_cast<Derived *>(this), other);
++}
++
++#endif // EIGEN_COMMAINITIALIZER_H
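A small sketch of the comma initializer documented above, again assuming the Matrix3f typedef and <Eigen/Core> from this embedded Eigen2 copy:

#include <Eigen/Core>

int main()
{
  Eigen::Matrix3f m;
  // Coefficients are given in row-major order and must exactly fill the matrix,
  // otherwise the assertions in CommaInitializer fire.
  m << 1, 2, 3,
       4, 5, 6,
       7, 8, 9;

  // finished() returns the built matrix, so a temporary can be constructed in place.
  Eigen::Matrix3f t = (Eigen::Matrix3f() << 9, 8, 7,
                                            6, 5, 4,
                                            3, 2, 1).finished();
  return (m(2, 2) == 9 && t(0, 0) == 9) ? 0 : 1;
}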
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CwiseBinaryOp.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CwiseBinaryOp.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CwiseBinaryOp.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CwiseBinaryOp.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,263 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_CWISE_BINARY_OP_H
++#define EIGEN_CWISE_BINARY_OP_H
++
++/** \class CwiseBinaryOp
++  *
++  * \brief Generic expression of a coefficient-wise operator between two matrices or vectors
++  *
++  * \param BinaryOp template functor implementing the operator
++  * \param Lhs the type of the left-hand side
++  * \param Rhs the type of the right-hand side
++  *
++  * This class represents an expression of a generic binary operator of two matrices or vectors.
++  * It is the return type of the operator+, operator-, and the Cwise methods, and most
++  * of the time this is the only way it is used.
++  *
++  * However, if you want to write a function returning such an expression, you
++  * will need to use this class.
++  *
++  * \sa MatrixBase::binaryExpr(const MatrixBase<OtherDerived> &,const CustomBinaryOp &) const, class CwiseUnaryOp, class CwiseNullaryOp
++  */
++template<typename BinaryOp, typename Lhs, typename Rhs>
++struct ei_traits<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
++{
++  typedef typename ei_result_of<
++                     BinaryOp(
++                       typename Lhs::Scalar,
++                       typename Rhs::Scalar
++                     )
++                   >::type Scalar;
++  typedef typename Lhs::Nested LhsNested;
++  typedef typename Rhs::Nested RhsNested;
++  typedef typename ei_unref<LhsNested>::type _LhsNested;
++  typedef typename ei_unref<RhsNested>::type _RhsNested;
++  enum {
++    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
++    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
++    LhsFlags = _LhsNested::Flags,
++    RhsFlags = _RhsNested::Flags,
++    RowsAtCompileTime = Lhs::RowsAtCompileTime,
++    ColsAtCompileTime = Lhs::ColsAtCompileTime,
++    MaxRowsAtCompileTime = Lhs::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = Lhs::MaxColsAtCompileTime,
++    Flags = (int(LhsFlags) | int(RhsFlags)) & (
++        HereditaryBits
++      | (int(LhsFlags) & int(RhsFlags) & (LinearAccessBit | AlignedBit))
++      | (ei_functor_traits<BinaryOp>::PacketAccess && ((int(LhsFlags) & RowMajorBit)==(int(RhsFlags) & RowMajorBit))
++        ? (int(LhsFlags) & int(RhsFlags) & PacketAccessBit) : 0)),
++    CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + ei_functor_traits<BinaryOp>::Cost
++  };
++};
++
++template<typename BinaryOp, typename Lhs, typename Rhs>
++class CwiseBinaryOp : ei_no_assignment_operator,
++  public MatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseBinaryOp)
++    typedef typename ei_traits<CwiseBinaryOp>::LhsNested LhsNested;
++    typedef typename ei_traits<CwiseBinaryOp>::RhsNested RhsNested;
++
++    class InnerIterator;
++
++    inline CwiseBinaryOp(const Lhs& lhs, const Rhs& rhs, const BinaryOp& func = BinaryOp())
++      : m_lhs(lhs), m_rhs(rhs), m_functor(func)
++    {
++      ei_assert(lhs.rows() == rhs.rows() && lhs.cols() == rhs.cols());
++    }
++
++    inline int rows() const { return m_lhs.rows(); }
++    inline int cols() const { return m_lhs.cols(); }
++
++    inline const Scalar coeff(int row, int col) const
++    {
++      return m_functor(m_lhs.coeff(row, col), m_rhs.coeff(row, col));
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int row, int col) const
++    {
++      return m_functor.packetOp(m_lhs.template packet<LoadMode>(row, col), m_rhs.template packet<LoadMode>(row, col));
++    }
++
++    inline const Scalar coeff(int index) const
++    {
++      return m_functor(m_lhs.coeff(index), m_rhs.coeff(index));
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int index) const
++    {
++      return m_functor.packetOp(m_lhs.template packet<LoadMode>(index), m_rhs.template packet<LoadMode>(index));
++    }
++
++  protected:
++    const LhsNested m_lhs;
++    const RhsNested m_rhs;
++    const BinaryOp m_functor;
++};
++
++/**\returns an expression of the difference of \c *this and \a other
++  *
++  * \note If you want to subtract a given scalar from all coefficients, see Cwise::operator-().
++  *
++  * \sa class CwiseBinaryOp, MatrixBase::operator-=(), Cwise::operator-()
++  */
++template<typename Derived>
++template<typename OtherDerived>
++inline const CwiseBinaryOp<ei_scalar_difference_op<typename ei_traits<Derived>::Scalar>,
++                                 Derived, OtherDerived>
++MatrixBase<Derived>::operator-(const MatrixBase<OtherDerived> &other) const
++{
++  return CwiseBinaryOp<ei_scalar_difference_op<Scalar>,
++                       Derived, OtherDerived>(derived(), other.derived());
++}
++
++/** replaces \c *this by \c *this - \a other.
++  *
++  * \returns a reference to \c *this
++  */
++template<typename Derived>
++template<typename OtherDerived>
++inline Derived &
++MatrixBase<Derived>::operator-=(const MatrixBase<OtherDerived> &other)
++{
++  return *this = *this - other;
++}
++
++/** \relates MatrixBase
++  *
++  * \returns an expression of the sum of \c *this and \a other
++  *
++  * \note If you want to add a given scalar to all coefficients, see Cwise::operator+().
++  *
++  * \sa class CwiseBinaryOp, MatrixBase::operator+=(), Cwise::operator+()
++  */
++template<typename Derived>
++template<typename OtherDerived>
++inline const CwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
++MatrixBase<Derived>::operator+(const MatrixBase<OtherDerived> &other) const
++{
++  return CwiseBinaryOp<ei_scalar_sum_op<Scalar>, Derived, OtherDerived>(derived(), other.derived());
++}
++
++/** replaces \c *this by \c *this + \a other.
++  *
++  * \returns a reference to \c *this
++  */
++template<typename Derived>
++template<typename OtherDerived>
++inline Derived &
++MatrixBase<Derived>::operator+=(const MatrixBase<OtherDerived>& other)
++{
++  return *this = *this + other;
++}
++
++/** \returns an expression of the Schur product (coefficient-wise product) of *this and \a other
++  *
++  * Example: \include Cwise_product.cpp
++  * Output: \verbinclude Cwise_product.out
++  * 
++  * \sa class CwiseBinaryOp, operator/(), square()
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_product_op)
++Cwise<ExpressionType>::operator*(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_product_op)(_expression(), other.derived());
++}
++
++/** \returns an expression of the coefficient-wise quotient of *this and \a other
++  *
++  * Example: \include Cwise_quotient.cpp
++  * Output: \verbinclude Cwise_quotient.out
++  *
++  * \sa class CwiseBinaryOp, operator*(), inverse()
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
++Cwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)(_expression(), other.derived());
++}
++
++/** \returns an expression of the coefficient-wise min of *this and \a other
++  *
++  * Example: \include Cwise_min.cpp
++  * Output: \verbinclude Cwise_min.out
++  *
++  * \sa class CwiseBinaryOp
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)
++Cwise<ExpressionType>::min(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)(_expression(), other.derived());
++}
++
++/** \returns an expression of the coefficient-wise max of *this and \a other
++  *
++  * Example: \include Cwise_max.cpp
++  * Output: \verbinclude Cwise_max.out
++  *
++  * \sa class CwiseBinaryOp
++  */
++template<typename ExpressionType>
++template<typename OtherDerived>
++inline const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)
++Cwise<ExpressionType>::max(const MatrixBase<OtherDerived> &other) const
++{
++  return EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)(_expression(), other.derived());
++}
++
++/** \returns an expression of a custom coefficient-wise operator \a func of *this and \a other
++  *
++  * The template parameter \a CustomBinaryOp is the type of the functor
++  * of the custom operator (see class CwiseBinaryOp for an example)
++  *
++  * \addexample CustomCwiseBinaryFunctors \label How to use custom coeff wise binary functors
++  *
++  * Here is an example illustrating the use of custom functors:
++  * \include class_CwiseBinaryOp.cpp
++  * Output: \verbinclude class_CwiseBinaryOp.out
++  *
++  * \sa class CwiseBinaryOp, MatrixBase::operator+, MatrixBase::operator-, Cwise::operator*, Cwise::operator/
++  */
++template<typename Derived>
++template<typename CustomBinaryOp, typename OtherDerived>
++inline const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
++MatrixBase<Derived>::binaryExpr(const MatrixBase<OtherDerived> &other, const CustomBinaryOp& func) const
++{
++  return CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>(derived(), other.derived(), func);
++}
++
++#endif // EIGEN_CWISE_BINARY_OP_H
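The operators defined above combine as in the following sketch (Vector3f typedef and <Eigen/Core> assumed):

#include <Eigen/Core>

int main()
{
  Eigen::Vector3f a, b;
  a << 1, 2, 3;
  b << 4, 5, 6;

  Eigen::Vector3f sum  = a + b;              // coefficient-wise sum
  Eigen::Vector3f diff = b - a;              // coefficient-wise difference
  Eigen::Vector3f prod = a.cwise() * b;      // Schur (coefficient-wise) product
  Eigen::Vector3f mn   = a.cwise().min(b);   // coefficient-wise minimum
  return (sum.x() == 5 && diff.y() == 3 && prod.z() == 18 && mn.x() == 1) ? 0 : 1;
}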
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Cwise.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Cwise.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Cwise.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Cwise.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,165 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_CWISE_H
++#define EIGEN_CWISE_H
++
++/** \internal
++  * convenient macro to define the return type of a cwise binary operation */
++#define EIGEN_CWISE_BINOP_RETURN_TYPE(OP) \
++    CwiseBinaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType, OtherDerived>
++
++/** \internal
++  * convenient macro to define the return type of a cwise unary operation */
++#define EIGEN_CWISE_UNOP_RETURN_TYPE(OP) \
++    CwiseUnaryOp<OP<typename ei_traits<ExpressionType>::Scalar>, ExpressionType>
++
++/** \class Cwise
++  *
++  * \brief Pseudo expression providing additional coefficient-wise operations
++  *
++  * \param ExpressionType the type of the object on which to do coefficient-wise operations
++  *
++  * This class represents an expression with additional coefficient-wise features.
++  * It is the return type of MatrixBase::cwise()
++  * and most of the time this is the only way it is used.
++  *
++  * Note that some methods are defined in the \ref Array module.
++  *
++  * Example: \include MatrixBase_cwise_const.cpp
++  * Output: \verbinclude MatrixBase_cwise_const.out
++  *
++  * \sa MatrixBase::cwise() const, MatrixBase::cwise()
++  */
++template<typename ExpressionType> class Cwise
++{
++  public:
++
++    typedef typename ei_traits<ExpressionType>::Scalar Scalar;
++    typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
++        ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
++    typedef CwiseUnaryOp<ei_scalar_add_op<Scalar>, ExpressionType> ScalarAddReturnType;
++
++    inline Cwise(const ExpressionType& matrix) : m_matrix(matrix) {}
++
++    /** \internal */
++    inline const ExpressionType& _expression() const { return m_matrix; }
++
++    template<typename OtherDerived>
++    const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_product_op)
++    operator*(const MatrixBase<OtherDerived> &other) const;
++
++    template<typename OtherDerived>
++    const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_quotient_op)
++    operator/(const MatrixBase<OtherDerived> &other) const;
++
++    template<typename OtherDerived>
++    const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_min_op)
++    min(const MatrixBase<OtherDerived> &other) const;
++
++    template<typename OtherDerived>
++    const EIGEN_CWISE_BINOP_RETURN_TYPE(ei_scalar_max_op)
++    max(const MatrixBase<OtherDerived> &other) const;
++
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op)      abs() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op)     abs2() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_square_op)   square() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cube_op)     cube() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_inverse_op)  inverse() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sqrt_op)     sqrt() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_exp_op)      exp() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_log_op)      log() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_cos_op)      cos() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_sin_op)      sin() const;
++    const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_pow_op)      pow(const Scalar& exponent) const;
++
++
++    const ScalarAddReturnType
++    operator+(const Scalar& scalar) const;
++
++    /** \relates Cwise */
++    friend const ScalarAddReturnType
++    operator+(const Scalar& scalar, const Cwise& mat)
++    { return mat + scalar; }
++
++    ExpressionType& operator+=(const Scalar& scalar);
++
++    const ScalarAddReturnType
++    operator-(const Scalar& scalar) const;
++
++    ExpressionType& operator-=(const Scalar& scalar);
++
++    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less)
++    operator<(const MatrixBase<OtherDerived>& other) const;
++
++    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::less_equal)
++    operator<=(const MatrixBase<OtherDerived>& other) const;
++
++    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater)
++    operator>(const MatrixBase<OtherDerived>& other) const;
++
++    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::greater_equal)
++    operator>=(const MatrixBase<OtherDerived>& other) const;
++
++    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::equal_to)
++    operator==(const MatrixBase<OtherDerived>& other) const;
++
++    template<typename OtherDerived> const EIGEN_CWISE_BINOP_RETURN_TYPE(std::not_equal_to)
++    operator!=(const MatrixBase<OtherDerived>& other) const;
++
++
++  protected:
++    ExpressionTypeNested m_matrix;
++};
++
++/** \returns a Cwise wrapper of *this providing additional coefficient-wise operations
++  *
++  * Example: \include MatrixBase_cwise_const.cpp
++  * Output: \verbinclude MatrixBase_cwise_const.out
++  *
++  * \sa class Cwise, cwise()
++  */
++template<typename Derived>
++inline const Cwise<Derived>
++MatrixBase<Derived>::cwise() const
++{
++  return derived();
++}
++
++/** \returns a Cwise wrapper of *this providing additional coefficient-wise operations
++  *
++  * Example: \include MatrixBase_cwise.cpp
++  * Output: \verbinclude MatrixBase_cwise.out
++  *
++  * \sa class Cwise, cwise() const
++  */
++template<typename Derived>
++inline Cwise<Derived>
++MatrixBase<Derived>::cwise()
++{
++  return derived();
++}
++
++#endif // EIGEN_CWISE_H
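A brief sketch of the cwise() pseudo expression in use. The scalar addition comes from the Array module, so <Eigen/Array> is included here (header name as used elsewhere in this patch); Vector3f is the usual Eigen2 typedef.

#include <Eigen/Array>   // Core plus the Array-module pieces of the Cwise interface

int main()
{
  Eigen::Vector3f v;
  v << -1.0f, 2.0f, -3.0f;

  Eigen::Vector3f a = v.cwise().abs();      // |v_i|
  Eigen::Vector3f s = v.cwise().square();   // v_i * v_i
  Eigen::Vector3f p = v.cwise() + 10.0f;    // scalar added to every coefficient (Array module)
  return (a.z() == 3.0f && s.y() == 4.0f && p.x() == 9.0f) ? 0 : 1;
}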
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CwiseNullaryOp.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CwiseNullaryOp.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CwiseNullaryOp.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CwiseNullaryOp.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,620 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_CWISE_NULLARY_OP_H
++#define EIGEN_CWISE_NULLARY_OP_H
++
++/** \class CwiseNullaryOp
++  *
++  * \brief Generic expression of a matrix where all coefficients are defined by a functor
++  *
++  * \param NullaryOp template functor implementing the operator
++  *
++  * This class represents an expression of a generic nullary operator.
++  * It is the return type of the Ones(), Zero(), Constant(), Identity() and Random() functions,
++  * and most of the time this is the only way it is used.
++  *
++  * However, if you want to write a function returning such an expression, you
++  * will need to use this class.
++  *
++  * \sa class CwiseUnaryOp, class CwiseBinaryOp, MatrixBase::NullaryExpr()
++  */
++template<typename NullaryOp, typename MatrixType>
++struct ei_traits<CwiseNullaryOp<NullaryOp, MatrixType> >
++{
++  typedef typename ei_traits<MatrixType>::Scalar Scalar;
++  enum {
++    RowsAtCompileTime = ei_traits<MatrixType>::RowsAtCompileTime,
++    ColsAtCompileTime = ei_traits<MatrixType>::ColsAtCompileTime,
++    MaxRowsAtCompileTime = ei_traits<MatrixType>::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = ei_traits<MatrixType>::MaxColsAtCompileTime,
++    Flags = (ei_traits<MatrixType>::Flags
++      & (  HereditaryBits
++         | (ei_functor_has_linear_access<NullaryOp>::ret ? LinearAccessBit : 0)
++         | (ei_functor_traits<NullaryOp>::PacketAccess ? PacketAccessBit : 0)))
++      | (ei_functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
++    CoeffReadCost = ei_functor_traits<NullaryOp>::Cost
++  };
++};
++
++template<typename NullaryOp, typename MatrixType>
++class CwiseNullaryOp : ei_no_assignment_operator,
++  public MatrixBase<CwiseNullaryOp<NullaryOp, MatrixType> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseNullaryOp)
++
++    CwiseNullaryOp(int rows, int cols, const NullaryOp& func = NullaryOp())
++      : m_rows(rows), m_cols(cols), m_functor(func)
++    {
++      ei_assert(rows > 0
++          && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
++          && cols > 0
++          && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
++    }
++
++    int rows() const { return m_rows.value(); }
++    int cols() const { return m_cols.value(); }
++
++    const Scalar coeff(int rows, int cols) const
++    {
++      return m_functor(rows, cols);
++    }
++
++    template<int LoadMode>
++    PacketScalar packet(int, int) const
++    {
++      return m_functor.packetOp();
++    }
++
++    const Scalar coeff(int index) const
++    {
++      return m_functor(index);
++    }
++
++    template<int LoadMode>
++    PacketScalar packet(int) const
++    {
++      return m_functor.packetOp();
++    }
++
++  protected:
++    const ei_int_if_dynamic<RowsAtCompileTime> m_rows;
++    const ei_int_if_dynamic<ColsAtCompileTime> m_cols;
++    const NullaryOp m_functor;
++};
++
++
++/** \returns an expression of a matrix defined by a custom functor \a func
++  *
++  * The parameters \a rows and \a cols are the number of rows and of columns of
++  * the returned matrix. Must be compatible with this MatrixBase type.
++  *
++  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
++  * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
++  * instead.
++  *
++  * The template parameter \a CustomNullaryOp is the type of the functor.
++  *
++  * \sa class CwiseNullaryOp
++  */
++template<typename Derived>
++template<typename CustomNullaryOp>
++const CwiseNullaryOp<CustomNullaryOp, Derived>
++MatrixBase<Derived>::NullaryExpr(int rows, int cols, const CustomNullaryOp& func)
++{
++  return CwiseNullaryOp<CustomNullaryOp, Derived>(rows, cols, func);
++}
++
++/** \returns an expression of a matrix defined by a custom functor \a func
++  *
++  * The parameter \a size is the size of the returned vector.
++  * Must be compatible with this MatrixBase type.
++  *
++  * \only_for_vectors
++  *
++  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
++  * it is redundant to pass \a size as argument, so Zero() should be used
++  * instead.
++  *
++  * The template parameter \a CustomNullaryOp is the type of the functor.
++  *
++  * \sa class CwiseNullaryOp
++  */
++template<typename Derived>
++template<typename CustomNullaryOp>
++const CwiseNullaryOp<CustomNullaryOp, Derived>
++MatrixBase<Derived>::NullaryExpr(int size, const CustomNullaryOp& func)
++{
++  ei_assert(IsVectorAtCompileTime);
++  if(RowsAtCompileTime == 1) return CwiseNullaryOp<CustomNullaryOp, Derived>(1, size, func);
++  else return CwiseNullaryOp<CustomNullaryOp, Derived>(size, 1, func);
++}
++
++/** \returns an expression of a matrix defined by a custom functor \a func
++  *
++  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
++  * need to use the variants taking size arguments.
++  *
++  * The template parameter \a CustomNullaryOp is the type of the functor.
++  *
++  * \sa class CwiseNullaryOp
++  */
++template<typename Derived>
++template<typename CustomNullaryOp>
++const CwiseNullaryOp<CustomNullaryOp, Derived>
++MatrixBase<Derived>::NullaryExpr(const CustomNullaryOp& func)
++{
++  return CwiseNullaryOp<CustomNullaryOp, Derived>(RowsAtCompileTime, ColsAtCompileTime, func);
++}
++
++/** \returns an expression of a constant matrix of value \a value
++  *
++  * The parameters \a rows and \a cols are the number of rows and of columns of
++  * the returned matrix. Must be compatible with this MatrixBase type.
++  *
++  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
++  * it is redundant to pass \a rows and \a cols as arguments, so Constant() should be used
++  * instead.
++  *
++  * The template parameter \a CustomNullaryOp is the type of the functor.
++  *
++  * \sa class CwiseNullaryOp
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Constant(int rows, int cols, const Scalar& value)
++{
++  return NullaryExpr(rows, cols, ei_scalar_constant_op<Scalar>(value));
++}
++
++/** \returns an expression of a constant matrix of value \a value
++  *
++  * The parameter \a size is the size of the returned vector.
++  * Must be compatible with this MatrixBase type.
++  *
++  * \only_for_vectors
++  *
++  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
++  * it is redundant to pass \a size as argument, so Constant() should be used
++  * instead.
++  *
++  * The template parameter \a CustomNullaryOp is the type of the functor.
++  *
++  * \sa class CwiseNullaryOp
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Constant(int size, const Scalar& value)
++{
++  return NullaryExpr(size, ei_scalar_constant_op<Scalar>(value));
++}
++
++/** \returns an expression of a constant matrix of value \a value
++  *
++  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
++  * need to use the variants taking size arguments.
++  *
++  * The template parameter \a CustomNullaryOp is the type of the functor.
++  *
++  * \sa class CwiseNullaryOp
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Constant(const Scalar& value)
++{
++  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
++  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_constant_op<Scalar>(value));
++}
++
++template<typename Derived>
++bool MatrixBase<Derived>::isApproxToConstant
++(const Scalar& value, typename NumTraits<Scalar>::Real prec) const
++{
++  for(int j = 0; j < cols(); j++)
++    for(int i = 0; i < rows(); i++)
++      if(!ei_isApprox(coeff(i, j), value, prec))
++        return false;
++  return true;
++}
++
++/** Sets all coefficients in this expression to \a value.
++  *
++  * \sa class CwiseNullaryOp, Zero(), Ones()
++  */
++template<typename Derived>
++Derived& MatrixBase<Derived>::setConstant(const Scalar& value)
++{
++  return derived() = Constant(rows(), cols(), value);
++}
++
++// zero:
++
++/** \returns an expression of a zero matrix.
++  *
++  * The parameters \a rows and \a cols are the number of rows and of columns of
++  * the returned matrix. Must be compatible with this MatrixBase type.
++  *
++  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
++  * it is redundant to pass \a rows and \a cols as arguments, so Zero() should be used
++  * instead.
++  *
++  * \addexample Zero \label How to get a zero matrix
++  *
++  * Example: \include MatrixBase_zero_int_int.cpp
++  * Output: \verbinclude MatrixBase_zero_int_int.out
++  *
++  * \sa Zero(), Zero(int)
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Zero(int rows, int cols)
++{
++  return Constant(rows, cols, Scalar(0));
++}
++
++/** \returns an expression of a zero vector.
++  *
++  * The parameter \a size is the size of the returned vector.
++  * Must be compatible with this MatrixBase type.
++  *
++  * \only_for_vectors
++  *
++  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
++  * it is redundant to pass \a size as argument, so Zero() should be used
++  * instead.
++  *
++  * Example: \include MatrixBase_zero_int.cpp
++  * Output: \verbinclude MatrixBase_zero_int.out
++  *
++  * \sa Zero(), Zero(int,int)
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Zero(int size)
++{
++  return Constant(size, Scalar(0));
++}
++
++/** \returns an expression of a fixed-size zero matrix or vector.
++  *
++  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
++  * need to use the variants taking size arguments.
++  *
++  * Example: \include MatrixBase_zero.cpp
++  * Output: \verbinclude MatrixBase_zero.out
++  *
++  * \sa Zero(int), Zero(int,int)
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Zero()
++{
++  return Constant(Scalar(0));
++}
++
++/** \returns true if *this is approximately equal to the zero matrix,
++  *          within the precision given by \a prec.
++  *
++  * Example: \include MatrixBase_isZero.cpp
++  * Output: \verbinclude MatrixBase_isZero.out
++  *
++  * \sa class CwiseNullaryOp, Zero()
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isZero
++(typename NumTraits<Scalar>::Real prec) const
++{
++  for(int j = 0; j < cols(); j++)
++    for(int i = 0; i < rows(); i++)
++      if(!ei_isMuchSmallerThan(coeff(i, j), static_cast<Scalar>(1), prec))
++        return false;
++  return true;
++}
++
++/** Sets all coefficients in this expression to zero.
++  *
++  * Example: \include MatrixBase_setZero.cpp
++  * Output: \verbinclude MatrixBase_setZero.out
++  *
++  * \sa class CwiseNullaryOp, Zero()
++  */
++template<typename Derived>
++Derived& MatrixBase<Derived>::setZero()
++{
++  return setConstant(Scalar(0));
++}
++
++// ones:
++
++/** \returns an expression of a matrix where all coefficients equal one.
++  *
++  * The parameters \a rows and \a cols are the number of rows and of columns of
++  * the returned matrix. Must be compatible with this MatrixBase type.
++  *
++  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
++  * it is redundant to pass \a rows and \a cols as arguments, so Ones() should be used
++  * instead.
++  *
++  * \addexample One \label How to get a matrix with all coefficients equal one
++  *
++  * Example: \include MatrixBase_ones_int_int.cpp
++  * Output: \verbinclude MatrixBase_ones_int_int.out
++  *
++  * \sa Ones(), Ones(int), isOnes(), class Ones
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Ones(int rows, int cols)
++{
++  return Constant(rows, cols, Scalar(1));
++}
++
++/** \returns an expression of a vector where all coefficients equal one.
++  *
++  * The parameter \a size is the size of the returned vector.
++  * Must be compatible with this MatrixBase type.
++  *
++  * \only_for_vectors
++  *
++  * This variant is meant to be used for dynamic-size vector types. For fixed-size types,
++  * it is redundant to pass \a size as argument, so Ones() should be used
++  * instead.
++  *
++  * Example: \include MatrixBase_ones_int.cpp
++  * Output: \verbinclude MatrixBase_ones_int.out
++  *
++  * \sa Ones(), Ones(int,int), isOnes(), class Ones
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Ones(int size)
++{
++  return Constant(size, Scalar(1));
++}
++
++/** \returns an expression of a fixed-size matrix or vector where all coefficients equal one.
++  *
++  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
++  * need to use the variants taking size arguments.
++  *
++  * Example: \include MatrixBase_ones.cpp
++  * Output: \verbinclude MatrixBase_ones.out
++  *
++  * \sa Ones(int), Ones(int,int), isOnes(), class Ones
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::ConstantReturnType
++MatrixBase<Derived>::Ones()
++{
++  return Constant(Scalar(1));
++}
++
++/** \returns true if *this is approximately equal to the matrix where all coefficients
++  *          are equal to 1, within the precision given by \a prec.
++  *
++  * Example: \include MatrixBase_isOnes.cpp
++  * Output: \verbinclude MatrixBase_isOnes.out
++  *
++  * \sa class CwiseNullaryOp, Ones()
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isOnes
++(typename NumTraits<Scalar>::Real prec) const
++{
++  return isApproxToConstant(Scalar(1), prec);
++}
++
++/** Sets all coefficients in this expression to one.
++  *
++  * Example: \include MatrixBase_setOnes.cpp
++  * Output: \verbinclude MatrixBase_setOnes.out
++  *
++  * \sa class CwiseNullaryOp, Ones()
++  */
++template<typename Derived>
++Derived& MatrixBase<Derived>::setOnes()
++{
++  return setConstant(Scalar(1));
++}
++
++// Identity:
++
++/** \returns an expression of the identity matrix (not necessarily square).
++  *
++  * The parameters \a rows and \a cols are the number of rows and of columns of
++  * the returned matrix. Must be compatible with this MatrixBase type.
++  *
++  * This variant is meant to be used for dynamic-size matrix types. For fixed-size types,
++  * it is redundant to pass \a rows and \a cols as arguments, so Identity() should be used
++  * instead.
++  *
++  * \addexample Identity \label How to get an identity matrix
++  *
++  * Example: \include MatrixBase_identity_int_int.cpp
++  * Output: \verbinclude MatrixBase_identity_int_int.out
++  *
++  * \sa Identity(), setIdentity(), isIdentity()
++  */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::IdentityReturnType
++MatrixBase<Derived>::Identity(int rows, int cols)
++{
++  return NullaryExpr(rows, cols, ei_scalar_identity_op<Scalar>());
++}
++
++/** \returns an expression of the identity matrix (not necessarily square).
++  *
++  * This variant is only for fixed-size MatrixBase types. For dynamic-size types, you
++  * need to use the variant taking size arguments.
++  *
++  * Example: \include MatrixBase_identity.cpp
++  * Output: \verbinclude MatrixBase_identity.out
++  *
++  * \sa Identity(int,int), setIdentity(), isIdentity()
++  */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::IdentityReturnType
++MatrixBase<Derived>::Identity()
++{
++  EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
++  return NullaryExpr(RowsAtCompileTime, ColsAtCompileTime, ei_scalar_identity_op<Scalar>());
++}
++
++/** \returns true if *this is approximately equal to the identity matrix
++  *          (not necessarily square),
++  *          within the precision given by \a prec.
++  *
++  * Example: \include MatrixBase_isIdentity.cpp
++  * Output: \verbinclude MatrixBase_isIdentity.out
++  *
++  * \sa class CwiseNullaryOp, Identity(), Identity(int,int), setIdentity()
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isIdentity
++(typename NumTraits<Scalar>::Real prec) const
++{
++  for(int j = 0; j < cols(); j++)
++  {
++    for(int i = 0; i < rows(); i++)
++    {
++      if(i == j)
++      {
++        if(!ei_isApprox(coeff(i, j), static_cast<Scalar>(1), prec))
++          return false;
++      }
++      else
++      {
++        if(!ei_isMuchSmallerThan(coeff(i, j), static_cast<RealScalar>(1), prec))
++          return false;
++      }
++    }
++  }
++  return true;
++}
++
++template<typename Derived, bool Big = (Derived::SizeAtCompileTime>=16)>
++struct ei_setIdentity_impl
++{
++  static inline Derived& run(Derived& m)
++  {
++    return m = Derived::Identity(m.rows(), m.cols());
++  }
++};
++
++template<typename Derived>
++struct ei_setIdentity_impl<Derived, true>
++{
++  static inline Derived& run(Derived& m)
++  {
++    m.setZero();
++    const int size = std::min(m.rows(), m.cols());
++    for(int i = 0; i < size; i++) m.coeffRef(i,i) = typename Derived::Scalar(1);
++    return m;
++  }
++};
++
++/** Writes the identity expression (not necessarily square) into *this.
++  *
++  * Example: \include MatrixBase_setIdentity.cpp
++  * Output: \verbinclude MatrixBase_setIdentity.out
++  *
++  * \sa class CwiseNullaryOp, Identity(), Identity(int,int), isIdentity()
++  */
++template<typename Derived>
++inline Derived& MatrixBase<Derived>::setIdentity()
++{
++  return ei_setIdentity_impl<Derived>::run(derived());
++}
++
++/** \returns an expression of the i-th unit (basis) vector.
++  *
++  * \only_for_vectors
++  *
++  * \sa MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(int size, int i)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return BasisReturnType(SquareMatrixType::Identity(size,size), i);
++}
++
++/** \returns an expression of the i-th unit (basis) vector.
++  *
++  * \only_for_vectors
++  *
++  * This variant is for fixed-size vectors only.
++  *
++  * \sa MatrixBase::Unit(int,int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::Unit(int i)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return BasisReturnType(SquareMatrixType::Identity(),i);
++}
++
++/** \returns an expression of the X axis unit vector (1{,0}^*)
++  *
++  * \only_for_vectors
++  *
++  * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitY(), MatrixBase::UnitZ(), MatrixBase::UnitW()
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitX()
++{ return Derived::Unit(0); }
++
++/** \returns an expression of the Y axis unit vector (0,1{,0}^*)
++  *
++  * \only_for_vectors
++  *
++  * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitZ(), MatrixBase::UnitW()
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitY()
++{ return Derived::Unit(1); }
++
++/** \returns an expression of the Z axis unit vector (0,0,1{,0}^*)
++  *
++  * \only_for_vectors
++  *
++  * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitW()
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitZ()
++{ return Derived::Unit(2); }
++
++/** \returns an expression of the W axis unit vector (0,0,0,1)
++  *
++  * \only_for_vectors
++  *
++  * \sa MatrixBase::Unit(int,int), MatrixBase::Unit(int), MatrixBase::UnitX(), MatrixBase::UnitY(), MatrixBase::UnitZ()
++  */
++template<typename Derived>
++const typename MatrixBase<Derived>::BasisReturnType MatrixBase<Derived>::UnitW()
++{ return Derived::Unit(3); }
++
++#endif // EIGEN_CWISE_NULLARY_OP_H
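As a quick illustration (not part of the patch), here is a minimal sketch exercising the Ones/Identity/Unit helpers defined above; it assumes the embedded Eigen2 headers are on the include path, and the file name is made up:

  // ones_identity_unit.cpp (hypothetical example file)
  #include <Eigen/Core>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    MatrixXf m = MatrixXf::Identity(3, 4);   // dynamic-size variant (not necessarily square)
    Matrix3f f = Matrix3f::Identity();       // fixed-size variant
    VectorXf v = VectorXf::Ones(5);          // dynamic-size vector filled with ones
    m.setOnes();                             // overwrite every coefficient with 1
    std::cout << Vector3f::UnitZ() << "\n";  // basis vector (0,0,1)
    std::cout << f.isIdentity() << "\n";     // 1 (true)
    std::cout << v.transpose() << "\n";
    return 0;
  }
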
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CwiseUnaryOp.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CwiseUnaryOp.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/CwiseUnaryOp.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/CwiseUnaryOp.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,232 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_CWISE_UNARY_OP_H
++#define EIGEN_CWISE_UNARY_OP_H
++
++/** \class CwiseUnaryOp
++  *
++  * \brief Generic expression of a coefficient-wise unary operator of a matrix or a vector
++  *
++  * \param UnaryOp template functor implementing the operator
++  * \param MatrixType the type of the matrix to which we are applying the unary operator
++  *
++  * This class represents an expression of a generic unary operator of a matrix or a vector.
++  * It is the return type of the unary operator- of a matrix or a vector, and most
++  * of the time this is the only way it is used.
++  *
++  * \sa MatrixBase::unaryExpr(const CustomUnaryOp &) const, class CwiseBinaryOp, class CwiseNullaryOp
++  */
++template<typename UnaryOp, typename MatrixType>
++struct ei_traits<CwiseUnaryOp<UnaryOp, MatrixType> >
++{
++  typedef typename ei_result_of<
++                     UnaryOp(typename MatrixType::Scalar)
++                   >::type Scalar;
++  typedef typename MatrixType::Nested MatrixTypeNested;
++  typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
++  enum {
++    MatrixTypeCoeffReadCost = _MatrixTypeNested::CoeffReadCost,
++    MatrixTypeFlags = _MatrixTypeNested::Flags,
++    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
++    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
++    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
++    Flags = (MatrixTypeFlags & (
++      HereditaryBits | LinearAccessBit | AlignedBit
++      | (ei_functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0))),
++    CoeffReadCost = MatrixTypeCoeffReadCost + ei_functor_traits<UnaryOp>::Cost
++  };
++};
++
++template<typename UnaryOp, typename MatrixType>
++class CwiseUnaryOp : ei_no_assignment_operator,
++  public MatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(CwiseUnaryOp)
++
++    class InnerIterator;
++
++    inline CwiseUnaryOp(const MatrixType& mat, const UnaryOp& func = UnaryOp())
++      : m_matrix(mat), m_functor(func) {}
++
++    inline int rows() const { return m_matrix.rows(); }
++    inline int cols() const { return m_matrix.cols(); }
++
++    inline const Scalar coeff(int row, int col) const
++    {
++      return m_functor(m_matrix.coeff(row, col));
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int row, int col) const
++    {
++      return m_functor.packetOp(m_matrix.template packet<LoadMode>(row, col));
++    }
++
++    inline const Scalar coeff(int index) const
++    {
++      return m_functor(m_matrix.coeff(index));
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int index) const
++    {
++      return m_functor.packetOp(m_matrix.template packet<LoadMode>(index));
++    }
++
++  protected:
++    const typename MatrixType::Nested m_matrix;
++    const UnaryOp m_functor;
++};
++
++/** \returns an expression of a custom coefficient-wise unary operator \a func of *this
++  *
++  * The template parameter \a CustomUnaryOp is the type of the functor
++  * of the custom unary operator.
++  *
++  * \addexample CustomCwiseUnaryFunctors \label How to use custom coeff wise unary functors
++  *
++  * Example:
++  * \include class_CwiseUnaryOp.cpp
++  * Output: \verbinclude class_CwiseUnaryOp.out
++  *
++  * \sa class CwiseUnaryOp, class CwiseBinaryOp, MatrixBase::operator-, Cwise::abs
++  */
++template<typename Derived>
++template<typename CustomUnaryOp>
++inline const CwiseUnaryOp<CustomUnaryOp, Derived>
++MatrixBase<Derived>::unaryExpr(const CustomUnaryOp& func) const
++{
++  return CwiseUnaryOp<CustomUnaryOp, Derived>(derived(), func);
++}
++
++/** \returns an expression of the opposite of \c *this
++  */
++template<typename Derived>
++inline const CwiseUnaryOp<ei_scalar_opposite_op<typename ei_traits<Derived>::Scalar>,Derived>
++MatrixBase<Derived>::operator-() const
++{
++  return derived();
++}
++
++/** \returns an expression of the coefficient-wise absolute value of \c *this
++  *
++  * Example: \include Cwise_abs.cpp
++  * Output: \verbinclude Cwise_abs.out
++  *
++  * \sa abs2()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs_op)
++Cwise<ExpressionType>::abs() const
++{
++  return _expression();
++}
++
++/** \returns an expression of the coefficient-wise squared absolute value of \c *this
++  *
++  * Example: \include Cwise_abs2.cpp
++  * Output: \verbinclude Cwise_abs2.out
++  *
++  * \sa abs(), square()
++  */
++template<typename ExpressionType>
++inline const EIGEN_CWISE_UNOP_RETURN_TYPE(ei_scalar_abs2_op)
++Cwise<ExpressionType>::abs2() const
++{
++  return _expression();
++}
++
++/** \returns an expression of the complex conjugate of \c *this.
++  *
++  * \sa adjoint() */
++template<typename Derived>
++inline typename MatrixBase<Derived>::ConjugateReturnType
++MatrixBase<Derived>::conjugate() const
++{
++  return ConjugateReturnType(derived());
++}
++
++/** \returns an expression of the real part of \c *this.
++  *
++  * \sa adjoint() */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::RealReturnType
++MatrixBase<Derived>::real() const
++{
++  return derived();
++}
++
++/** \returns an expression of *this with the \a Scalar type cast to
++  * \a NewType.
++  *
++  * The template parameter \a NewType is the type we are casting the scalars to.
++  *
++  * \sa class CwiseUnaryOp
++  */
++template<typename Derived>
++template<typename NewType>
++inline const CwiseUnaryOp<ei_scalar_cast_op<typename ei_traits<Derived>::Scalar, NewType>, Derived>
++MatrixBase<Derived>::cast() const
++{
++  return derived();
++}
++
++/** \relates MatrixBase */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::ScalarMultipleReturnType
++MatrixBase<Derived>::operator*(const Scalar& scalar) const
++{
++  return CwiseUnaryOp<ei_scalar_multiple_op<Scalar>, Derived>
++    (derived(), ei_scalar_multiple_op<Scalar>(scalar));
++}
++
++/** \relates MatrixBase */
++template<typename Derived>
++inline const CwiseUnaryOp<ei_scalar_quotient1_op<typename ei_traits<Derived>::Scalar>, Derived>
++MatrixBase<Derived>::operator/(const Scalar& scalar) const
++{
++  return CwiseUnaryOp<ei_scalar_quotient1_op<Scalar>, Derived>
++    (derived(), ei_scalar_quotient1_op<Scalar>(scalar));
++}
++
++template<typename Derived>
++inline Derived&
++MatrixBase<Derived>::operator*=(const Scalar& other)
++{
++  return *this = *this * other;
++}
++
++template<typename Derived>
++inline Derived&
++MatrixBase<Derived>::operator/=(const Scalar& other)
++{
++  return *this = *this / other;
++}
++
++#endif // EIGEN_CWISE_UNARY_OP_H
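A minimal sketch of the coefficient-wise unary API defined above (illustration only; CwiseClamp is a made-up functor used with unaryExpr()):

  // cwise_unary.cpp (hypothetical example file)
  #include <Eigen/Core>
  #include <iostream>
  using namespace Eigen;

  // made-up functor for unaryExpr(): clamp each coefficient to [0, 1]
  struct CwiseClamp {
    float operator()(float x) const { return x < 0.f ? 0.f : (x > 1.f ? 1.f : x); }
  };

  int main()
  {
    Vector3f v(-0.5f, 0.25f, 2.f);
    std::cout << v.cwise().abs().transpose() << "\n";            // coefficient-wise |v|
    std::cout << v.cast<double>().transpose() << "\n";           // Scalar cast: float -> double
    std::cout << v.unaryExpr(CwiseClamp()).transpose() << "\n";  // custom unary functor
    v *= 2.f;                                                    // scalar multiple, in place
    std::cout << (-v).transpose() << "\n";                       // unary operator-
    return 0;
  }
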
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/DiagonalCoeffs.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/DiagonalCoeffs.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/DiagonalCoeffs.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/DiagonalCoeffs.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,127 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_DIAGONALCOEFFS_H
++#define EIGEN_DIAGONALCOEFFS_H
++
++/** \class DiagonalCoeffs
++  *
++  * \brief Expression of the main diagonal of a matrix
++  *
++  * \param MatrixType the type of the object in which we are taking the main diagonal
++  *
++  * The matrix is not required to be square.
++  *
++  * This class represents an expression of the main diagonal of a matrix.
++  * It is the return type of MatrixBase::diagonal() and most of the time this is
++  * the only way it is used.
++  *
++  * \sa MatrixBase::diagonal()
++  */
++template<typename MatrixType>
++struct ei_traits<DiagonalCoeffs<MatrixType> >
++{
++  typedef typename MatrixType::Scalar Scalar;
++  typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
++  typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
++  enum {
++    RowsAtCompileTime = int(MatrixType::SizeAtCompileTime) == Dynamic ? Dynamic
++                      : EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime,
++                                       MatrixType::ColsAtCompileTime),
++    ColsAtCompileTime = 1,
++    MaxRowsAtCompileTime = int(MatrixType::MaxSizeAtCompileTime) == Dynamic ? Dynamic
++                            : EIGEN_ENUM_MIN(MatrixType::MaxRowsAtCompileTime,
++                                             MatrixType::MaxColsAtCompileTime),
++    MaxColsAtCompileTime = 1,
++    Flags = (RowsAtCompileTime == Dynamic && ColsAtCompileTime == Dynamic
++            ? (unsigned int)_MatrixTypeNested::Flags
++            : (unsigned int)_MatrixTypeNested::Flags &~ LargeBit)
++          & (HereditaryBits | LinearAccessBit),
++    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
++  };
++};
++
++template<typename MatrixType> class DiagonalCoeffs
++  : public MatrixBase<DiagonalCoeffs<MatrixType> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(DiagonalCoeffs)
++
++    inline DiagonalCoeffs(const MatrixType& matrix) : m_matrix(matrix) {}
++
++    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(DiagonalCoeffs)
++
++    inline int rows() const { return std::min(m_matrix.rows(), m_matrix.cols()); }
++    inline int cols() const { return 1; }
++
++    inline Scalar& coeffRef(int row, int)
++    {
++      return m_matrix.const_cast_derived().coeffRef(row, row);
++    }
++
++    inline const Scalar coeff(int row, int) const
++    {
++      return m_matrix.coeff(row, row);
++    }
++
++    inline Scalar& coeffRef(int index)
++    {
++      return m_matrix.const_cast_derived().coeffRef(index, index);
++    }
++
++    inline const Scalar coeff(int index) const
++    {
++      return m_matrix.coeff(index, index);
++    }
++
++  protected:
++
++    const typename MatrixType::Nested m_matrix;
++};
++
++/** \returns an expression of the main diagonal of the matrix \c *this
++  *
++  * \c *this is not required to be square.
++  *
++  * Example: \include MatrixBase_diagonal.cpp
++  * Output: \verbinclude MatrixBase_diagonal.out
++  *
++  * \sa class DiagonalCoeffs */
++template<typename Derived>
++inline DiagonalCoeffs<Derived>
++MatrixBase<Derived>::diagonal()
++{
++  return DiagonalCoeffs<Derived>(derived());
++}
++
++/** This is the const version of diagonal(). */
++template<typename Derived>
++inline const DiagonalCoeffs<Derived>
++MatrixBase<Derived>::diagonal() const
++{
++  return DiagonalCoeffs<Derived>(derived());
++}
++
++#endif // EIGEN_DIAGONALCOEFFS_H
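A minimal sketch of diagonal(), which the code above makes both readable and writable (illustration only):

  // diagonal_coeffs.cpp (hypothetical example file)
  #include <Eigen/Core>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    Matrix3f m = Matrix3f::Ones();
    m.diagonal() = Vector3f(1.f, 2.f, 3.f);   // diagonal() is an lvalue expression
    std::cout << m << "\n";
    std::cout << m.diagonal().sum() << "\n";  // 6, i.e. the trace
    return 0;
  }
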
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/DiagonalMatrix.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/DiagonalMatrix.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/DiagonalMatrix.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/DiagonalMatrix.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,129 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_DIAGONALMATRIX_H
++#define EIGEN_DIAGONALMATRIX_H
++
++/** \class DiagonalMatrix
++  *
++  * \brief Expression of a diagonal matrix
++  *
++  * \param CoeffsVectorType the type of the vector of diagonal coefficients
++  *
++  * This class is an expression of a diagonal matrix with a given vector of diagonal
++  * coefficients. It is the return
++  * type of MatrixBase::asDiagonal() and most of the time this is
++  * the only way it is used.
++  *
++  * \sa MatrixBase::asDiagonal()
++  */
++template<typename CoeffsVectorType>
++struct ei_traits<DiagonalMatrix<CoeffsVectorType> >
++{
++  typedef typename CoeffsVectorType::Scalar Scalar;
++  typedef typename ei_nested<CoeffsVectorType>::type CoeffsVectorTypeNested;
++  typedef typename ei_unref<CoeffsVectorTypeNested>::type _CoeffsVectorTypeNested;
++  enum {
++    RowsAtCompileTime = CoeffsVectorType::SizeAtCompileTime,
++    ColsAtCompileTime = CoeffsVectorType::SizeAtCompileTime,
++    MaxRowsAtCompileTime = CoeffsVectorType::MaxSizeAtCompileTime,
++    MaxColsAtCompileTime = CoeffsVectorType::MaxSizeAtCompileTime,
++    Flags = (_CoeffsVectorTypeNested::Flags & HereditaryBits) | Diagonal,
++    CoeffReadCost = _CoeffsVectorTypeNested::CoeffReadCost
++  };
++};
++
++template<typename CoeffsVectorType>
++class DiagonalMatrix : ei_no_assignment_operator,
++  public MatrixBase<DiagonalMatrix<CoeffsVectorType> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(DiagonalMatrix)
++
++    inline DiagonalMatrix(const CoeffsVectorType& coeffs) : m_coeffs(coeffs)
++    {
++      ei_assert(CoeffsVectorType::IsVectorAtCompileTime
++          && coeffs.size() > 0);
++    }
++
++    inline int rows() const { return m_coeffs.size(); }
++    inline int cols() const { return m_coeffs.size(); }
++
++    inline const Scalar coeff(int row, int col) const
++    {
++      return row == col ? m_coeffs.coeff(row) : static_cast<Scalar>(0);
++    }
++
++  protected:
++    const typename CoeffsVectorType::Nested m_coeffs;
++};
++
++/** \returns an expression of a diagonal matrix with *this as vector of diagonal coefficients
++  *
++  * \only_for_vectors
++  *
++  * \addexample AsDiagonalExample \label How to build a diagonal matrix from a vector
++  *
++  * Example: \include MatrixBase_asDiagonal.cpp
++  * Output: \verbinclude MatrixBase_asDiagonal.out
++  *
++  * \sa class DiagonalMatrix, isDiagonal()
++  **/
++template<typename Derived>
++inline const DiagonalMatrix<Derived>
++MatrixBase<Derived>::asDiagonal() const
++{
++  return derived();
++}
++
++/** \returns true if *this is approximately equal to a diagonal matrix,
++  *          within the precision given by \a prec.
++  *
++  * Example: \include MatrixBase_isDiagonal.cpp
++  * Output: \verbinclude MatrixBase_isDiagonal.out
++  *
++  * \sa asDiagonal()
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isDiagonal
++(typename NumTraits<Scalar>::Real prec) const
++{
++  if(cols() != rows()) return false;
++  RealScalar maxAbsOnDiagonal = static_cast<RealScalar>(-1);
++  for(int j = 0; j < cols(); j++)
++  {
++    RealScalar absOnDiagonal = ei_abs(coeff(j,j));
++    if(absOnDiagonal > maxAbsOnDiagonal) maxAbsOnDiagonal = absOnDiagonal;
++  }
++  for(int j = 0; j < cols(); j++)
++    for(int i = 0; i < j; i++)
++    {
++      if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnDiagonal, prec)) return false;
++      if(!ei_isMuchSmallerThan(coeff(j, i), maxAbsOnDiagonal, prec)) return false;
++    }
++  return true;
++}
++
++#endif // EIGEN_DIAGONALMATRIX_H
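A minimal sketch of asDiagonal() and isDiagonal() as defined above (illustration only):

  // as_diagonal.cpp (hypothetical example file)
  #include <Eigen/Core>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    Vector3f d(1.f, 2.f, 3.f);
    Matrix3f m = d.asDiagonal();        // dense matrix with d on its diagonal
    std::cout << m << "\n";
    std::cout << m.isDiagonal() << "\n"; // 1 (true)
    return 0;
  }
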
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/DiagonalProduct.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/DiagonalProduct.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/DiagonalProduct.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/DiagonalProduct.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,114 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_DIAGONALPRODUCT_H
++#define EIGEN_DIAGONALPRODUCT_H
++
++template<typename LhsNested, typename RhsNested>
++struct ei_traits<Product<LhsNested, RhsNested, DiagonalProduct> >
++{
++  // clean the nested types:
++  typedef typename ei_unconst<typename ei_unref<LhsNested>::type>::type _LhsNested;
++  typedef typename ei_unconst<typename ei_unref<RhsNested>::type>::type _RhsNested;
++  typedef typename _LhsNested::Scalar Scalar;
++
++  enum {
++    LhsFlags = _LhsNested::Flags,
++    RhsFlags = _RhsNested::Flags,
++    RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
++    ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
++    MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
++
++    LhsIsDiagonal = (_LhsNested::Flags&Diagonal)==Diagonal,
++    RhsIsDiagonal = (_RhsNested::Flags&Diagonal)==Diagonal,
++
++    CanVectorizeRhs =  (!RhsIsDiagonal) && (RhsFlags & RowMajorBit) && (RhsFlags & PacketAccessBit)
++                     && (ColsAtCompileTime % ei_packet_traits<Scalar>::size == 0),
++
++    CanVectorizeLhs =  (!LhsIsDiagonal) && (!(LhsFlags & RowMajorBit)) && (LhsFlags & PacketAccessBit)
++                     && (RowsAtCompileTime % ei_packet_traits<Scalar>::size == 0),
++
++    RemovedBits = ~(((RhsFlags & RowMajorBit) && (!CanVectorizeLhs) ? 0 : RowMajorBit)
++                | ((RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic) ? 0 : LargeBit)),
++
++    Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
++          | (CanVectorizeLhs || CanVectorizeRhs ? PacketAccessBit : 0),
++
++    CoeffReadCost = NumTraits<Scalar>::MulCost + _LhsNested::CoeffReadCost + _RhsNested::CoeffReadCost
++  };
++};
++
++template<typename LhsNested, typename RhsNested> class Product<LhsNested, RhsNested, DiagonalProduct> : ei_no_assignment_operator,
++  public MatrixBase<Product<LhsNested, RhsNested, DiagonalProduct> >
++{
++    typedef typename ei_traits<Product>::_LhsNested _LhsNested;
++    typedef typename ei_traits<Product>::_RhsNested _RhsNested;
++
++    enum {
++      RhsIsDiagonal = (_RhsNested::Flags&Diagonal)==Diagonal
++    };
++
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
++
++    template<typename Lhs, typename Rhs>
++    inline Product(const Lhs& lhs, const Rhs& rhs)
++      : m_lhs(lhs), m_rhs(rhs)
++    {
++      ei_assert(lhs.cols() == rhs.rows());
++    }
++
++    inline int rows() const { return m_lhs.rows(); }
++    inline int cols() const { return m_rhs.cols(); }
++
++    const Scalar coeff(int row, int col) const
++    {
++      const int unique = RhsIsDiagonal ? col : row;
++      return m_lhs.coeff(row, unique) * m_rhs.coeff(unique, col);
++    }
++
++    template<int LoadMode>
++    const PacketScalar packet(int row, int col) const
++    {
++      if (RhsIsDiagonal)
++      {
++        ei_assert((_LhsNested::Flags&RowMajorBit)==0);
++        return ei_pmul(m_lhs.template packet<LoadMode>(row, col), ei_pset1(m_rhs.coeff(col, col)));
++      }
++      else
++      {
++        ei_assert(_RhsNested::Flags&RowMajorBit);
++        return ei_pmul(ei_pset1(m_lhs.coeff(row, row)), m_rhs.template packet<LoadMode>(row, col));
++      }
++    }
++
++  protected:
++    const LhsNested m_lhs;
++    const RhsNested m_rhs;
++};
++
++#endif // EIGEN_DIAGONALPRODUCT_H
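A minimal sketch of the user-level form that should expand to the specialized diagonal product above, assuming matrix-times-asDiagonal() expressions select this path (illustration only):

  // diagonal_product.cpp (hypothetical example file)
  #include <Eigen/Core>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    Matrix3f m = Matrix3f::Ones();
    Vector3f scale(2.f, 3.f, 4.f);
    Matrix3f scaled = m * scale.asDiagonal();  // scales the columns of m
    std::cout << scaled << "\n";
    return 0;
  }
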
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Dot.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Dot.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Dot.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Dot.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,357 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_DOT_H
++#define EIGEN_DOT_H
++
++/***************************************************************************
++* Part 1 : the logic deciding a strategy for vectorization and unrolling
++***************************************************************************/
++
++template<typename Derived1, typename Derived2>
++struct ei_dot_traits
++{
++public:
++  enum {
++    Vectorization = (int(Derived1::Flags)&int(Derived2::Flags)&ActualPacketAccessBit)
++                 && (int(Derived1::Flags)&int(Derived2::Flags)&LinearAccessBit)
++                  ? LinearVectorization
++                  : NoVectorization
++  };
++
++private:
++  typedef typename Derived1::Scalar Scalar;
++  enum {
++    PacketSize = ei_packet_traits<Scalar>::size,
++    Cost = Derived1::SizeAtCompileTime * (Derived1::CoeffReadCost + Derived2::CoeffReadCost + NumTraits<Scalar>::MulCost)
++           + (Derived1::SizeAtCompileTime-1) * NumTraits<Scalar>::AddCost,
++    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize))
++  };
++
++public:
++  enum {
++    Unrolling = Cost <= UnrollingLimit
++              ? CompleteUnrolling
++              : NoUnrolling
++  };
++};
++
++/***************************************************************************
++* Part 2 : unrollers
++***************************************************************************/
++
++/*** no vectorization ***/
++
++template<typename Derived1, typename Derived2, int Start, int Length>
++struct ei_dot_novec_unroller
++{
++  enum {
++    HalfLength = Length/2
++  };
++
++  typedef typename Derived1::Scalar Scalar;
++
++  inline static Scalar run(const Derived1& v1, const Derived2& v2)
++  {
++    return ei_dot_novec_unroller<Derived1, Derived2, Start, HalfLength>::run(v1, v2)
++         + ei_dot_novec_unroller<Derived1, Derived2, Start+HalfLength, Length-HalfLength>::run(v1, v2);
++  }
++};
++
++template<typename Derived1, typename Derived2, int Start>
++struct ei_dot_novec_unroller<Derived1, Derived2, Start, 1>
++{
++  typedef typename Derived1::Scalar Scalar;
++
++  inline static Scalar run(const Derived1& v1, const Derived2& v2)
++  {
++    return v1.coeff(Start) * ei_conj(v2.coeff(Start));
++  }
++};
++
++/*** vectorization ***/
++
++template<typename Derived1, typename Derived2, int Index, int Stop,
++         bool LastPacket = (Stop-Index == ei_packet_traits<typename Derived1::Scalar>::size)>
++struct ei_dot_vec_unroller
++{
++  typedef typename Derived1::Scalar Scalar;
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++
++  enum {
++    row1 = Derived1::RowsAtCompileTime == 1 ? 0 : Index,
++    col1 = Derived1::RowsAtCompileTime == 1 ? Index : 0,
++    row2 = Derived2::RowsAtCompileTime == 1 ? 0 : Index,
++    col2 = Derived2::RowsAtCompileTime == 1 ? Index : 0
++  };
++
++  inline static PacketScalar run(const Derived1& v1, const Derived2& v2)
++  {
++    return ei_pmadd(
++      v1.template packet<Aligned>(row1, col1),
++      v2.template packet<Aligned>(row2, col2),
++      ei_dot_vec_unroller<Derived1, Derived2, Index+ei_packet_traits<Scalar>::size, Stop>::run(v1, v2)
++    );
++  }
++};
++
++template<typename Derived1, typename Derived2, int Index, int Stop>
++struct ei_dot_vec_unroller<Derived1, Derived2, Index, Stop, true>
++{
++  enum {
++    row1 = Derived1::RowsAtCompileTime == 1 ? 0 : Index,
++    col1 = Derived1::RowsAtCompileTime == 1 ? Index : 0,
++    row2 = Derived2::RowsAtCompileTime == 1 ? 0 : Index,
++    col2 = Derived2::RowsAtCompileTime == 1 ? Index : 0,
++    alignment1 = (Derived1::Flags & AlignedBit) ? Aligned : Unaligned,
++    alignment2 = (Derived2::Flags & AlignedBit) ? Aligned : Unaligned
++  };
++
++  typedef typename Derived1::Scalar Scalar;
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++
++  inline static PacketScalar run(const Derived1& v1, const Derived2& v2)
++  {
++    return ei_pmul(v1.template packet<alignment1>(row1, col1), v2.template packet<alignment2>(row2, col2));
++  }
++};
++
++/***************************************************************************
++* Part 3 : implementation of all cases
++***************************************************************************/
++
++template<typename Derived1, typename Derived2,
++         int Vectorization = ei_dot_traits<Derived1, Derived2>::Vectorization,
++         int Unrolling = ei_dot_traits<Derived1, Derived2>::Unrolling
++>
++struct ei_dot_impl;
++
++template<typename Derived1, typename Derived2>
++struct ei_dot_impl<Derived1, Derived2, NoVectorization, NoUnrolling>
++{
++  typedef typename Derived1::Scalar Scalar;
++  static Scalar run(const Derived1& v1, const Derived2& v2)
++  {
++    Scalar res;
++    res = v1.coeff(0) * ei_conj(v2.coeff(0));
++    for(int i = 1; i < v1.size(); i++)
++      res += v1.coeff(i) * ei_conj(v2.coeff(i));
++    return res;
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_dot_impl<Derived1, Derived2, NoVectorization, CompleteUnrolling>
++  : public ei_dot_novec_unroller<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>
++{};
++
++template<typename Derived1, typename Derived2>
++struct ei_dot_impl<Derived1, Derived2, LinearVectorization, NoUnrolling>
++{
++  typedef typename Derived1::Scalar Scalar;
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++
++  static Scalar run(const Derived1& v1, const Derived2& v2)
++  {
++    const int size = v1.size();
++    const int packetSize = ei_packet_traits<Scalar>::size;
++    const int alignedSize = (size/packetSize)*packetSize;
++    const int alignment1 = (Derived1::Flags & AlignedBit) ? Aligned : Unaligned;
++    const int alignment2 = (Derived2::Flags & AlignedBit) ? Aligned : Unaligned;
++    Scalar res;
++
++    // do the vectorizable part of the sum
++    if(size >= packetSize)
++    {
++      PacketScalar packet_res = ei_pmul(
++                                  v1.template packet<alignment1>(0),
++                                  v2.template packet<alignment2>(0)
++                                );
++      for(int index = packetSize; index<alignedSize; index += packetSize)
++      {
++        packet_res = ei_pmadd(
++                       v1.template packet<alignment1>(index),
++                       v2.template packet<alignment2>(index),
++                       packet_res
++                     );
++      }
++      res = ei_predux(packet_res);
++
++      // now we must do the rest without vectorization.
++      if(alignedSize == size) return res;
++    }
++    else // too small to vectorize anything.
++         // since this is dynamic-size, and hence inefficient anyway for such small sizes, don't try to optimize.
++    {
++      res = Scalar(0);
++    }
++
++    // do the remainder of the vector
++    for(int index = alignedSize; index < size; index++)
++    {
++      res += v1.coeff(index) * v2.coeff(index);
++    }
++
++    return res;
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_dot_impl<Derived1, Derived2, LinearVectorization, CompleteUnrolling>
++{
++  typedef typename Derived1::Scalar Scalar;
++  static Scalar run(const Derived1& v1, const Derived2& v2)
++  {
++    return ei_predux(
++      ei_dot_vec_unroller<Derived1, Derived2, 0, Derived1::SizeAtCompileTime>::run(v1, v2)
++    );
++  }
++};
++
++/***************************************************************************
++* Part 4 : implementation of MatrixBase methods
++***************************************************************************/
++
++/** \returns the dot product of *this with other.
++  *
++  * \only_for_vectors
++  *
++  * \note If the scalar type is complex, then this function returns the Hermitian
++  * (sesquilinear) dot product, linear in the first variable and anti-linear in the
++  * second variable.
++  *
++  * \sa norm2(), norm()
++  */
++template<typename Derived>
++template<typename OtherDerived>
++typename ei_traits<Derived>::Scalar
++MatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
++{
++  typedef typename Derived::Nested Nested;
++  typedef typename OtherDerived::Nested OtherNested;
++  typedef typename ei_unref<Nested>::type _Nested;
++  typedef typename ei_unref<OtherNested>::type _OtherNested;
++
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(_Nested);
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(_OtherNested);
++  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(_Nested,_OtherNested);
++  ei_assert(size() == other.size());
++
++  return ei_dot_impl<_Nested, _OtherNested>::run(derived(), other.derived());
++}
++
++/** \returns the squared norm of *this, i.e. the dot product of *this with itself.
++  *
++  * \only_for_vectors
++  *
++  * \sa dot(), norm()
++  */
++template<typename Derived>
++inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm2() const
++{
++  return ei_real(dot(*this));
++}
++
++/** \returns the norm of *this, i.e. the square root of the dot product of *this with itself.
++  *
++  * \only_for_vectors
++  *
++  * \sa dot(), norm2()
++  */
++template<typename Derived>
++inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<Derived>::norm() const
++{
++  return ei_sqrt(norm2());
++}
++
++/** \returns an expression of the quotient of *this by its own norm.
++  *
++  * \only_for_vectors
++  *
++  * \sa norm(), normalize()
++  */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::EvalType
++MatrixBase<Derived>::normalized() const
++{
++  typedef typename ei_nested<Derived>::type Nested;
++  typedef typename ei_unref<Nested>::type _Nested;
++  _Nested n(derived());
++  return n / n.norm();
++}
++
++/** Normalizes the vector, i.e. divides it by its own norm.
++  *
++  * \only_for_vectors
++  *
++  * \sa norm(), normalized()
++  */
++template<typename Derived>
++inline void MatrixBase<Derived>::normalize()
++{
++  *this /= norm();
++}
++
++/** \returns true if *this is approximately orthogonal to \a other,
++  *          within the precision given by \a prec.
++  *
++  * Example: \include MatrixBase_isOrthogonal.cpp
++  * Output: \verbinclude MatrixBase_isOrthogonal.out
++  */
++template<typename Derived>
++template<typename OtherDerived>
++bool MatrixBase<Derived>::isOrthogonal
++(const MatrixBase<OtherDerived>& other, RealScalar prec) const
++{
++  typename ei_nested<Derived,2>::type nested(derived());
++  typename ei_nested<OtherDerived,2>::type otherNested(other.derived());
++  return ei_abs2(nested.dot(otherNested)) <= prec * prec * nested.norm2() * otherNested.norm2();
++}
++
++/** \returns true if *this is approximately a unitary matrix,
++  *          within the precision given by \a prec. In the case where the \a Scalar
++  *          type is real, a unitary matrix is an orthogonal matrix, whence the name.
++  *
++  * \note This can be used to check whether a family of vectors forms an orthonormal basis.
++  *       Indeed, \c m.isUnitary() returns true if and only if the columns (equivalently, the rows) of m form an
++  *       orthonormal basis.
++  *
++  * Example: \include MatrixBase_isUnitary.cpp
++  * Output: \verbinclude MatrixBase_isUnitary.out
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isUnitary(RealScalar prec) const
++{
++  typename Derived::Nested nested(derived());
++  for(int i = 0; i < cols(); i++)
++  {
++    if(!ei_isApprox(nested.col(i).norm2(), static_cast<Scalar>(1), prec))
++      return false;
++    for(int j = 0; j < i; j++)
++      if(!ei_isMuchSmallerThan(nested.col(i).dot(nested.col(j)), static_cast<Scalar>(1), prec))
++        return false;
++  }
++  return true;
++}
++#endif // EIGEN_DOT_H
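A minimal sketch of the dot/norm API defined above; note that in this Eigen2 snapshot the squared norm is spelled norm2() (illustration only):

  // dot_norm.cpp (hypothetical example file)
  #include <Eigen/Core>
  #include <iostream>
  using namespace Eigen;

  int main()
  {
    Vector3f a(1.f, 0.f, 0.f);
    Vector3f b(0.f, 2.f, 0.f);
    std::cout << a.dot(b) << "\n";                          // 0
    std::cout << b.norm2() << "\n";                         // squared norm: 4
    std::cout << b.norm() << "\n";                          // 2
    std::cout << b.normalized().transpose() << "\n";        // (0, 1, 0)
    std::cout << a.isOrthogonal(b) << "\n";                 // 1 (true)
    std::cout << Matrix3f::Identity().isUnitary() << "\n";  // 1 (true)
    return 0;
  }
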
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/DummyPacketMath.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/DummyPacketMath.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/DummyPacketMath.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/DummyPacketMath.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,154 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_DUMMY_PACKET_MATH_H
++#define EIGEN_DUMMY_PACKET_MATH_H
++
++// Default implementation for types not supported by the vectorization.
++// In practice these functions are provided to make writing generic
++// vectorized code easier. However, at runtime, they should never be
++// called. TODO: should we raise an assertion here or not?
++/** \internal \returns a + b (coeff-wise) */
++template <typename Packet> inline Packet
++ei_padd(const Packet& a,
++        const Packet& b) { return a+b; }
++
++/** \internal \returns a - b (coeff-wise) */
++template <typename Packet> inline Packet
++ei_psub(const Packet& a,
++        const Packet& b) { return a-b; }
++
++/** \internal \returns a * b (coeff-wise) */
++template <typename Packet> inline Packet
++ei_pmul(const Packet& a,
++        const Packet& b) { return a*b; }
++
++/** \internal \returns a / b (coeff-wise) */
++template <typename Packet> inline Packet
++ei_pdiv(const Packet& a,
++        const Packet& b) { return a/b; }
++
++/** \internal \returns the min of \a a and \a b  (coeff-wise) */
++template <typename Packet> inline Packet
++ei_pmin(const Packet& a,
++        const Packet& b) { return std::min(a, b); }
++
++/** \internal \returns the max of \a a and \a b  (coeff-wise) */
++template <typename Packet> inline Packet
++ei_pmax(const Packet& a,
++        const Packet& b) { return std::max(a, b); }
++
++/** \internal \returns a packet version of \a *from, from must be 16 bytes aligned */
++template <typename Scalar> inline typename ei_packet_traits<Scalar>::type
++ei_pload(const Scalar* from) { return *from; }
++
++/** \internal \returns a packet version of \a *from, (un-aligned load) */
++template <typename Scalar> inline typename ei_packet_traits<Scalar>::type
++ei_ploadu(const Scalar* from) { return *from; }
++
++/** \internal \returns a packet with constant coefficients \a a, e.g.: (a,a,a,a) */
++template <typename Scalar> inline typename ei_packet_traits<Scalar>::type
++ei_pset1(const Scalar& a) { return a; }
++
++/** \internal copy the packet \a from to \a *to, \a to must be 16 bytes aligned */
++template <typename Scalar, typename Packet> inline void ei_pstore(Scalar* to, const Packet& from)
++{ (*to) = from; }
++
++/** \internal copy the packet \a from to \a *to, (un-aligned store) */
++template <typename Scalar, typename Packet> inline void ei_pstoreu(Scalar* to, const Packet& from)
++{ (*to) = from; }
++
++/** \internal \returns the first element of a packet */
++template <typename Packet> inline typename ei_unpacket_traits<Packet>::type ei_pfirst(const Packet& a)
++{ return a; }
++
++/** \internal \returns a packet whose element i contains the sum of the coefficients of the packet \a vecs[i] */
++template <typename Packet> inline Packet
++ei_preduxp(const Packet* vecs) { return vecs[0]; }
++
++/** \internal \returns the sum of the elements of \a a*/
++template <typename Packet> inline typename ei_unpacket_traits<Packet>::type ei_predux(const Packet& a)
++{ return a; }
++
++
++////////////
++
++
++/** \internal \returns a * b + c (coeff-wise) */
++template <typename Packet> inline Packet
++ei_pmadd(const Packet&  a,
++         const Packet&  b,
++         const Packet&  c)
++{ return ei_padd(ei_pmul(a, b),c); }
++
++/** \internal \returns a packet version of \a *from. If LoadMode equals Aligned, \a from must be 16 bytes aligned */
++template <typename Scalar, int LoadMode> inline typename ei_packet_traits<Scalar>::type ei_ploadt(const Scalar* from)
++{
++  if(LoadMode == Aligned)
++    return ei_pload(from);
++  else
++    return ei_ploadu(from);
++}
++
++/** \internal copy the packet \a from to \a *to. If StoreMode equals Aligned, \a to must be 16 bytes aligned */
++template <typename Scalar, typename Packet, int LoadMode> inline void ei_pstoret(Scalar* to, const Packet& from)
++{
++  if(LoadMode == Aligned)
++    ei_pstore(to, from);
++  else
++    ei_pstoreu(to, from);
++}
++
++/** \internal \returns the number of elements which have to be skipped such that data are aligned */
++template<typename Scalar>
++inline static int ei_alignmentOffset(const Scalar* ptr, int maxOffset)
++{
++  typedef typename ei_packet_traits<Scalar>::type Packet;
++  const int PacketSize = ei_packet_traits<Scalar>::size;
++  const int PacketAlignedMask = PacketSize-1;
++  const bool Vectorized = PacketSize>1;
++  return Vectorized
++          ? std::min<int>( (PacketSize - ((size_t(ptr)/sizeof(Scalar)) & PacketAlignedMask))
++                           & PacketAlignedMask, maxOffset)
++          : 0;
++}
++
++/** \internal specialization of ei_palign() */
++template<int Offset,typename PacketType>
++struct ei_palign_impl
++{
++  // by default data are aligned, so there is nothing to be done :)
++  inline static void run(PacketType&, const PacketType&) {}
++};
++
++/** \internal update \a first using the concatenation of the \a Offset last elements
++  * of \a first and packet_size minus \a Offset first elements of \a second */
++template<int Offset,typename PacketType>
++inline void ei_palign(PacketType& first, const PacketType& second)
++{
++  ei_palign_impl<Offset,PacketType>::run(first,second);
++}
++
++#endif // EIGEN_DUMMY_PACKET_MATH_H
++
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Flagged.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Flagged.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Flagged.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Flagged.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,154 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_FLAGGED_H
++#define EIGEN_FLAGGED_H
++
++/** \class Flagged
++  *
++  * \brief Expression with modified flags
++  *
++  * \param ExpressionType the type of the object whose flags we are modifying
++  * \param Added the flags added to the expression
++  * \param Removed the flags removed from the expression (has priority over Added).
++  *
++  * This class represents an expression whose flags have been modified.
++  * It is the return type of MatrixBase::flagged()
++  * and most of the time this is the only way it is used.
++  *
++  * \sa MatrixBase::flagged()
++  */
++template<typename ExpressionType, unsigned int Added, unsigned int Removed>
++struct ei_traits<Flagged<ExpressionType, Added, Removed> >
++{
++  typedef typename ExpressionType::Scalar Scalar;
++
++  enum {
++    RowsAtCompileTime = ExpressionType::RowsAtCompileTime,
++    ColsAtCompileTime = ExpressionType::ColsAtCompileTime,
++    MaxRowsAtCompileTime = ExpressionType::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = ExpressionType::MaxColsAtCompileTime,
++    Flags = (ExpressionType::Flags | Added) & ~Removed,
++    CoeffReadCost = ExpressionType::CoeffReadCost
++  };
++};
++
++template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged
++  : public MatrixBase<Flagged<ExpressionType, Added, Removed> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Flagged)
++    typedef typename ei_meta_if<ei_must_nest_by_value<ExpressionType>::ret,
++        ExpressionType, const ExpressionType&>::ret ExpressionTypeNested;
++
++    inline Flagged(const ExpressionType& matrix) : m_matrix(matrix) {}
++
++    inline int rows() const { return m_matrix.rows(); }
++    inline int cols() const { return m_matrix.cols(); }
++    inline int stride() const { return m_matrix.stride(); }
++
++    inline const Scalar coeff(int row, int col) const
++    {
++      return m_matrix.coeff(row, col);
++    }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      return m_matrix.const_cast_derived().coeffRef(row, col);
++    }
++
++    inline const Scalar coeff(int index) const
++    {
++      return m_matrix.coeff(index);
++    }
++
++    inline Scalar& coeffRef(int index)
++    {
++      return m_matrix.const_cast_derived().coeffRef(index);
++    }
++
++    template<int LoadMode>
++    inline const PacketScalar packet(int row, int col) const
++    {
++      return m_matrix.template packet<LoadMode>(row, col);
++    }
++
++    template<int LoadMode>
++    inline void writePacket(int row, int col, const PacketScalar& x)
++    {
++      m_matrix.const_cast_derived().template writePacket<LoadMode>(row, col, x);
++    }
++
++    template<int LoadMode>
++    inline const PacketScalar packet(int index) const
++    {
++      return m_matrix.template packet<LoadMode>(index);
++    }
++
++    template<int LoadMode>
++    inline void writePacket(int index, const PacketScalar& x)
++    {
++      m_matrix.const_cast_derived().template writePacket<LoadMode>(index, x);
++    }
++
++    const ExpressionType& _expression() const { return m_matrix; }
++
++  protected:
++    ExpressionTypeNested m_matrix;
++};
++
++/** \returns an expression of *this with added flags
++  *
++  * \addexample MarkExample \label How to mark a triangular matrix as triangular
++  *
++  * Example: \include MatrixBase_marked.cpp
++  * Output: \verbinclude MatrixBase_marked.out
++  *
++  * \sa class Flagged, extract(), part()
++  */
++template<typename Derived>
++template<unsigned int Added>
++inline const Flagged<Derived, Added, 0>
++MatrixBase<Derived>::marked() const
++{
++  return derived();
++}
++
++/** \returns an expression of *this with the following flags removed:
++  * EvalBeforeNestingBit and EvalBeforeAssigningBit.
++  *
++  * Example: \include MatrixBase_lazy.cpp
++  * Output: \verbinclude MatrixBase_lazy.out
++  *
++  * \sa class Flagged, marked()
++  */
++template<typename Derived>
++inline const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>
++MatrixBase<Derived>::lazy() const
++{
++  return derived();
++}
++
++#endif // EIGEN_FLAGGED_H
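A minimal sketch of lazy(), the most common use of Flagged; this assumes, as required, that the result does not alias the operands (illustration only):

  // lazy_product.cpp (hypothetical example file)
  #include <Eigen/Core>
  using namespace Eigen;

  int main()
  {
    Matrix3f a = Matrix3f::Identity();
    Matrix3f b = Matrix3f::Ones();
    Matrix3f c;
    // Without lazy(), the product carries EvalBeforeNestingBit/EvalBeforeAssigningBit
    // and goes through a temporary; lazy() removes those flags so the product is
    // evaluated directly into c. Safe here because c does not alias a or b.
    c = (a * b).lazy();
    return 0;
  }
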
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Functors.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Functors.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Functors.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Functors.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,339 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_FUNCTORS_H
++#define EIGEN_FUNCTORS_H
++
++// associative functors:
++
++/** \internal
++  * \brief Template functor to compute the sum of two scalars
++  *
++  * \sa class CwiseBinaryOp, MatrixBase::operator+, class PartialRedux, MatrixBase::sum()
++  */
++template<typename Scalar> struct ei_scalar_sum_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a, const Scalar& b) const { return a + b; }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
++  { return ei_padd(a,b); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_sum_op<Scalar> > {
++  enum {
++    Cost = NumTraits<Scalar>::AddCost,
++    PacketAccess = ei_packet_traits<Scalar>::size>1
++  };
++};
++
++/** \internal
++  * \brief Template functor to compute the product of two scalars
++  *
++  * \sa class CwiseBinaryOp, Cwise::operator*(), class PartialRedux, MatrixBase::redux()
++  */
++template<typename Scalar> struct ei_scalar_product_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a, const Scalar& b) const { return a * b; }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
++  { return ei_pmul(a,b); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_product_op<Scalar> > {
++  enum {
++    Cost = NumTraits<Scalar>::MulCost,
++    PacketAccess = ei_packet_traits<Scalar>::size>1
++  };
++};
++
++/** \internal
++  * \brief Template functor to compute the min of two scalars
++  *
++  * \sa class CwiseBinaryOp, MatrixBase::cwiseMin, class PartialRedux, MatrixBase::minCoeff()
++  */
++template<typename Scalar> struct ei_scalar_min_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a, const Scalar& b) const { return std::min(a, b); }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
++  { return ei_pmin(a,b); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_min_op<Scalar> > {
++  enum {
++    Cost = NumTraits<Scalar>::AddCost,
++    PacketAccess = ei_packet_traits<Scalar>::size>1
++  };
++};
++
++/** \internal
++  * \brief Template functor to compute the max of two scalars
++  *
++  * \sa class CwiseBinaryOp, MatrixBase::cwiseMax, class PartialRedux, MatrixBase::maxCoeff()
++  */
++template<typename Scalar> struct ei_scalar_max_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a, const Scalar& b) const { return std::max(a, b); }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
++  { return ei_pmax(a,b); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_max_op<Scalar> > {
++  enum {
++    Cost = NumTraits<Scalar>::AddCost,
++    PacketAccess = ei_packet_traits<Scalar>::size>1
++  };
++};
++
++
++// other binary functors:
++
++/** \internal
++  * \brief Template functor to compute the difference of two scalars
++  *
++  * \sa class CwiseBinaryOp, MatrixBase::operator-
++  */
++template<typename Scalar> struct ei_scalar_difference_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a, const Scalar& b) const { return a - b; }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
++  { return ei_psub(a,b); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_difference_op<Scalar> > {
++  enum {
++    Cost = NumTraits<Scalar>::AddCost,
++    PacketAccess = ei_packet_traits<Scalar>::size>1
++  };
++};
++
++/** \internal
++  * \brief Template functor to compute the quotient of two scalars
++  *
++  * \sa class CwiseBinaryOp, Cwise::operator/()
++  */
++template<typename Scalar> struct ei_scalar_quotient_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a, const Scalar& b) const { return a / b; }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a, const PacketScalar& b) const
++  { return ei_pdiv(a,b); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_quotient_op<Scalar> > {
++  enum {
++    Cost = 2 * NumTraits<Scalar>::MulCost,
++    PacketAccess = ei_packet_traits<Scalar>::size>1
++                  #ifdef EIGEN_VECTORIZE_SSE
++                  && NumTraits<Scalar>::HasFloatingPoint
++                  #endif
++  };
++};
++
++// unary functors:
++
++/** \internal
++  * \brief Template functor to compute the opposite of a scalar
++  *
++  * \sa class CwiseUnaryOp, MatrixBase::operator-
++  */
++template<typename Scalar> struct ei_scalar_opposite_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a) const { return -a; }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_opposite_op<Scalar> >
++{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false }; };
++
++/** \internal
++  * \brief Template functor to compute the absolute value of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::abs
++  */
++template<typename Scalar> struct ei_scalar_abs_op EIGEN_EMPTY_STRUCT {
++  typedef typename NumTraits<Scalar>::Real result_type;
++  inline const result_type operator() (const Scalar& a) const { return ei_abs(a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_abs_op<Scalar> >
++{
++  enum {
++    Cost = NumTraits<Scalar>::AddCost,
++    PacketAccess = false // this could actually be vectorized with SSSE3.
++  };
++};
++
++/** \internal
++  * \brief Template functor to compute the squared absolute value of a scalar
++  *
++  * \sa class CwiseUnaryOp, Cwise::abs2
++  */
++template<typename Scalar> struct ei_scalar_abs2_op EIGEN_EMPTY_STRUCT {
++  typedef typename NumTraits<Scalar>::Real result_type;
++  inline const result_type operator() (const Scalar& a) const { return ei_abs2(a); }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a) const
++  { return ei_pmul(a,a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_abs2_op<Scalar> >
++{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = int(ei_packet_traits<Scalar>::size)>1 }; };
++
++/** \internal
++  * \brief Template functor to compute the conjugate of a complex value
++  *
++  * \sa class CwiseUnaryOp, MatrixBase::conjugate()
++  */
++template<typename Scalar> struct ei_scalar_conjugate_op EIGEN_EMPTY_STRUCT {
++  inline const Scalar operator() (const Scalar& a) const { return ei_conj(a); }
++  template<typename PacketScalar>
++  inline const PacketScalar packetOp(const PacketScalar& a) const { return a; }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_conjugate_op<Scalar> >
++{
++  enum {
++    Cost = NumTraits<Scalar>::IsComplex ? NumTraits<Scalar>::AddCost : 0,
++    PacketAccess = int(ei_packet_traits<Scalar>::size)>1
++  };
++};
++
++/** \internal
++  * \brief Template functor to cast a scalar to another type
++  *
++  * \sa class CwiseUnaryOp, MatrixBase::cast()
++  */
++template<typename Scalar, typename NewType>
++struct ei_scalar_cast_op EIGEN_EMPTY_STRUCT {
++  typedef NewType result_type;
++  inline const NewType operator() (const Scalar& a) const { return static_cast<NewType>(a); }
++};
++template<typename Scalar, typename NewType>
++struct ei_functor_traits<ei_scalar_cast_op<Scalar,NewType> >
++{ enum { Cost = ei_is_same_type<Scalar, NewType>::ret ? 0 : NumTraits<NewType>::AddCost, PacketAccess = false }; };
++
++/** \internal
++  * \brief Template functor to extract the real part of a complex
++  *
++  * \sa class CwiseUnaryOp, MatrixBase::real()
++  */
++template<typename Scalar>
++struct ei_scalar_real_op EIGEN_EMPTY_STRUCT {
++  typedef typename NumTraits<Scalar>::Real result_type;
++  inline result_type operator() (const Scalar& a) const { return ei_real(a); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_real_op<Scalar> >
++{ enum { Cost =  0, PacketAccess = false }; };
++
++/** \internal
++  * \brief Template functor to multiply a scalar by a fixed other one
++  *
++  * \sa class CwiseUnaryOp, MatrixBase::operator*, MatrixBase::operator/
++  */
++/* NOTE why doing the ei_pset1() here *is* an optimization:
++ * it might seem better to declare m_other as a PacketScalar and do the ei_pset1() once
++ * in the constructor. However, in practice:
++ *  - GCC does not like m_other as a PacketScalar and generates a load every time it needs it
++ *  - on the other hand GCC is able to move the ei_pset1() out of the loop :)
++ *  - simpler code ;)
++ * (ICC performs well in both cases)
++ */
++template<typename Scalar>
++struct ei_scalar_multiple_op {
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++  inline ei_scalar_multiple_op(const Scalar& other) : m_other(other) { }
++  inline Scalar operator() (const Scalar& a) const { return a * m_other; }
++  inline const PacketScalar packetOp(const PacketScalar& a) const
++  { return ei_pmul(a, ei_pset1(m_other)); }
++  const Scalar m_other;
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_multiple_op<Scalar> >
++{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = ei_packet_traits<Scalar>::size>1 }; };
++
++template<typename Scalar, bool HasFloatingPoint>
++struct ei_scalar_quotient1_impl {
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++  inline ei_scalar_quotient1_impl(const Scalar& other) : m_other(static_cast<Scalar>(1) / other) {}
++  inline Scalar operator() (const Scalar& a) const { return a * m_other; }
++  inline const PacketScalar packetOp(const PacketScalar& a) const
++  { return ei_pmul(a, ei_pset1(m_other)); }
++  const Scalar m_other;
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_quotient1_impl<Scalar,true> >
++{ enum { Cost = NumTraits<Scalar>::MulCost, PacketAccess = ei_packet_traits<Scalar>::size>1 }; };
++
++template<typename Scalar>
++struct ei_scalar_quotient1_impl<Scalar,false> {
++  inline ei_scalar_quotient1_impl(const Scalar& other) : m_other(other) {}
++  inline Scalar operator() (const Scalar& a) const { return a / m_other; }
++  const Scalar m_other;
++  enum { Cost = 2 * NumTraits<Scalar>::MulCost };
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_quotient1_impl<Scalar,false> >
++{ enum { Cost = 2 * NumTraits<Scalar>::MulCost, PacketAccess = false }; };
++
++/** \internal
++  * \brief Template functor to divide a scalar by a fixed other one
++  *
++  * This functor is used to implement the quotient of a matrix by
++  * a scalar where the scalar type is not necessarily a floating point type.
++  *
++  * \sa class CwiseUnaryOp, MatrixBase::operator/
++  */
++template<typename Scalar>
++struct ei_scalar_quotient1_op : ei_scalar_quotient1_impl<Scalar, NumTraits<Scalar>::HasFloatingPoint > {
++  inline ei_scalar_quotient1_op(const Scalar& other)
++    : ei_scalar_quotient1_impl<Scalar, NumTraits<Scalar>::HasFloatingPoint >(other) {}
++};
++
++// nullary functors
++
++template<typename Scalar>
++struct ei_scalar_constant_op {
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++  inline ei_scalar_constant_op(const ei_scalar_constant_op& other) : m_other(other.m_other) { }
++  inline ei_scalar_constant_op(const Scalar& other) : m_other(other) { }
++  inline const Scalar operator() (int, int = 0) const { return m_other; }
++  inline const PacketScalar packetOp() const { return ei_pset1(m_other); }
++  const Scalar m_other;
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_constant_op<Scalar> >
++{ enum { Cost = 1, PacketAccess = ei_packet_traits<Scalar>::size>1, IsRepeatable = true }; };
++
++template<typename Scalar> struct ei_scalar_identity_op EIGEN_EMPTY_STRUCT {
++  inline ei_scalar_identity_op(void) {}
++  inline const Scalar operator() (int row, int col) const { return row==col ? Scalar(1) : Scalar(0); }
++};
++template<typename Scalar>
++struct ei_functor_traits<ei_scalar_identity_op<Scalar> >
++{ enum { Cost = NumTraits<Scalar>::AddCost, PacketAccess = false, IsRepeatable = true }; };
++
++// NOTE quick hack:
++// all functors allow linear access, except ei_scalar_identity_op. So we define here a quick meta
++// to indicate whether a functor allows linear access, always answering 'yes' except for
++// ei_scalar_identity_op.
++template<typename Functor> struct ei_functor_has_linear_access { enum { ret = 1 }; };
++template<typename Scalar> struct ei_functor_has_linear_access<ei_scalar_identity_op<Scalar> > { enum { ret = 0 }; };
++
++#endif // EIGEN_FUNCTORS_H
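The file above repeats one pattern many times: a tiny stateless functor plus an ei_functor_traits specialization describing its cost and vectorizability. Below is a minimal, self-contained sketch of that pattern in plain C++ (all *_demo names are invented; only the shape of the code mirrors the patch, and the real traits additionally expose PacketAccess to enable SIMD paths):

    #include <iostream>

    // Stateless coefficient-wise functor, like ei_scalar_sum_op above.
    template<typename Scalar> struct scalar_sum_op_demo {
      const Scalar operator()(const Scalar& a, const Scalar& b) const { return a + b; }
    };

    // Compile-time traits queried by expression templates, like ei_functor_traits above.
    template<typename Functor> struct functor_traits_demo;
    template<typename Scalar> struct functor_traits_demo<scalar_sum_op_demo<Scalar> > {
      enum { Cost = 1 };
    };

    // The way CwiseBinaryOp consumes such a functor: apply it per coefficient pair.
    template<typename Scalar, typename Functor>
    Scalar apply_demo(const Scalar& a, const Scalar& b, const Functor& f) { return f(a, b); }

    int main()
    {
      scalar_sum_op_demo<double> op;
      std::cout << apply_demo(2.0, 3.0, op) << "\n";                               // 5
      std::cout << functor_traits_demo<scalar_sum_op_demo<double> >::Cost << "\n"; // 1
      return 0;
    }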
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Fuzzy.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Fuzzy.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Fuzzy.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Fuzzy.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,234 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_FUZZY_H
++#define EIGEN_FUZZY_H
++
++#ifndef EIGEN_LEGACY_COMPARES
++
++/** \returns \c true if \c *this is approximately equal to \a other, within the precision
++  * determined by \a prec.
++  *
++  * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
++  * are considered to be approximately equal within precision \f$ p \f$ if
++  * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
++  * For matrices, the comparison is done using the Hilbert-Schmidt norm (aka Frobenius norm,
++  * i.e. the L2 norm of the matrix viewed as a vector of coefficients).
++  *
++  * \note Because of the multiplicative nature of this comparison, one can't use this function
++  * to check whether \c *this is approximately equal to the zero matrix or vector.
++  * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
++  * or vector. If you want to test whether \c *this is zero, use isMuchSmallerThan(const
++  * RealScalar&, RealScalar) instead.
++  *
++  * \sa isMuchSmallerThan(const RealScalar&, RealScalar) const
++  */
++template<typename Derived>
++template<typename OtherDerived>
++bool MatrixBase<Derived>::isApprox(
++  const MatrixBase<OtherDerived>& other,
++  typename NumTraits<Scalar>::Real prec
++) const
++{
++  const typename ei_nested<Derived,2>::type nested(derived());
++  const typename ei_nested<OtherDerived,2>::type otherNested(other.derived());
++  return (nested - otherNested).cwise().abs2().sum() <= prec * prec * std::min(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
++}
++
++/** \returns \c true if the norm of \c *this is much smaller than \a other,
++  * within the precision determined by \a prec.
++  *
++  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
++  * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
++  * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
++  *
++  * For matrices, the comparison is done using the Hilbert-Schmidt norm. For this reason,
++  * the value of the reference scalar \a other should come from the Hilbert-Schmidt norm
++  * of a reference matrix of same dimensions.
++  *
++  * \sa isApprox(), isMuchSmallerThan(const MatrixBase<OtherDerived>&, RealScalar) const
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isMuchSmallerThan(
++  const typename NumTraits<Scalar>::Real& other,
++  typename NumTraits<Scalar>::Real prec
++) const
++{
++  return cwise().abs2().sum() <= prec * prec * other * other;
++}
++
++/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
++  * within the precision determined by \a prec.
++  *
++  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
++  * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
++  * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
++  * For matrices, the comparison is done using the Hilbert-Schmidt norm.
++  *
++  * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
++  */
++template<typename Derived>
++template<typename OtherDerived>
++bool MatrixBase<Derived>::isMuchSmallerThan(
++  const MatrixBase<OtherDerived>& other,
++  typename NumTraits<Scalar>::Real prec
++) const
++{
++  return this->cwise().abs2().sum() <= prec * prec * other.cwise().abs2().sum();
++}
++
++#else
++
++template<typename Derived, typename OtherDerived=Derived, bool IsVector=Derived::IsVectorAtCompileTime>
++struct ei_fuzzy_selector;
++
++/** \returns \c true if \c *this is approximately equal to \a other, within the precision
++  * determined by \a prec.
++  *
++  * \note The fuzzy compares are done multiplicatively. Two vectors \f$ v \f$ and \f$ w \f$
++  * are considered to be approximately equal within precision \f$ p \f$ if
++  * \f[ \Vert v - w \Vert \leqslant p\,\min(\Vert v\Vert, \Vert w\Vert). \f]
++  * For matrices, the comparison is done on all columns.
++  *
++  * \note Because of the multiplicative nature of this comparison, one can't use this function
++  * to check whether \c *this is approximately equal to the zero matrix or vector.
++  * Indeed, \c isApprox(zero) returns false unless \c *this itself is exactly the zero matrix
++  * or vector. If you want to test whether \c *this is zero, use isMuchSmallerThan(const
++  * RealScalar&, RealScalar) instead.
++  *
++  * \sa isMuchSmallerThan(const RealScalar&, RealScalar) const
++  */
++template<typename Derived>
++template<typename OtherDerived>
++bool MatrixBase<Derived>::isApprox(
++  const MatrixBase<OtherDerived>& other,
++  typename NumTraits<Scalar>::Real prec
++) const
++{
++  return ei_fuzzy_selector<Derived,OtherDerived>::isApprox(derived(), other.derived(), prec);
++}
++
++/** \returns \c true if the norm of \c *this is much smaller than \a other,
++  * within the precision determined by \a prec.
++  *
++  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
++  * considered to be much smaller than \f$ x \f$ within precision \f$ p \f$ if
++  * \f[ \Vert v \Vert \leqslant p\,\vert x\vert. \f]
++  * For matrices, the comparison is done on all columns.
++  *
++  * \sa isApprox(), isMuchSmallerThan(const MatrixBase<OtherDerived>&, RealScalar) const
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isMuchSmallerThan(
++  const typename NumTraits<Scalar>::Real& other,
++  typename NumTraits<Scalar>::Real prec
++) const
++{
++  return ei_fuzzy_selector<Derived>::isMuchSmallerThan(derived(), other, prec);
++}
++
++/** \returns \c true if the norm of \c *this is much smaller than the norm of \a other,
++  * within the precision determined by \a prec.
++  *
++  * \note The fuzzy compares are done multiplicatively. A vector \f$ v \f$ is
++  * considered to be much smaller than a vector \f$ w \f$ within precision \f$ p \f$ if
++  * \f[ \Vert v \Vert \leqslant p\,\Vert w\Vert. \f]
++  * For matrices, the comparison is done on all columns.
++  *
++  * \sa isApprox(), isMuchSmallerThan(const RealScalar&, RealScalar) const
++  */
++template<typename Derived>
++template<typename OtherDerived>
++bool MatrixBase<Derived>::isMuchSmallerThan(
++  const MatrixBase<OtherDerived>& other,
++  typename NumTraits<Scalar>::Real prec
++) const
++{
++  return ei_fuzzy_selector<Derived,OtherDerived>::isMuchSmallerThan(derived(), other.derived(), prec);
++}
++
++
++template<typename Derived, typename OtherDerived>
++struct ei_fuzzy_selector<Derived,OtherDerived,true>
++{
++  typedef typename Derived::RealScalar RealScalar;
++  static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec)
++  {
++    EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived);
++    ei_assert(self.size() == other.size());
++    return((self - other).norm2() <= std::min(self.norm2(), other.norm2()) * prec * prec);
++  }
++  static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec)
++  {
++    return(self.norm2() <= ei_abs2(other * prec));
++  }
++  static bool isMuchSmallerThan(const Derived& self, const OtherDerived& other, RealScalar prec)
++  {
++    EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived);
++    ei_assert(self.size() == other.size());
++    return(self.norm2() <= other.norm2() * prec * prec);
++  }
++};
++
++template<typename Derived, typename OtherDerived>
++struct ei_fuzzy_selector<Derived,OtherDerived,false>
++{
++  typedef typename Derived::RealScalar RealScalar;
++  static bool isApprox(const Derived& self, const OtherDerived& other, RealScalar prec)
++  {
++    EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived);
++    ei_assert(self.rows() == other.rows() && self.cols() == other.cols());
++    typename Derived::Nested nested(self);
++    typename OtherDerived::Nested otherNested(other);
++    for(int i = 0; i < self.cols(); i++)
++      if((nested.col(i) - otherNested.col(i)).norm2()
++          > std::min(nested.col(i).norm2(), otherNested.col(i).norm2()) * prec * prec)
++        return false;
++    return true;
++  }
++  static bool isMuchSmallerThan(const Derived& self, const RealScalar& other, RealScalar prec)
++  {
++    typename Derived::Nested nested(self);
++    for(int i = 0; i < self.cols(); i++)
++      if(nested.col(i).norm2() > ei_abs2(other * prec))
++        return false;
++    return true;
++  }
++  static bool isMuchSmallerThan(const Derived& self, const OtherDerived& other, RealScalar prec)
++  {
++    EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(Derived,OtherDerived);
++    ei_assert(self.rows() == other.rows() && self.cols() == other.cols());
++    typename Derived::Nested nested(self);
++    typename OtherDerived::Nested otherNested(other);
++    for(int i = 0; i < self.cols(); i++)
++      if(nested.col(i).norm2() > otherNested.col(i).norm2() * prec * prec)
++        return false;
++    return true;
++  }
++};
++
++#endif
++
++#endif // EIGEN_FUZZY_H
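To make the multiplicative semantics above concrete, here is a minimal sketch (not part of the patch). It assumes the embedded Eigen tree is on the include path and the default precision arguments declared in MatrixBase.h; the vectors and tolerances are invented for illustration:

    #include <Eigen/Core>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
      Vector3d v(1.0, 2.0, 3.0);
      Vector3d w = v * (1.0 + 1e-13);          // tiny relative perturbation
      std::cout << v.isApprox(w) << "\n";      // 1: ||v-w|| <= prec * min(||v||, ||w||)

      Vector3d tiny(1e-13, 1e-13, 1e-13);
      Vector3d zero(0.0, 0.0, 0.0);
      // The pitfall documented above: a multiplicative compare against zero is false
      // unless *this is exactly zero; compare against a reference scale instead.
      std::cout << tiny.isApprox(zero) << "\n";          // 0
      std::cout << tiny.isMuchSmallerThan(1.0) << "\n";  // 1
      return 0;
    }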
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/IO.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/IO.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/IO.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/IO.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,56 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either 
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of 
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public 
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_IO_H
++#define EIGEN_IO_H
++
++template<typename Derived>
++std::ostream & ei_print_matrix
++(std::ostream & s,
++ const MatrixBase<Derived> & m)
++{
++  for(int i = 0; i < m.rows(); i++)
++  {
++    s << m.coeff(i, 0);
++    for(int j = 1; j < m.cols(); j++)
++      s << " " << m.coeff(i, j);
++    if( i < m.rows() - 1)
++      s << "\n";
++  }
++  return s;
++}
++
++/** \relates MatrixBase
++  *
++  * Outputs the matrix, laid out as an array as usual, to the given stream.
++  */
++template<typename Derived>
++std::ostream & operator <<
++(std::ostream & s,
++ const MatrixBase<Derived> & m)
++{
++  return ei_print_matrix(s, m.eval());
++}
++
++#endif // EIGEN_IO_H
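The operator<< added above prints coefficients row by row, separated by single spaces. A minimal usage sketch (not part of the patch), assuming the embedded Eigen tree is on the include path:

    #include <Eigen/Core>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
      Matrix2d m;
      m << 1, 2,
           3, 4;              // comma-initializer declared in MatrixBase
      std::cout << m << "\n"; // prints "1 2" on the first line, "3 4" on the second
      return 0;
    }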
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/MapBase.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/MapBase.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/MapBase.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/MapBase.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,172 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_MAPBASE_H
++#define EIGEN_MAPBASE_H
++
++/** \class MapBase
++  *
++  * \brief Base class for Map and Block expression with direct access
++  *
++  * Expression classes inheriting MapBase must define the constant \c PacketAccess,
++  * and type \c AlignedDerivedType in their respective ei_traits<> specialization structure.
++  * The value of \c PacketAccess can be either:
++  *  - \b ForceAligned which enforces both aligned loads and stores
++  *  - \b AsRequested which is the default behavior
++  * The type \c AlignedDerivedType should correspond to the equivalent expression type
++  * with \c PacketAccess being \c ForceAligned.
++  *
++  * \sa class Map, class Block
++  */
++template<typename Derived> class MapBase
++  : public MatrixBase<Derived>
++{
++  public:
++
++    typedef MatrixBase<Derived> Base;
++    enum {
++      IsRowMajor = (int(ei_traits<Derived>::Flags) & RowMajorBit) ? 1 : 0,
++      PacketAccess = ei_traits<Derived>::PacketAccess,
++      RowsAtCompileTime = ei_traits<Derived>::RowsAtCompileTime,
++      ColsAtCompileTime = ei_traits<Derived>::ColsAtCompileTime,
++      SizeAtCompileTime = Base::SizeAtCompileTime
++    };
++    
++    typedef typename ei_traits<Derived>::AlignedDerivedType AlignedDerivedType;
++    typedef typename ei_traits<Derived>::Scalar Scalar;
++    typedef typename Base::PacketScalar PacketScalar;
++    using Base::derived;
++
++    inline int rows() const { return m_rows.value(); }
++    inline int cols() const { return m_cols.value(); }
++
++    inline int stride() const { return derived().stride(); }
++
++    /** \returns an expression equivalent to \c *this but having the \c PacketAccess constant
++      * set to \c ForceAligned. Must be reimplemented by the derived class. */
++    AlignedDerivedType forceAligned() { return derived().forceAligned(); }
++
++    inline const Scalar& coeff(int row, int col) const
++    {
++      if(IsRowMajor)
++        return m_data[col + row * stride()];
++      else // column-major
++        return m_data[row + col * stride()];
++    }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      if(IsRowMajor)
++        return const_cast<Scalar*>(m_data)[col + row * stride()];
++      else // column-major
++        return const_cast<Scalar*>(m_data)[row + col * stride()];
++    }
++    
++    inline const Scalar coeff(int index) const
++    {
++      ei_assert(Derived::IsVectorAtCompileTime || (ei_traits<Derived>::Flags & LinearAccessBit));
++      if ( ((RowsAtCompileTime == 1) == IsRowMajor) )
++        return m_data[index];
++      else
++        return m_data[index*stride()];
++    }
++
++    inline Scalar& coeffRef(int index)
++    {
++      return *const_cast<Scalar*>(m_data + index);
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int row, int col) const
++    {
++      return ei_ploadt<Scalar, int(PacketAccess) == ForceAligned ? Aligned : LoadMode>
++               (m_data + (IsRowMajor ? col + row * stride()
++                                     : row + col * stride()));
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int index) const
++    {
++      return ei_ploadt<Scalar, int(PacketAccess) == ForceAligned ? Aligned : LoadMode>(m_data + index);
++    }
++
++    template<int StoreMode>
++    inline void writePacket(int row, int col, const PacketScalar& x)
++    {
++      ei_pstoret<Scalar, PacketScalar, int(PacketAccess) == ForceAligned ? Aligned : StoreMode>
++               (const_cast<Scalar*>(m_data) + (IsRowMajor ? col + row * stride()
++                                                          : row + col * stride()), x);
++    }
++
++    template<int StoreMode>
++    inline void writePacket(int index, const PacketScalar& x)
++    {
++      ei_pstoret<Scalar, PacketScalar, int(PacketAccess) == ForceAligned ? Aligned : StoreMode>
++        (const_cast<Scalar*>(m_data) + index, x);
++    }
++
++    inline MapBase(const Scalar* data) : m_data(data), m_rows(RowsAtCompileTime), m_cols(ColsAtCompileTime)
++    {
++      EIGEN_STATIC_ASSERT_FIXED_SIZE(Derived)
++    }
++
++    inline MapBase(const Scalar* data, int size)
++            : m_data(data),
++              m_rows(RowsAtCompileTime == Dynamic ? size : RowsAtCompileTime),
++              m_cols(ColsAtCompileTime == Dynamic ? size : ColsAtCompileTime)
++    {
++      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
++      ei_assert(size > 0);
++      ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == size);
++    }
++
++    inline MapBase(const Scalar* data, int rows, int cols)
++            : m_data(data), m_rows(rows), m_cols(cols)
++    {
++      ei_assert(rows > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
++             && cols > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
++    }
++
++    template<typename OtherDerived>
++    Derived& operator+=(const MatrixBase<OtherDerived>& other)
++    { return derived() = forceAligned() + other; }
++    
++    template<typename OtherDerived>
++    Derived& operator-=(const MatrixBase<OtherDerived>& other)
++    { return derived() = forceAligned() - other; }
++
++    Derived& operator*=(const Scalar& other)
++    { return derived() = forceAligned() * other; }
++    
++    Derived& operator/=(const Scalar& other)
++    { return derived() = forceAligned() / other; }
++
++  protected:
++    const Scalar* EIGEN_RESTRICT m_data;
++    const ei_int_if_dynamic<RowsAtCompileTime> m_rows;
++    const ei_int_if_dynamic<ColsAtCompileTime> m_cols;
++};
++
++#endif // EIGEN_MAPBASE_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Map.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Map.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Map.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Map.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,103 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_MAP_H
++#define EIGEN_MAP_H
++
++/** \class Map
++  *
++  * \brief A matrix or vector expression mapping an existing array of data.
++  *
++  * \param MatrixType the equivalent matrix type of the mapped data
++  * \param _PacketAccess allows enforcing aligned loads and stores when set to ForceAligned.
++  *                      The default is AsRequested. This parameter is internally used by Eigen
++  *                      in expressions such as \code Map<...>(...) += other; \endcode and most
++  *                      of the time this is the only way it is used.
++  *
++  * This class represents a matrix or vector expression mapping an existing array of data.
++  * It can be used to let Eigen interface without any overhead with non-Eigen data structures,
++  * such as plain C arrays or structures from other libraries.
++  *
++  * This class is the return type of Matrix::map() but can also be used directly.
++  *
++  * \sa Matrix::map()
++  */
++template<typename MatrixType, int _PacketAccess>
++struct ei_traits<Map<MatrixType, _PacketAccess> > : public ei_traits<MatrixType>
++{
++  enum {
++    PacketAccess = _PacketAccess,
++    Flags = ei_traits<MatrixType>::Flags & ~AlignedBit
++  };
++  typedef typename ei_meta_if<int(PacketAccess)==ForceAligned,
++                              Map<MatrixType, _PacketAccess>&,
++                              Map<MatrixType, ForceAligned> >::ret AlignedDerivedType;
++};
++
++template<typename MatrixType, int PacketAccess> class Map
++  : public MapBase<Map<MatrixType, PacketAccess> >
++{
++  public:
++
++    _EIGEN_GENERIC_PUBLIC_INTERFACE(Map, MapBase<Map>)
++    typedef typename ei_traits<Map>::AlignedDerivedType AlignedDerivedType;
++
++    inline int stride() const { return this->innerSize(); }
++
++    AlignedDerivedType forceAligned()
++    {
++      if (PacketAccess==ForceAligned)
++        return *this;
++      else
++        return Map<MatrixType,ForceAligned>(Base::m_data, Base::m_rows.value(), Base::m_cols.value());
++    }
++
++    inline Map(const Scalar* data) : Base(data) {}
++
++    inline Map(const Scalar* data, int size) : Base(data, size) {}
++
++    inline Map(const Scalar* data, int rows, int cols) : Base(data, rows, cols) {}
++
++    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Map)
++};
++
++/** Constructor copying an existing array of data.
++  * Only for fixed-size matrices and vectors.
++  * \param data The array of data to copy
++  *
++  * For dynamic-size matrices and vectors, see the variants taking additional int parameters
++  * for the dimensions.
++  *
++  * \sa Matrix(const Scalar *, int), Matrix(const Scalar *, int, int),
++  * Matrix::map(const Scalar *)
++  */
++template<typename _Scalar, int _Rows, int _Cols, int _MaxRows, int _MaxCols, unsigned int _Flags>
++inline Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags>
++  ::Matrix(const Scalar *data)
++{
++  *this = Map<Matrix>(data);
++}
++
++#endif // EIGEN_MAP_H
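A minimal sketch (not part of the patch) of Map wrapping a plain C array without copying, as described above. It assumes the embedded Eigen tree is on the include path and the default value of the _PacketAccess template parameter:

    #include <Eigen/Core>
    #include <iostream>
    using namespace Eigen;

    int main()
    {
      double data[6] = {1, 2, 3, 4, 5, 6};
      // View the existing array as a dynamic-size 2x3 matrix; column-major storage,
      // so the columns are (1, 2), (3, 4) and (5, 6).
      Map<MatrixXd> m(data, 2, 3);
      std::cout << m << "\n";
      m(0, 0) = 10.0;                 // writes through to data[0]
      std::cout << data[0] << "\n";   // 10
      return 0;
    }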
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/MathFunctions.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/MathFunctions.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/MathFunctions.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/MathFunctions.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,203 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_MATHFUNCTIONS_H
++#define EIGEN_MATHFUNCTIONS_H
++
++template<typename T> inline typename NumTraits<T>::Real precision();
++template<typename T> inline T ei_random(T a, T b);
++template<typename T> inline T ei_random();
++template<typename T> inline T ei_random_amplitude()
++{
++  if(NumTraits<T>::HasFloatingPoint) return static_cast<T>(1);
++  else return static_cast<T>(10);
++}
++
++template<> inline int precision<int>() { return 0; }
++inline int ei_real(int x)  { return x; }
++inline int ei_imag(int)    { return 0; }
++inline int ei_conj(int x)  { return x; }
++inline int ei_abs(int x)   { return abs(x); }
++inline int ei_abs2(int x)  { return x*x; }
++inline int ei_sqrt(int)  { ei_assert(false); return 0; }
++inline int ei_exp(int)  { ei_assert(false); return 0; }
++inline int ei_log(int)  { ei_assert(false); return 0; }
++inline int ei_sin(int)  { ei_assert(false); return 0; }
++inline int ei_cos(int)  { ei_assert(false); return 0; }
++
++#if EIGEN_GNUC_AT_LEAST(4,3)
++inline int ei_pow(int x, int y) { return std::pow(x, y); }
++#else
++inline int ei_pow(int x, int y) { return int(std::pow(double(x), y)); }
++#endif
++
++template<> inline int ei_random(int a, int b)
++{
++  // We can't just do rand()%n as only the high-order bits are really random
++  return a + static_cast<int>((b-a+1) * (rand() / (RAND_MAX + 1.0)));
++}
++template<> inline int ei_random()
++{
++  return ei_random<int>(-ei_random_amplitude<int>(), ei_random_amplitude<int>());
++}
++inline bool ei_isMuchSmallerThan(int a, int, int = precision<int>())
++{
++  return a == 0;
++}
++inline bool ei_isApprox(int a, int b, int = precision<int>())
++{
++  return a == b;
++}
++inline bool ei_isApproxOrLessThan(int a, int b, int = precision<int>())
++{
++  return a <= b;
++}
++
++template<> inline float precision<float>() { return 1e-5f; }
++inline float ei_real(float x)  { return x; }
++inline float ei_imag(float)    { return 0.f; }
++inline float ei_conj(float x)  { return x; }
++inline float ei_abs(float x)   { return std::abs(x); }
++inline float ei_abs2(float x)  { return x*x; }
++inline float ei_sqrt(float x)  { return std::sqrt(x); }
++inline float ei_exp(float x)  { return std::exp(x); }
++inline float ei_log(float x)  { return std::log(x); }
++inline float ei_sin(float x)  { return std::sin(x); }
++inline float ei_cos(float x)  { return std::cos(x); }
++inline float ei_pow(float x, float y)  { return std::pow(x, y); }
++
++template<> inline float ei_random(float a, float b)
++{
++  return a + (b-a) * std::rand() / RAND_MAX;
++}
++template<> inline float ei_random()
++{
++  return ei_random<float>(-ei_random_amplitude<float>(), ei_random_amplitude<float>());
++}
++inline bool ei_isMuchSmallerThan(float a, float b, float prec = precision<float>())
++{
++  return ei_abs(a) <= ei_abs(b) * prec;
++}
++inline bool ei_isApprox(float a, float b, float prec = precision<float>())
++{
++  return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
++}
++inline bool ei_isApproxOrLessThan(float a, float b, float prec = precision<float>())
++{
++  return a <= b || ei_isApprox(a, b, prec);
++}
++
++template<> inline double precision<double>() { return 1e-11; }
++inline double ei_real(double x)  { return x; }
++inline double ei_imag(double)    { return 0.; }
++inline double ei_conj(double x)  { return x; }
++inline double ei_abs(double x)   { return std::abs(x); }
++inline double ei_abs2(double x)  { return x*x; }
++inline double ei_sqrt(double x)  { return std::sqrt(x); }
++inline double ei_exp(double x)  { return std::exp(x); }
++inline double ei_log(double x)  { return std::log(x); }
++inline double ei_sin(double x)  { return std::sin(x); }
++inline double ei_cos(double x)  { return std::cos(x); }
++inline double ei_pow(double x, double y)  { return std::pow(x, y); }
++
++template<> inline double ei_random(double a, double b)
++{
++  return a + (b-a) * std::rand() / RAND_MAX;
++}
++template<> inline double ei_random()
++{
++  return ei_random<double>(-ei_random_amplitude<double>(), ei_random_amplitude<double>());
++}
++inline bool ei_isMuchSmallerThan(double a, double b, double prec = precision<double>())
++{
++  return ei_abs(a) <= ei_abs(b) * prec;
++}
++inline bool ei_isApprox(double a, double b, double prec = precision<double>())
++{
++  return ei_abs(a - b) <= std::min(ei_abs(a), ei_abs(b)) * prec;
++}
++inline bool ei_isApproxOrLessThan(double a, double b, double prec = precision<double>())
++{
++  return a <= b || ei_isApprox(a, b, prec);
++}
++
++template<> inline float precision<std::complex<float> >() { return precision<float>(); }
++inline float ei_real(const std::complex<float>& x) { return std::real(x); }
++inline float ei_imag(const std::complex<float>& x) { return std::imag(x); }
++inline std::complex<float> ei_conj(const std::complex<float>& x) { return std::conj(x); }
++inline float ei_abs(const std::complex<float>& x) { return std::abs(x); }
++inline float ei_abs2(const std::complex<float>& x) { return std::norm(x); }
++inline std::complex<float> ei_exp(std::complex<float> x)  { return std::exp(x); }
++inline std::complex<float> ei_sin(std::complex<float> x)  { return std::sin(x); }
++inline std::complex<float> ei_cos(std::complex<float> x)  { return std::cos(x); }
++
++template<> inline std::complex<float> ei_random()
++{
++  return std::complex<float>(ei_random<float>(), ei_random<float>());
++}
++inline bool ei_isMuchSmallerThan(const std::complex<float>& a, const std::complex<float>& b, float prec = precision<float>())
++{
++  return ei_abs2(a) <= ei_abs2(b) * prec * prec;
++}
++inline bool ei_isMuchSmallerThan(const std::complex<float>& a, float b, float prec = precision<float>())
++{
++  return ei_abs2(a) <= ei_abs2(b) * prec * prec;
++}
++inline bool ei_isApprox(const std::complex<float>& a, const std::complex<float>& b, float prec = precision<float>())
++{
++  return ei_isApprox(ei_real(a), ei_real(b), prec)
++      && ei_isApprox(ei_imag(a), ei_imag(b), prec);
++}
++// ei_isApproxOrLessThan wouldn't make sense for complex numbers
++
++template<> inline double precision<std::complex<double> >() { return precision<double>(); }
++inline double ei_real(const std::complex<double>& x) { return std::real(x); }
++inline double ei_imag(const std::complex<double>& x) { return std::imag(x); }
++inline std::complex<double> ei_conj(const std::complex<double>& x) { return std::conj(x); }
++inline double ei_abs(const std::complex<double>& x) { return std::abs(x); }
++inline double ei_abs2(const std::complex<double>& x) { return std::norm(x); }
++inline std::complex<double> ei_exp(std::complex<double> x)  { return std::exp(x); }
++inline std::complex<double> ei_sin(std::complex<double> x)  { return std::sin(x); }
++inline std::complex<double> ei_cos(std::complex<double> x)  { return std::cos(x); }
++
++template<> inline std::complex<double> ei_random()
++{
++  return std::complex<double>(ei_random<double>(), ei_random<double>());
++}
++inline bool ei_isMuchSmallerThan(const std::complex<double>& a, const std::complex<double>& b, double prec = precision<double>())
++{
++  return ei_abs2(a) <= ei_abs2(b) * prec * prec;
++}
++inline bool ei_isMuchSmallerThan(const std::complex<double>& a, double b, double prec = precision<double>())
++{
++  return ei_abs2(a) <= ei_abs2(b) * prec * prec;
++}
++inline bool ei_isApprox(const std::complex<double>& a, const std::complex<double>& b, double prec = precision<double>())
++{
++  return ei_isApprox(ei_real(a), ei_real(b), prec)
++      && ei_isApprox(ei_imag(a), ei_imag(b), prec);
++}
++// ei_isApproxOrLessThan wouldn't make sense for complex numbers
++
++#endif // EIGEN_MATHFUNCTIONS_H
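The float/double helpers above all implement the same relative-precision rule. A minimal standalone restatement in plain C++ (is_approx_demo is an invented name; prec plays the role of precision<float>() above):

    #include <algorithm>
    #include <cmath>
    #include <iostream>

    // Mirrors ei_isApprox<float> above: |a - b| <= min(|a|, |b|) * prec.
    static bool is_approx_demo(float a, float b, float prec = 1e-5f)
    {
      return std::abs(a - b) <= std::min(std::abs(a), std::abs(b)) * prec;
    }

    int main()
    {
      std::cout << is_approx_demo(1.0f, 1.0f + 1e-7f) << "\n"; // 1: within relative tolerance
      std::cout << is_approx_demo(1e-8f, 0.0f) << "\n";        // 0: relative compare against 0
      return 0;
    }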
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/MatrixBase.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/MatrixBase.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/MatrixBase.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/MatrixBase.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,577 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_MATRIXBASE_H
++#define EIGEN_MATRIXBASE_H
++
++/** \class MatrixBase
++  *
++  * \brief Base class for all matrices, vectors, and expressions
++  *
++  * This class is the base that is inherited by all matrix, vector, and expression
++  * types. Most of the Eigen API is contained in this class. Other important classes for
++  * the Eigen API are Matrix, Cwise, and PartialRedux.
++  *
++  * Note that some methods are defined in the \ref Array module.
++  *
++  * \param Derived is the derived type, e.g. a matrix type, or an expression, etc.
++  *
++  * When writing a function taking Eigen objects as argument, if you want your function
++  * to take as argument any matrix, vector, or expression, just let it take a
++  * MatrixBase argument. As an example, here is a function printFirstRow which, given
++  * a matrix, vector, or expression \a x, prints the first row of \a x.
++  *
++  * \code
++    template<typename Derived>
++    void printFirstRow(const Eigen::MatrixBase<Derived>& x)
++    {
++      cout << x.row(0) << endl;
++    }
++  * \endcode
++  *
++  */
++template<typename Derived> class MatrixBase
++{
++    struct CommaInitializer;
++
++  public:
++
++    class InnerIterator;
++
++    typedef typename ei_traits<Derived>::Scalar Scalar;
++    typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++
++    enum {
++
++      RowsAtCompileTime = ei_traits<Derived>::RowsAtCompileTime,
++        /**< The number of rows at compile-time. This is just a copy of the value provided
++          * by the \a Derived type. If a value is not known at compile-time,
++          * it is set to the \a Dynamic constant.
++          * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
++
++      ColsAtCompileTime = ei_traits<Derived>::ColsAtCompileTime,
++        /**< The number of columns at compile-time. This is just a copy of the value provided
++          * by the \a Derived type. If a value is not known at compile-time,
++          * it is set to the \a Dynamic constant.
++          * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
++
++
++      SizeAtCompileTime = (ei_size_at_compile_time<ei_traits<Derived>::RowsAtCompileTime,
++                                                   ei_traits<Derived>::ColsAtCompileTime>::ret),
++        /**< This is equal to the number of coefficients, i.e. the number of
++          * rows times the number of columns, or to \a Dynamic if this is not
++          * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
++
++      MaxRowsAtCompileTime = ei_traits<Derived>::MaxRowsAtCompileTime,
++        /**< This value is equal to the maximum possible number of rows that this expression
++          * might have. If this expression might have an arbitrarily high number of rows,
++          * this value is set to \a Dynamic.
++          *
++          * This value is useful to know when evaluating an expression, in order to determine
++          * whether it is possible to avoid doing a dynamic memory allocation.
++          *
++          * \sa RowsAtCompileTime, MaxColsAtCompileTime, MaxSizeAtCompileTime
++          */
++
++      MaxColsAtCompileTime = ei_traits<Derived>::MaxColsAtCompileTime,
++        /**< This value is equal to the maximum possible number of columns that this expression
++          * might have. If this expression might have an arbitrarily high number of columns,
++          * this value is set to \a Dynamic.
++          *
++          * This value is useful to know when evaluating an expression, in order to determine
++          * whether it is possible to avoid doing a dynamic memory allocation.
++          *
++          * \sa ColsAtCompileTime, MaxRowsAtCompileTime, MaxSizeAtCompileTime
++          */
++
++      MaxSizeAtCompileTime = (ei_size_at_compile_time<ei_traits<Derived>::MaxRowsAtCompileTime,
++                                                      ei_traits<Derived>::MaxColsAtCompileTime>::ret),
++        /**< This value is equal to the maximum possible number of coefficients that this expression
++          * might have. If this expression might have an arbitrarily high number of coefficients,
++          * this value is set to \a Dynamic.
++          *
++          * This value is useful to know when evaluating an expression, in order to determine
++          * whether it is possible to avoid doing a dynamic memory allocation.
++          *
++          * \sa SizeAtCompileTime, MaxRowsAtCompileTime, MaxColsAtCompileTime
++          */
++
++      IsVectorAtCompileTime = ei_traits<Derived>::RowsAtCompileTime == 1
++                           || ei_traits<Derived>::ColsAtCompileTime == 1,
++        /**< This is set to true if either the number of rows or the number of
++          * columns is known at compile-time to be equal to 1. Indeed, in that case,
++          * we are dealing with a column-vector (if there is only one column) or with
++          * a row-vector (if there is only one row). */
++
++      Flags = ei_traits<Derived>::Flags,
++        /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
++          * constructed from this one. See the \ref flags "list of flags".
++          */
++
++      CoeffReadCost = ei_traits<Derived>::CoeffReadCost
++        /**< This is a rough measure of how expensive it is to read one coefficient from
++          * this expression.
++          */
++    };
++
++    /** Default constructor. Just checks at compile-time for self-consistency of the flags. */
++    MatrixBase()
++    {
++      ei_assert(ei_are_flags_consistent<Flags>::ret);
++    }
++
++    /** This is the "real scalar" type; if the \a Scalar type is already a real number type
++      * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
++      * \a Scalar is \a std::complex<T> then RealScalar is \a T.
++      *
++      * \sa class NumTraits
++      */
++    typedef typename NumTraits<Scalar>::Real RealScalar;
++
++    /** type of the equivalent square matrix */
++    typedef Matrix<Scalar,EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime),
++                          EIGEN_ENUM_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
++
++    /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
++    inline int rows() const { return derived().rows(); }
++    /** \returns the number of columns. \sa rows(), ColsAtCompileTime */
++    inline int cols() const { return derived().cols(); }
++    /** \returns the number of coefficients, which is \a rows()*cols().
++      * \sa rows(), cols(), SizeAtCompileTime. */
++    inline int size() const { return rows() * cols(); }
++    /** \returns the number of nonzero coefficients which is in practice the number
++      * of stored coefficients. */
++    inline int nonZeros() const { return derived().nonZeros(); }
++    /** \returns true if either the number of rows or the number of columns is equal to 1.
++      * In other words, this function returns
++      * \code rows()==1 || cols()==1 \endcode
++      * \sa rows(), cols(), IsVectorAtCompileTime. */
++    inline bool isVector() const { return rows()==1 || cols()==1; }
++    /** \returns the size of the storage major dimension,
++      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
++    int outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
++    /** \returns the size of the inner dimension according to the storage order,
++      * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
++    int innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
++
++    /** \internal the type to which the expression gets evaluated (needed by MSVC) */
++    typedef typename ei_eval<Derived>::type EvalType;
++    /** \internal Represents a constant matrix */
++    typedef CwiseNullaryOp<ei_scalar_constant_op<Scalar>,Derived> ConstantReturnType;
++    /** \internal Represents a scalar multiple of a matrix */
++    typedef CwiseUnaryOp<ei_scalar_multiple_op<Scalar>, Derived> ScalarMultipleReturnType;
++    /** \internal Represents a quotient of a matrix by a scalar*/
++    typedef CwiseUnaryOp<ei_scalar_quotient1_op<Scalar>, Derived> ScalarQuotient1ReturnType;
++    /** \internal the return type of MatrixBase::conjugate() */
++    typedef typename ei_meta_if<NumTraits<Scalar>::IsComplex,
++                        const CwiseUnaryOp<ei_scalar_conjugate_op<Scalar>, Derived>,
++                        const Derived&
++                     >::ret ConjugateReturnType;
++    /** \internal the return type of MatrixBase::real() */
++    typedef CwiseUnaryOp<ei_scalar_real_op<Scalar>, Derived> RealReturnType;
++    /** \internal the return type of MatrixBase::adjoint() */
++    typedef Transpose<NestByValue<typename ei_cleantype<ConjugateReturnType>::type> >
++            AdjointReturnType;
++    /** \internal the return type of MatrixBase::eigenvalues() */
++    typedef Matrix<typename NumTraits<typename ei_traits<Derived>::Scalar>::Real, ei_traits<Derived>::ColsAtCompileTime, 1> EigenvaluesReturnType;
++    /** \internal expression type of a column */
++    typedef Block<Derived, ei_traits<Derived>::RowsAtCompileTime, 1> ColXpr;
++    /** \internal expression type of a row */
++    typedef Block<Derived, 1, ei_traits<Derived>::ColsAtCompileTime> RowXpr;
++    /** \internal the return type of identity */
++    typedef CwiseNullaryOp<ei_scalar_identity_op<Scalar>,Derived> IdentityReturnType;
++    /** \internal the return type of unit vectors */
++    typedef Block<CwiseNullaryOp<ei_scalar_identity_op<Scalar>, SquareMatrixType>,
++                  ei_traits<Derived>::RowsAtCompileTime,
++                  ei_traits<Derived>::ColsAtCompileTime> BasisReturnType;
++
++
++    /** Copies \a other into *this. \returns a reference to *this. */
++    template<typename OtherDerived>
++    Derived& operator=(const MatrixBase<OtherDerived>& other);
++
++    /** Copies \a other into *this without evaluating other. \returns a reference to *this. */
++    template<typename OtherDerived>
++    Derived& lazyAssign(const MatrixBase<OtherDerived>& other);
++
++    /** Special case of the template operator=, in order to prevent the compiler
++      * from generating a default operator= (issue hit with g++ 4.1)
++      */
++    inline Derived& operator=(const MatrixBase& other)
++    {
++      return this->operator=<Derived>(other);
++    }
++
++    /** Overloaded for cache friendly product evaluation */
++    template<typename Lhs, typename Rhs>
++    Derived& lazyAssign(const Product<Lhs,Rhs,CacheFriendlyProduct>& product);
++
++    /** Overloaded for cache friendly product evaluation */
++    template<typename OtherDerived>
++    Derived& lazyAssign(const Flagged<OtherDerived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other)
++    { return lazyAssign(other._expression()); }
++
++    /** Overloaded for sparse product evaluation */
++    template<typename Derived1, typename Derived2>
++    Derived& lazyAssign(const Product<Derived1,Derived2,SparseProduct>& product);
++
++    CommaInitializer operator<< (const Scalar& s);
++
++    template<typename OtherDerived>
++    CommaInitializer operator<< (const MatrixBase<OtherDerived>& other);
++
++    const Scalar coeff(int row, int col) const;
++    const Scalar operator()(int row, int col) const;
++
++    Scalar& coeffRef(int row, int col);
++    Scalar& operator()(int row, int col);
++
++    const Scalar coeff(int index) const;
++    const Scalar operator[](int index) const;
++    const Scalar operator()(int index) const;
++
++    Scalar& coeffRef(int index);
++    Scalar& operator[](int index);
++    Scalar& operator()(int index);
++
++    template<typename OtherDerived>
++    void copyCoeff(int row, int col, const MatrixBase<OtherDerived>& other);
++    template<typename OtherDerived>
++    void copyCoeff(int index, const MatrixBase<OtherDerived>& other);
++    template<typename OtherDerived, int StoreMode, int LoadMode>
++    void copyPacket(int row, int col, const MatrixBase<OtherDerived>& other);
++    template<typename OtherDerived, int StoreMode, int LoadMode>
++    void copyPacket(int index, const MatrixBase<OtherDerived>& other);
++
++    template<int LoadMode>
++    PacketScalar packet(int row, int col) const;
++    template<int StoreMode>
++    void writePacket(int row, int col, const PacketScalar& x);
++
++    template<int LoadMode>
++    PacketScalar packet(int index) const;
++    template<int StoreMode>
++    void writePacket(int index, const PacketScalar& x);
++
++    const Scalar x() const;
++    const Scalar y() const;
++    const Scalar z() const;
++    const Scalar w() const;
++    Scalar& x();
++    Scalar& y();
++    Scalar& z();
++    Scalar& w();
++
++
++    const CwiseUnaryOp<ei_scalar_opposite_op<typename ei_traits<Derived>::Scalar>,Derived> operator-() const;
++
++    template<typename OtherDerived>
++    const CwiseBinaryOp<ei_scalar_sum_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
++    operator+(const MatrixBase<OtherDerived> &other) const;
++
++    template<typename OtherDerived>
++    const CwiseBinaryOp<ei_scalar_difference_op<typename ei_traits<Derived>::Scalar>, Derived, OtherDerived>
++    operator-(const MatrixBase<OtherDerived> &other) const;
++
++    template<typename OtherDerived>
++    Derived& operator+=(const MatrixBase<OtherDerived>& other);
++    template<typename OtherDerived>
++    Derived& operator-=(const MatrixBase<OtherDerived>& other);
++
++    template<typename Lhs,typename Rhs>
++    Derived& operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other);
++
++    Derived& operator*=(const Scalar& other);
++    Derived& operator/=(const Scalar& other);
++
++    const ScalarMultipleReturnType operator*(const Scalar& scalar) const;
++    const CwiseUnaryOp<ei_scalar_quotient1_op<typename ei_traits<Derived>::Scalar>, Derived>
++    operator/(const Scalar& scalar) const;
++
++    inline friend const CwiseUnaryOp<ei_scalar_multiple_op<typename ei_traits<Derived>::Scalar>, Derived>
++    operator*(const Scalar& scalar, const MatrixBase& matrix)
++    { return matrix*scalar; }
++
++
++    template<typename OtherDerived>
++    const typename ProductReturnType<Derived,OtherDerived>::Type
++    operator*(const MatrixBase<OtherDerived> &other) const;
++
++    template<typename OtherDerived>
++    Derived& operator*=(const MatrixBase<OtherDerived>& other);
++
++    template<typename OtherDerived>
++    typename OtherDerived::Eval solveTriangular(const MatrixBase<OtherDerived>& other) const;
++
++    template<typename OtherDerived>
++    void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
++
++
++    template<typename OtherDerived>
++    Scalar dot(const MatrixBase<OtherDerived>& other) const;
++    RealScalar norm2() const;
++    RealScalar norm()  const;
++    const EvalType normalized() const;
++    void normalize();
++
++    Transpose<Derived> transpose();
++    const Transpose<Derived> transpose() const;
++    const AdjointReturnType adjoint() const;
++
++
++    RowXpr row(int i);
++    const RowXpr row(int i) const;
++
++    ColXpr col(int i);
++    const ColXpr col(int i) const;
++
++    Minor<Derived> minor(int row, int col);
++    const Minor<Derived> minor(int row, int col) const;
++
++    typename BlockReturnType<Derived>::Type block(int startRow, int startCol, int blockRows, int blockCols);
++    const typename BlockReturnType<Derived>::Type
++    block(int startRow, int startCol, int blockRows, int blockCols) const;
++
++    typename BlockReturnType<Derived>::SubVectorType block(int start, int size);
++    const typename BlockReturnType<Derived>::SubVectorType block(int start, int size) const;
++
++    typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size);
++    const typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size) const;
++
++    typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size);
++    const typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size) const;
++
++    typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols);
++    const typename BlockReturnType<Derived>::Type corner(CornerType type, int cRows, int cCols) const;
++
++    template<int BlockRows, int BlockCols>
++    typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol);
++    template<int BlockRows, int BlockCols>
++    const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol) const;
++
++    template<int CRows, int CCols>
++    typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type);
++    template<int CRows, int CCols>
++    const typename BlockReturnType<Derived, CRows, CCols>::Type corner(CornerType type) const;
++
++    template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType start(void);
++    template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType start() const;
++
++    template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType end();
++    template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType end() const;
++
++    DiagonalCoeffs<Derived> diagonal();
++    const DiagonalCoeffs<Derived> diagonal() const;
++
++    template<unsigned int Mode> Part<Derived, Mode> part();
++    template<unsigned int Mode> const Part<Derived, Mode> part() const;
++
++
++    static const ConstantReturnType
++    Constant(int rows, int cols, const Scalar& value);
++    static const ConstantReturnType
++    Constant(int size, const Scalar& value);
++    static const ConstantReturnType
++    Constant(const Scalar& value);
++
++    template<typename CustomNullaryOp>
++    static const CwiseNullaryOp<CustomNullaryOp, Derived>
++    NullaryExpr(int rows, int cols, const CustomNullaryOp& func);
++    template<typename CustomNullaryOp>
++    static const CwiseNullaryOp<CustomNullaryOp, Derived>
++    NullaryExpr(int size, const CustomNullaryOp& func);
++    template<typename CustomNullaryOp>
++    static const CwiseNullaryOp<CustomNullaryOp, Derived>
++    NullaryExpr(const CustomNullaryOp& func);
++
++    static const ConstantReturnType Zero(int rows, int cols);
++    static const ConstantReturnType Zero(int size);
++    static const ConstantReturnType Zero();
++    static const ConstantReturnType Ones(int rows, int cols);
++    static const ConstantReturnType Ones(int size);
++    static const ConstantReturnType Ones();
++    static const IdentityReturnType Identity();
++    static const IdentityReturnType Identity(int rows, int cols);
++    static const BasisReturnType Unit(int size, int i);
++    static const BasisReturnType Unit(int i);
++    static const BasisReturnType UnitX();
++    static const BasisReturnType UnitY();
++    static const BasisReturnType UnitZ();
++    static const BasisReturnType UnitW();
++
++    const DiagonalMatrix<Derived> asDiagonal() const;
++
++    Derived& setConstant(const Scalar& value);
++    Derived& setZero();
++    Derived& setOnes();
++    Derived& setRandom();
++    Derived& setIdentity();
++
++
++    template<typename OtherDerived>
++    bool isApprox(const MatrixBase<OtherDerived>& other,
++                  RealScalar prec = precision<Scalar>()) const;
++    bool isMuchSmallerThan(const RealScalar& other,
++                           RealScalar prec = precision<Scalar>()) const;
++    template<typename OtherDerived>
++    bool isMuchSmallerThan(const MatrixBase<OtherDerived>& other,
++                           RealScalar prec = precision<Scalar>()) const;
++
++    bool isApproxToConstant(const Scalar& value, RealScalar prec = precision<Scalar>()) const;
++    bool isZero(RealScalar prec = precision<Scalar>()) const;
++    bool isOnes(RealScalar prec = precision<Scalar>()) const;
++    bool isIdentity(RealScalar prec = precision<Scalar>()) const;
++    bool isDiagonal(RealScalar prec = precision<Scalar>()) const;
++
++    bool isUpper(RealScalar prec = precision<Scalar>()) const;
++    bool isLower(RealScalar prec = precision<Scalar>()) const;
++
++    template<typename OtherDerived>
++    bool isOrthogonal(const MatrixBase<OtherDerived>& other,
++                      RealScalar prec = precision<Scalar>()) const;
++    bool isUnitary(RealScalar prec = precision<Scalar>()) const;
++
++    template<typename OtherDerived>
++    inline bool operator==(const MatrixBase<OtherDerived>& other) const
++    { return (cwise() == other).all(); }
++
++    template<typename OtherDerived>
++    inline bool operator!=(const MatrixBase<OtherDerived>& other) const
++    { return (cwise() != other).any(); }
++
++
++    template<typename NewType>
++    const CwiseUnaryOp<ei_scalar_cast_op<typename ei_traits<Derived>::Scalar, NewType>, Derived> cast() const;
++
++    /** \returns the matrix or vector obtained by evaluating this expression.
++      *
++      */
++    EIGEN_ALWAYS_INLINE const typename ei_eval<Derived>::type eval() const
++    {
++      return typename ei_eval<Derived>::type(derived());
++    }
++
++    template<typename OtherDerived>
++    void swap(const MatrixBase<OtherDerived>& other);
++
++    template<unsigned int Added>
++    const Flagged<Derived, Added, 0> marked() const;
++    const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit> lazy() const;
++
++    /** \returns number of elements to skip to pass from one row (resp. column) to another
++      * for a row-major (resp. column-major) matrix.
++      * Combined with coeffRef() and the \ref flags flags, it allows direct access to the data
++      * of the underlying matrix.
++      */
++    inline int stride(void) const { return derived().stride(); }
++
++    inline const NestByValue<Derived> nestByValue() const;
++
++
++    ConjugateReturnType conjugate() const;
++    const RealReturnType real() const;
++
++    template<typename CustomUnaryOp>
++    const CwiseUnaryOp<CustomUnaryOp, Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const;
++
++    template<typename CustomBinaryOp, typename OtherDerived>
++    const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
++    binaryExpr(const MatrixBase<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const;
++
++
++    Scalar sum() const;
++    Scalar trace() const;
++
++    typename ei_traits<Derived>::Scalar minCoeff() const;
++    typename ei_traits<Derived>::Scalar maxCoeff() const;
++
++    typename ei_traits<Derived>::Scalar minCoeff(int* row, int* col = 0) const;
++    typename ei_traits<Derived>::Scalar maxCoeff(int* row, int* col = 0) const;
++
++    template<typename BinaryOp>
++    typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type
++    redux(const BinaryOp& func) const;
++
++    template<typename Visitor>
++    void visit(Visitor& func) const;
++
++
++    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
++    inline Derived& derived() { return *static_cast<Derived*>(this); }
++    inline Derived& const_cast_derived() const
++    { return *static_cast<Derived*>(const_cast<MatrixBase*>(this)); }
++
++    const Cwise<Derived> cwise() const;
++    Cwise<Derived> cwise();
++
++/////////// Array module ///////////
++
++    bool all(void) const;
++    bool any(void) const;
++
++    const PartialRedux<Derived,Horizontal> rowwise() const;
++    const PartialRedux<Derived,Vertical> colwise() const;
++
++    static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
++    static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random(int size);
++    static const CwiseNullaryOp<ei_scalar_random_op<Scalar>,Derived> Random();
++
++
++/////////// LU module ///////////
++
++    const LU<EvalType> lu() const;
++    const EvalType inverse() const;
++    void computeInverse(EvalType *result) const;
++    Scalar determinant() const;
++
++/////////// Cholesky module ///////////
++
++    const Cholesky<EvalType> cholesky() const;
++    const CholeskyWithoutSquareRoot<EvalType> choleskyNoSqrt() const;
++
++/////////// QR module ///////////
++
++    const QR<EvalType> qr() const;
++
++    EigenvaluesReturnType eigenvalues() const;
++    RealScalar operatorNorm() const;
++
++/////////// SVD module ///////////
++
++    const SVD<EvalType> svd() const;
++
++/////////// Geometry module ///////////
++
++    template<typename OtherDerived>
++    EvalType cross(const MatrixBase<OtherDerived>& other) const;
++    EvalType someOrthogonal(void) const;
++    
++    /**
++     */
++    #ifdef EIGEN_MATRIXBASE_PLUGIN
++    #include EIGEN_MATRIXBASE_PLUGIN
++    #endif
++};
++
++#endif // EIGEN_MATRIXBASE_H
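
As a quick illustration of the MatrixBase interface declared above, here is a minimal
usage sketch (illustration only, not part of the patch; the demo function name is made
up, and only the embedded Eigen/Core header is assumed):

    #include <Eigen/Core>
    using namespace Eigen;

    void matrixbase_demo()
    {
      Matrix3d m;
      m.setIdentity();                                     // MatrixBase::setIdentity()
      Vector3d v = Vector3d::UnitX() + Vector3d::UnitY();  // basis-vector factories and operator+
      double   d = v.dot(v);                               // dot product (equals norm2() for real vectors)
      Matrix3d t = m.transpose();                          // transposition, evaluated into t
      Vector3d c = m.col(0);                               // ColXpr column expression
      bool     z = (m - t).isZero();                       // fuzzy comparison on an expression
      (void)d; (void)c; (void)z;
    }
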
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Matrix.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Matrix.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Matrix.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Matrix.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,448 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_MATRIX_H
++#define EIGEN_MATRIX_H
++
++
++/** \class Matrix
++  *
++  * \brief The matrix class, also used for vectors and row-vectors
++  *
++  * \param _Scalar the scalar type, i.e. the type of the coefficients
++  * \param _Rows the number of rows at compile-time. Use the special value \a Dynamic to
++  *              specify that the number of rows is dynamic, i.e. is not fixed at compile-time.
++  * \param _Cols the number of columns at compile-time. Use the special value \a Dynamic to
++  *              specify that the number of columns is dynamic, i.e. is not fixed at compile-time.
++  * \param _MaxRows the maximum number of rows at compile-time. By default this is equal to \a _Rows.
++  *              The most common exception is when you don't know the exact number of rows, but know that
++  *              it is smaller than some given value. Then you can set \a _MaxRows to that value, and set
++  *              _Rows to \a Dynamic.
++  * \param _MaxCols the maximum number of cols at compile-time. By default this is equal to \a _Cols.
++  *              The most common exception is when you don't know the exact number of cols, but know that
++  *              it is smaller than some given value. Then you can set \a _MaxCols to that value, and set
++  *              _Cols to \a Dynamic.
++  * \param _Flags allows controlling certain features such as the storage order. See the \ref flags "list of flags".
++  *
++  * This single class template covers all kinds of matrices and vectors that Eigen can handle.
++  * All matrix and vector types are just typedefs to specializations of this class template.
++  *
++  * These typedefs are as follows:
++  * \li \c %Matrix\#\#Size\#\#Type for square matrices
++  * \li \c Vector\#\#Size\#\#Type for vectors (matrices with one column)
++  * \li \c RowVector\#\#Size\#\#Type for row-vectors (matrices with one row)
++  *
++  * where \c Size can be
++  * \li \c 2 for fixed size 2
++  * \li \c 3 for fixed size 3
++  * \li \c 4 for fixed size 4
++  * \li \c X for dynamic size
++  *
++  * and \c Type can be
++  * \li \c i for type \c int
++  * \li \c f for type \c float
++  * \li \c d for type \c double
++  * \li \c cf for type \c std::complex<float>
++  * \li \c cd for type \c std::complex<double>
++  *
++  * Examples:
++  * \li \c Matrix2d is a typedef for \c Matrix<double,2,2>
++  * \li \c VectorXf is a typedef for \c Matrix<float,Dynamic,1>
++  * \li \c RowVector3i is a typedef for \c Matrix<int,1,3>
++  *
++  * See \ref matrixtypedefs for an explicit list of all matrix typedefs.
++  *
++  * Of course these typedefs do not exhaust all the possibilities offered by the Matrix class
++  * template; they only address some of the most common cases. For instance, if you want a
++  * fixed-size matrix with 3 rows and 5 columns, there is no typedef for that, so you should use
++  * \c Matrix<double,3,5>.
++  *
++  * Note that most of the API is in the base class MatrixBase.
++  */
++template<typename _Scalar, int _Rows, int _Cols, int _MaxRows, int _MaxCols, unsigned int _Flags>
++struct ei_traits<Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags> >
++{
++  typedef _Scalar Scalar;
++  enum {
++    RowsAtCompileTime = _Rows,
++    ColsAtCompileTime = _Cols,
++    MaxRowsAtCompileTime = _MaxRows,
++    MaxColsAtCompileTime = _MaxCols,
++    Flags = ei_corrected_matrix_flags<
++                _Scalar,
++                _Rows, _Cols, _MaxRows, _MaxCols,
++                _Flags
++            >::ret,
++    CoeffReadCost = NumTraits<Scalar>::ReadCost,
++    SupportedAccessPatterns = RandomAccessPattern
++  };
++};
++
++template<typename _Scalar, int _Rows, int _Cols, int _MaxRows, int _MaxCols, unsigned int _Flags>
++class Matrix : public MatrixBase<Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags> >
++{
++  public:
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Matrix)
++    friend class Eigen::Map<Matrix, Unaligned>;
++    friend class Eigen::Map<Matrix, Aligned>;
++
++  protected:
++    ei_matrix_storage<Scalar, MaxSizeAtCompileTime, RowsAtCompileTime, ColsAtCompileTime> m_storage;
++
++  public:
++
++    inline int rows() const { return m_storage.rows(); }
++    inline int cols() const { return m_storage.cols(); }
++
++    inline int stride(void) const
++    {
++      if(Flags & RowMajorBit)
++        return m_storage.cols();
++      else
++        return m_storage.rows();
++    }
++
++    inline const Scalar& coeff(int row, int col) const
++    {
++      if(Flags & RowMajorBit)
++        return m_storage.data()[col + row * m_storage.cols()];
++      else // column-major
++        return m_storage.data()[row + col * m_storage.rows()];
++    }
++
++    inline const Scalar& coeff(int index) const
++    {
++      return m_storage.data()[index];
++    }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      if(Flags & RowMajorBit)
++        return m_storage.data()[col + row * m_storage.cols()];
++      else // column-major
++        return m_storage.data()[row + col * m_storage.rows()];
++    }
++
++    inline Scalar& coeffRef(int index)
++    {
++      return m_storage.data()[index];
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int row, int col) const
++    {
++      return ei_ploadt<Scalar, LoadMode>
++               (m_storage.data() + (Flags & RowMajorBit
++                                   ? col + row * m_storage.cols()
++                                   : row + col * m_storage.rows()));
++    }
++
++    template<int LoadMode>
++    inline PacketScalar packet(int index) const
++    {
++      return ei_ploadt<Scalar, LoadMode>(m_storage.data() + index);
++    }
++
++    template<int StoreMode>
++    inline void writePacket(int row, int col, const PacketScalar& x)
++    {
++      ei_pstoret<Scalar, PacketScalar, StoreMode>
++              (m_storage.data() + (Flags & RowMajorBit
++                                   ? col + row * m_storage.cols()
++                                   : row + col * m_storage.rows()), x);
++    }
++
++    template<int StoreMode>
++    inline void writePacket(int index, const PacketScalar& x)
++    {
++      ei_pstoret<Scalar, PacketScalar, StoreMode>(m_storage.data() + index, x);
++    }
++
++    /** \returns a const pointer to the data array of this matrix */
++    inline const Scalar *data() const
++    { return m_storage.data(); }
++
++    /** \returns a pointer to the data array of this matrix */
++    inline Scalar *data()
++    { return m_storage.data(); }
++
++    inline void resize(int rows, int cols)
++    {
++      ei_assert(rows > 0
++          && (MaxRowsAtCompileTime == Dynamic || MaxRowsAtCompileTime >= rows)
++          && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == rows)
++          && cols > 0
++          && (MaxColsAtCompileTime == Dynamic || MaxColsAtCompileTime >= cols)
++          && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == cols));
++      m_storage.resize(rows * cols, rows, cols);
++    }
++
++    inline void resize(int size)
++    {
++      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix)
++      if(RowsAtCompileTime == 1)
++        m_storage.resize(size, 1, size);
++      else
++        m_storage.resize(size, size, 1);
++    }
++
++    /** Copies the value of the expression \a other into *this.
++      *
++      * *this is resized (if possible) to match the dimensions of \a other.
++      *
++      * As a special exception, copying a row-vector into a vector (and conversely)
++      * is allowed. The resizing, if any, is then done in the appropriate way so that
++      * row-vectors remain row-vectors and vectors remain vectors.
++      */
++    template<typename OtherDerived>
++    inline Matrix& operator=(const MatrixBase<OtherDerived>& other)
++    {
++      if(RowsAtCompileTime == 1)
++      {
++        ei_assert(other.isVector());
++        resize(1, other.size());
++      }
++      else if(ColsAtCompileTime == 1)
++      {
++        ei_assert(other.isVector());
++        resize(other.size(), 1);
++      }
++      else resize(other.rows(), other.cols());
++      return Base::operator=(other.derived());
++    }
++
++    /** This is a special case of the templated operator=. Its purpose is to
++      * prevent a default operator= from hiding the templated operator=.
++      */
++    inline Matrix& operator=(const Matrix& other)
++    {
++      return operator=<Matrix>(other);
++    }
++
++    EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Matrix, +=)
++    EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Matrix, -=)
++    EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Matrix, *=)
++    EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Matrix, /=)
++
++    /** Default constructor.
++      *
++      * For fixed-size matrices, does nothing.
++      *
++      * For dynamic-size matrices, initializes to size 1x1, which is inefficient; hence, when
++      * performance matters, one should avoid using this constructor on dynamic-size matrices.
++      */
++    inline explicit Matrix() : m_storage(1, 1, 1)
++    {
++      ei_assert(RowsAtCompileTime > 0 && ColsAtCompileTime > 0);
++    }
++
++    /** Constructs a vector or row-vector with given dimension. \only_for_vectors
++      *
++      * Note that this is only useful for dynamic-size vectors. For fixed-size vectors,
++      * it is redundant to pass the dimension here, so it makes more sense to use the default
++      * constructor Matrix() instead.
++      */
++    inline explicit Matrix(int dim)
++      : m_storage(dim, RowsAtCompileTime == 1 ? 1 : dim, ColsAtCompileTime == 1 ? 1 : dim)
++    {
++      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Matrix)
++      ei_assert(dim > 0);
++      ei_assert(SizeAtCompileTime == Dynamic || SizeAtCompileTime == dim);
++    }
++
++    /** This constructor has two very different behaviors, depending on the type of *this.
++      *
++      * \li When Matrix is a fixed-size vector type of size 2, this constructor constructs
++      *     an initialized vector. The parameters \a x, \a y are copied into the first and second
++      *     coords of the vector respectively.
++      * \li Otherwise, this constructor constructs an uninitialized matrix with \a x rows and
++      *     \a y columns. This is useful for dynamic-size matrices. For fixed-size matrices,
++      *     it is redundant to pass these parameters, so one should use the default constructor
++      *     Matrix() instead.
++      */
++    inline Matrix(int x, int y) : m_storage(x*y, x, y)
++    {
++      if((RowsAtCompileTime == 1 && ColsAtCompileTime == 2)
++      || (RowsAtCompileTime == 2 && ColsAtCompileTime == 1))
++      {
++        m_storage.data()[0] = Scalar(x);
++        m_storage.data()[1] = Scalar(y);
++      }
++      else
++      {
++        ei_assert(x > 0 && (RowsAtCompileTime == Dynamic || RowsAtCompileTime == x)
++               && y > 0 && (ColsAtCompileTime == Dynamic || ColsAtCompileTime == y));
++      }
++    }
++    /** constructs an initialized 2D vector with given coefficients */
++    inline Matrix(const float& x, const float& y)
++    {
++      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 2);
++      m_storage.data()[0] = x;
++      m_storage.data()[1] = y;
++    }
++    /** constructs an initialized 2D vector with given coefficients */
++    inline Matrix(const double& x, const double& y)
++    {
++      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 2);
++      m_storage.data()[0] = x;
++      m_storage.data()[1] = y;
++    }
++    /** constructs an initialized 3D vector with given coefficients */
++    inline Matrix(const Scalar& x, const Scalar& y, const Scalar& z)
++    {
++      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 3);
++      m_storage.data()[0] = x;
++      m_storage.data()[1] = y;
++      m_storage.data()[2] = z;
++    }
++    /** constructs an initialized 4D vector with given coefficients */
++    inline Matrix(const Scalar& x, const Scalar& y, const Scalar& z, const Scalar& w)
++    {
++      EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Matrix, 4);
++      m_storage.data()[0] = x;
++      m_storage.data()[1] = y;
++      m_storage.data()[2] = z;
++      m_storage.data()[3] = w;
++    }
++
++    explicit Matrix(const Scalar *data);
++
++    /** Constructor copying the value of the expression \a other */
++    template<typename OtherDerived>
++    inline Matrix(const MatrixBase<OtherDerived>& other)
++             : m_storage(other.rows() * other.cols(), other.rows(), other.cols())
++    {
++      Base::lazyAssign(other.derived());
++    }
++    /** Copy constructor */
++    inline Matrix(const Matrix& other)
++            : Base(), m_storage(other.rows() * other.cols(), other.rows(), other.cols())
++    {
++      Base::lazyAssign(other);
++    }
++    /** Destructor */
++    inline ~Matrix() {}
++
++    /** Override MatrixBase::eval() since matrices don't need to be evaluated; it is enough to just read them.
++      * This prevents a useless copy when doing e.g. "m1 = m2.eval()"
++      */
++    const Matrix& eval() const
++    {
++      return *this;
++    }
++
++    /** Override MatrixBase::swap() since for dynamic-sized matrices of the same type it is enough to swap the
++      * data pointers.
++      */
++    void swap(Matrix& other)
++    {
++      if (Base::SizeAtCompileTime==Dynamic)
++        m_storage.swap(other.m_storage);
++      else
++        this->Base::swap(other);
++    }
++
++/////////// Geometry module ///////////
++
++    explicit Matrix(const Quaternion<Scalar>& q);
++    Matrix& operator=(const Quaternion<Scalar>& q);
++    explicit Matrix(const AngleAxis<Scalar>& aa);
++    Matrix& operator=(const AngleAxis<Scalar>& aa);
++};
++
++/** \defgroup matrixtypedefs Global matrix typedefs
++  *
++  * Eigen defines several typedef shortcuts for most common matrix and vector types.
++  *
++  * The general patterns are the following:
++  *
++  * \c MatrixSizeType where \c Size can be \c 2,\c 3,\c 4 for fixed size square matrices or \c X for dynamic size,
++  * and where \c Type can be \c i for integer, \c f for float, \c d for double, \c cf for complex float, \c cd
++  * for complex double.
++  *
++  * For example, \c Matrix3d is a fixed-size 3x3 matrix type of doubles, and \c MatrixXf is a dynamic-size matrix of floats.
++  *
++  * There are also \c VectorSizeType and \c RowVectorSizeType which are self-explanatory. For example, \c Vector4cf is
++  * a fixed-size vector of 4 complex floats.
++  *
++  * \sa class Matrix
++  */
++
++#define EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Size, SizeSuffix)   \
++/** \ingroup matrixtypedefs */                                    \
++typedef Matrix<Type, Size, Size> Matrix##SizeSuffix##TypeSuffix;  \
++/** \ingroup matrixtypedefs */                                    \
++typedef Matrix<Type, Size, 1>    Vector##SizeSuffix##TypeSuffix;  \
++/** \ingroup matrixtypedefs */                                    \
++typedef Matrix<Type, 1, Size>    RowVector##SizeSuffix##TypeSuffix;
++
++#define EIGEN_MAKE_TYPEDEFS_ALL_SIZES(Type, TypeSuffix) \
++EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 2, 2) \
++EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 3, 3) \
++EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, 4, 4) \
++EIGEN_MAKE_TYPEDEFS(Type, TypeSuffix, Dynamic, X)
++
++EIGEN_MAKE_TYPEDEFS_ALL_SIZES(int,                  i)
++EIGEN_MAKE_TYPEDEFS_ALL_SIZES(float,                f)
++EIGEN_MAKE_TYPEDEFS_ALL_SIZES(double,               d)
++EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<float>,  cf)
++EIGEN_MAKE_TYPEDEFS_ALL_SIZES(std::complex<double>, cd)
++
++#undef EIGEN_MAKE_TYPEDEFS_ALL_SIZES
++#undef EIGEN_MAKE_TYPEDEFS
++
++#define EIGEN_MAKE_TYPEDEFS_LARGE(Type, TypeSuffix) \
++typedef Matrix<Type, Dynamic, Dynamic, EIGEN_DEFAULT_MATRIX_FLAGS | LargeBit> MatrixXL##TypeSuffix; \
++typedef Matrix<Type, Dynamic, 1, EIGEN_DEFAULT_MATRIX_FLAGS | LargeBit>       VectorXL##TypeSuffix; \
++typedef Matrix<Type, 1, Dynamic, EIGEN_DEFAULT_MATRIX_FLAGS | LargeBit>       RowVectorXL##TypeSuffix;
++
++EIGEN_MAKE_TYPEDEFS_LARGE(int,                  i)
++EIGEN_MAKE_TYPEDEFS_LARGE(float,                f)
++EIGEN_MAKE_TYPEDEFS_LARGE(double,               d)
++EIGEN_MAKE_TYPEDEFS_LARGE(std::complex<float>,  cf)
++EIGEN_MAKE_TYPEDEFS_LARGE(std::complex<double>, cd)
++
++#undef EIGEN_MAKE_TYPEDEFS_LARGE
++
++#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, SizeSuffix) \
++using Eigen::Matrix##SizeSuffix##TypeSuffix; \
++using Eigen::Vector##SizeSuffix##TypeSuffix; \
++using Eigen::RowVector##SizeSuffix##TypeSuffix;
++
++#define EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(TypeSuffix) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 2) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 3) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, 4) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, X) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE_AND_SIZE(TypeSuffix, XL)
++
++#define EIGEN_USING_MATRIX_TYPEDEFS \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(i) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(f) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(d) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cf) \
++EIGEN_USING_MATRIX_TYPEDEFS_FOR_TYPE(cd)
++
++#endif // EIGEN_MATRIX_H
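
The typedefs and constructors documented above can be exercised as follows (a hedged
sketch, not part of the patch; the function name is hypothetical):

    #include <Eigen/Core>
    using namespace Eigen;

    void matrix_typedef_demo()
    {
      Matrix3d  a;                 // fixed-size 3x3 matrix of double
      MatrixXf  b(4, 5);           // dynamic-size 4x5 matrix of float (coefficients uninitialized)
      Vector4cf c;                 // fixed-size vector of 4 std::complex<float>
      Vector3d  p(1.0, 2.0, 3.0);  // 3D vector constructor taking explicit coefficients
      b.resize(2, 10);             // dynamic-size matrices can be resized
      b.setOnes();
      a = Matrix3d::Identity();
      (void)c; (void)p;
    }
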
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/MatrixStorage.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/MatrixStorage.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/MatrixStorage.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/MatrixStorage.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,231 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_MATRIXSTORAGE_H
++#define EIGEN_MATRIXSTORAGE_H
++
++/** \internal
++  *
++  * \class ei_matrix_storage
++  *
++  * \brief Stores the data of a matrix
++  *
++  * This class stores the data of fixed-size, dynamic-size or mixed matrices
++  * as compactly as possible.
++  *
++  * \sa Matrix
++  */
++template<typename T, int Size, int _Rows, int _Cols> class ei_matrix_storage;
++
++template <typename T, int Size, bool Align> struct ei_aligned_array
++{
++  EIGEN_ALIGN_128 T array[Size];
++};
++
++template <typename T, int Size> struct ei_aligned_array<T,Size,false>
++{
++  T array[Size];
++};
++
++template<typename T>
++inline T* ei_aligned_malloc(size_t size)
++{
++  #ifdef EIGEN_VECTORIZE
++  if (ei_packet_traits<T>::size>1)
++  {
++    void* ptr;
++    if (posix_memalign(&ptr, 16, size*sizeof(T))==0)
++      return static_cast<T*>(ptr);
++    else
++      return 0;
++  }
++  else
++  #endif
++    return new T[size];
++}
++
++template<typename T>
++inline void ei_aligned_free(T* ptr)
++{
++  #ifdef EIGEN_VECTORIZE
++  if (ei_packet_traits<T>::size>1)
++    free(ptr);
++  else
++  #endif
++    delete[] ptr;
++}
++
++// purely fixed-size matrix
++template<typename T, int Size, int _Rows, int _Cols> class ei_matrix_storage
++{
++    ei_aligned_array<T,Size,((Size*sizeof(T))%16)==0> m_data;
++  public:
++    inline ei_matrix_storage() {}
++    inline ei_matrix_storage(int,int,int) {}
++    inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); }
++    inline static int rows(void) {return _Rows;}
++    inline static int cols(void) {return _Cols;}
++    inline void resize(int,int,int) {}
++    inline const T *data() const { return m_data.array; }
++    inline T *data() { return m_data.array; }
++};
++
++// dynamic-size matrix with fixed-size storage
++template<typename T, int Size> class ei_matrix_storage<T, Size, Dynamic, Dynamic>
++{
++    ei_aligned_array<T,Size,((Size*sizeof(T))%16)==0> m_data;
++    int m_rows;
++    int m_cols;
++  public:
++    inline ei_matrix_storage(int, int rows, int cols) : m_rows(rows), m_cols(cols) {}
++    inline ~ei_matrix_storage() {}
++    inline void swap(ei_matrix_storage& other)
++    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
++    inline int rows(void) const {return m_rows;}
++    inline int cols(void) const {return m_cols;}
++    inline void resize(int, int rows, int cols)
++    {
++      m_rows = rows;
++      m_cols = cols;
++    }
++    inline const T *data() const { return m_data.array; }
++    inline T *data() { return m_data.array; }
++};
++
++// dynamic-size matrix with fixed-size storage and fixed width
++template<typename T, int Size, int _Cols> class ei_matrix_storage<T, Size, Dynamic, _Cols>
++{
++    ei_aligned_array<T,Size,((Size*sizeof(T))%16)==0> m_data;
++    int m_rows;
++  public:
++    inline ei_matrix_storage(int, int rows, int) : m_rows(rows) {}
++    inline ~ei_matrix_storage() {}
++    inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
++    inline int rows(void) const {return m_rows;}
++    inline int cols(void) const {return _Cols;}
++    inline void resize(int /*size*/, int rows, int)
++    {
++      m_rows = rows;
++    }
++    inline const T *data() const { return m_data.array; }
++    inline T *data() { return m_data.array; }
++};
++
++// dynamic-size matrix with fixed-size storage and fixed height
++template<typename T, int Size, int _Rows> class ei_matrix_storage<T, Size, _Rows, Dynamic>
++{
++    ei_aligned_array<T,Size,((Size*sizeof(T))%16)==0> m_data;
++    int m_cols;
++  public:
++    inline ei_matrix_storage(int, int, int cols) : m_cols(cols) {}
++    inline ~ei_matrix_storage() {}
++    inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
++    inline int rows(void) const {return _Rows;}
++    inline int cols(void) const {return m_cols;}
++    inline void resize(int size, int, int cols)
++    {
++      m_cols = cols;
++    }
++    inline const T *data() const { return m_data.array; }
++    inline T *data() { return m_data.array; }
++};
++
++// purely dynamic matrix.
++template<typename T> class ei_matrix_storage<T, Dynamic, Dynamic, Dynamic>
++{
++    T *m_data;
++    int m_rows;
++    int m_cols;
++  public:
++    inline ei_matrix_storage(int size, int rows, int cols)
++      : m_data(ei_aligned_malloc<T>(size)), m_rows(rows), m_cols(cols) {}
++    inline ~ei_matrix_storage() { ei_aligned_free(m_data); }
++    inline void swap(ei_matrix_storage& other)
++    { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); std::swap(m_cols,other.m_cols); }
++    inline int rows(void) const {return m_rows;}
++    inline int cols(void) const {return m_cols;}
++    void resize(int size, int rows, int cols)
++    {
++      if(size != m_rows*m_cols)
++      {
++        ei_aligned_free(m_data);
++        m_data = ei_aligned_malloc<T>(size);
++      }
++      m_rows = rows;
++      m_cols = cols;
++    }
++    inline const T *data() const { return m_data; }
++    inline T *data() { return m_data; }
++};
++
++// matrix with dynamic width and fixed height (so that matrix has dynamic size).
++template<typename T, int _Rows> class ei_matrix_storage<T, Dynamic, _Rows, Dynamic>
++{
++    T *m_data;
++    int m_cols;
++  public:
++    inline ei_matrix_storage(int size, int, int cols) : m_data(ei_aligned_malloc<T>(size)), m_cols(cols) {}
++    inline ~ei_matrix_storage() { ei_aligned_free(m_data); }
++    inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_cols,other.m_cols); }
++    inline static int rows(void) {return _Rows;}
++    inline int cols(void) const {return m_cols;}
++    void resize(int size, int, int cols)
++    {
++      if(size != _Rows*m_cols)
++      {
++        ei_aligned_free(m_data);
++        m_data = ei_aligned_malloc<T>(size);
++      }
++      m_cols = cols;
++    }
++    inline const T *data() const { return m_data; }
++    inline T *data() { return m_data; }
++};
++
++// matrix with dynamic height and fixed width (so that matrix has dynamic size).
++template<typename T, int _Cols> class ei_matrix_storage<T, Dynamic, Dynamic, _Cols>
++{
++    T *m_data;
++    int m_rows;
++  public:
++    inline ei_matrix_storage(int size, int rows, int) : m_data(ei_aligned_malloc<T>(size)), m_rows(rows) {}
++    inline ~ei_matrix_storage() { ei_aligned_free(m_data); }
++    inline void swap(ei_matrix_storage& other) { std::swap(m_data,other.m_data); std::swap(m_rows,other.m_rows); }
++    inline int rows(void) const {return m_rows;}
++    inline static int cols(void) {return _Cols;}
++    void resize(int size, int rows, int)
++    {
++      if(size != m_rows*_Cols)
++      {
++        ei_aligned_free(m_data);
++        m_data = ei_aligned_malloc<T>(size);
++      }
++      m_rows = rows;
++    }
++    inline const T *data() const { return m_data; }
++    inline T *data() { return m_data; }
++};
++
++#endif // EIGEN_MATRIXSTORAGE_H
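
The specializations above are selected at compile time: fully fixed-size matrices keep
their coefficients inside the object (16-byte aligned when the array's byte size is a
multiple of 16), while dynamic sizes go through ei_aligned_malloc()/ei_aligned_free().
A small sketch of the observable effect at the Matrix level (illustration only; the
function name is made up):

    #include <Eigen/Core>
    using namespace Eigen;

    void storage_demo()
    {
      Matrix4f fixed;          // ei_matrix_storage<float,16,4,4>: data lives inside the object
      MatrixXd dyn(3, 3);      // ei_matrix_storage<double,Dynamic,Dynamic,Dynamic>: heap storage
      dyn.resize(3, 4);        // total size changes (9 -> 12): buffer is freed and reallocated
      dyn.resize(4, 3);        // same total size (12): only the row/column counts are updated
      (void)fixed;
    }
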
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Minor.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Minor.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Minor.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Minor.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,119 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_MINOR_H
++#define EIGEN_MINOR_H
++
++/** \class Minor
++  *
++  * \brief Expression of a minor
++  *
++  * \param MatrixType the type of the object in which we are taking a minor
++  *
++  * This class represents an expression of a minor. It is the return
++  * type of MatrixBase::minor() and most of the time this is the only way it
++  * is used.
++  *
++  * \sa MatrixBase::minor()
++  */
++template<typename MatrixType>
++struct ei_traits<Minor<MatrixType> >
++{
++  typedef typename MatrixType::Scalar Scalar;
++  typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
++  typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
++  enum {
++    RowsAtCompileTime = (MatrixType::RowsAtCompileTime != Dynamic) ?
++                          int(MatrixType::RowsAtCompileTime) - 1 : Dynamic,
++    ColsAtCompileTime = (MatrixType::ColsAtCompileTime != Dynamic) ?
++                          int(MatrixType::ColsAtCompileTime) - 1 : Dynamic,
++    MaxRowsAtCompileTime = (MatrixType::MaxRowsAtCompileTime != Dynamic) ?
++                             int(MatrixType::MaxRowsAtCompileTime) - 1 : Dynamic,
++    MaxColsAtCompileTime = (MatrixType::MaxColsAtCompileTime != Dynamic) ?
++                             int(MatrixType::MaxColsAtCompileTime) - 1 : Dynamic,
++    Flags = _MatrixTypeNested::Flags & HereditaryBits,
++    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
++  };
++};
++
++template<typename MatrixType> class Minor
++  : public MatrixBase<Minor<MatrixType> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Minor)
++
++    inline Minor(const MatrixType& matrix,
++                       int row, int col)
++      : m_matrix(matrix), m_row(row), m_col(col)
++    {
++      ei_assert(row >= 0 && row < matrix.rows()
++          && col >= 0 && col < matrix.cols());
++    }
++
++    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Minor)
++
++    inline int rows() const { return m_matrix.rows() - 1; }
++    inline int cols() const { return m_matrix.cols() - 1; }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      return m_matrix.const_cast_derived().coeffRef(row + (row >= m_row), col + (col >= m_col));
++    }
++
++    inline const Scalar coeff(int row, int col) const
++    {
++      return m_matrix.coeff(row + (row >= m_row), col + (col >= m_col));
++    }
++
++  protected:
++    const typename MatrixType::Nested m_matrix;
++    const int m_row, m_col;
++};
++
++/** \return an expression of the (\a row, \a col)-minor of *this,
++  * i.e. an expression constructed from *this by removing the specified
++  * row and column.
++  *
++  * Example: \include MatrixBase_minor.cpp
++  * Output: \verbinclude MatrixBase_minor.out
++  *
++  * \sa class Minor
++  */
++template<typename Derived>
++inline Minor<Derived>
++MatrixBase<Derived>::minor(int row, int col)
++{
++  return Minor<Derived>(derived(), row, col);
++}
++
++/** This is the const version of minor(). */
++template<typename Derived>
++inline const Minor<Derived>
++MatrixBase<Derived>::minor(int row, int col) const
++{
++  return Minor<Derived>(derived(), row, col);
++}
++
++#endif // EIGEN_MINOR_H
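
For reference, a minimal sketch of the minor() API defined above (not part of the patch;
the function name is invented for illustration):

    #include <Eigen/Core>
    using namespace Eigen;

    void minor_demo()
    {
      Matrix3d m = Matrix3d::Identity();
      // 2x2 expression obtained by removing row 0 and column 1 of m.
      Matrix2d sub = m.minor(0, 1);
      // The minor is writable when the nested matrix is writable; indices at or past the
      // removed row/column are shifted, so this assignment writes to m(2,2).
      m.minor(0, 1)(1, 1) = 5.0;
      (void)sub;
    }
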
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/NestByValue.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/NestByValue.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/NestByValue.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/NestByValue.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,124 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_NESTBYVALUE_H
++#define EIGEN_NESTBYVALUE_H
++
++/** \class NestByValue
++  *
++  * \brief Expression which must be nested by value
++  *
++  * \param ExpressionType the type of the object of which we are requiring nesting-by-value
++  *
++  * This class is the return type of MatrixBase::nestByValue()
++  * and most of the time this is the only way it is used.
++  *
++  * \sa MatrixBase::nestByValue()
++  */
++template<typename ExpressionType>
++struct ei_traits<NestByValue<ExpressionType> >
++{
++  typedef typename ExpressionType::Scalar Scalar;
++  enum {
++    RowsAtCompileTime = ExpressionType::RowsAtCompileTime,
++    ColsAtCompileTime = ExpressionType::ColsAtCompileTime,
++    MaxRowsAtCompileTime = ExpressionType::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = ExpressionType::MaxColsAtCompileTime,
++    Flags = ExpressionType::Flags,
++    CoeffReadCost = ExpressionType::CoeffReadCost
++  };
++};
++
++template<typename ExpressionType> class NestByValue
++  : public MatrixBase<NestByValue<ExpressionType> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(NestByValue)
++
++    inline NestByValue(const ExpressionType& matrix) : m_expression(matrix) {}
++
++    inline int rows() const { return m_expression.rows(); }
++    inline int cols() const { return m_expression.cols(); }
++    inline int stride() const { return m_expression.stride(); }
++
++    inline const Scalar coeff(int row, int col) const
++    {
++      return m_expression.coeff(row, col);
++    }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      return m_expression.const_cast_derived().coeffRef(row, col);
++    }
++
++    inline const Scalar coeff(int index) const
++    {
++      return m_expression.coeff(index);
++    }
++
++    inline Scalar& coeffRef(int index)
++    {
++      return m_expression.const_cast_derived().coeffRef(index);
++    }
++
++    template<int LoadMode>
++    inline const PacketScalar packet(int row, int col) const
++    {
++      return m_expression.template packet<LoadMode>(row, col);
++    }
++
++    template<int LoadMode>
++    inline void writePacket(int row, int col, const PacketScalar& x)
++    {
++      m_expression.const_cast_derived().template writePacket<LoadMode>(row, col, x);
++    }
++
++    template<int LoadMode>
++    inline const PacketScalar packet(int index) const
++    {
++      return m_expression.template packet<LoadMode>(index);
++    }
++
++    template<int LoadMode>
++    inline void writePacket(int index, const PacketScalar& x)
++    {
++      m_expression.const_cast_derived().template writePacket<LoadMode>(index, x);
++    }
++
++  protected:
++    const ExpressionType m_expression;
++};
++
++/** \returns an expression of the temporary version of *this.
++  */
++template<typename Derived>
++inline const NestByValue<Derived>
++MatrixBase<Derived>::nestByValue() const
++{
++  return NestByValue<Derived>(derived());
++}
++
++#endif // EIGEN_NESTBYVALUE_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/NumTraits.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/NumTraits.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/NumTraits.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/NumTraits.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,141 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_NUMTRAITS_H
++#define EIGEN_NUMTRAITS_H
++
++/** \class NumTraits
++  *
++  * \brief Holds some data about the various numeric (i.e. scalar) types allowed by Eigen.
++  *
++  * \param T the numeric type about which this class provides data. Recall that Eigen allows
++  *          only the following types for \a T: \c int, \c float, \c double,
++  *          \c std::complex<float>, \c std::complex<double>.
++  *
++  * The provided data consists of:
++  * \li A typedef \a Real, giving the "real part" type of \a T. If \a T is already real,
++  *     then \a Real is just a typedef to \a T. If \a T is \c std::complex<U> then \a Real
++  *     is a typedef to \a U.
++  * \li A typedef \a FloatingPoint, giving the "floating-point type" of \a T. If \a T is
++  *     \c int, then \a FloatingPoint is a typedef to \c double. Otherwise, \a FloatingPoint
++  *     is a typedef to \a T.
++  * \li An enum value \a IsComplex. It is equal to 1 if \a T is a \c std::complex
++  *     type, and to 0 otherwise.
++  * \li An enum \a HasFloatingPoint. It is equal to \c 0 if \a T is \c int,
++  *     and to \c 1 otherwise.
++  */
++template<typename T> struct NumTraits;
++
++template<> struct NumTraits<int>
++{
++  typedef int Real;
++  typedef double FloatingPoint;
++  enum {
++    IsComplex = 0,
++    HasFloatingPoint = 0,
++    ReadCost = 1,
++    AddCost = 1,
++    MulCost = 1
++  };
++};
++
++template<> struct NumTraits<float>
++{
++  typedef float Real;
++  typedef float FloatingPoint;
++  enum {
++    IsComplex = 0,
++    HasFloatingPoint = 1,
++    ReadCost = 1,
++    AddCost = 1,
++    MulCost = 1
++  };
++};
++
++template<> struct NumTraits<double>
++{
++  typedef double Real;
++  typedef double FloatingPoint;
++  enum {
++    IsComplex = 0,
++    HasFloatingPoint = 1,
++    ReadCost = 1,
++    AddCost = 1,
++    MulCost = 1
++  };
++};
++
++template<typename _Real> struct NumTraits<std::complex<_Real> >
++{
++  typedef _Real Real;
++  typedef std::complex<_Real> FloatingPoint;
++  enum {
++    IsComplex = 1,
++    HasFloatingPoint = NumTraits<Real>::HasFloatingPoint,
++    ReadCost = 2,
++    AddCost = 2 * NumTraits<Real>::AddCost,
++    MulCost = 4 * NumTraits<Real>::MulCost + 2 * NumTraits<Real>::AddCost
++  };
++};
++
++template<> struct NumTraits<long long int>
++{
++  typedef long long int Real;
++  typedef long double FloatingPoint;
++  enum {
++    IsComplex = 0,
++    HasFloatingPoint = 0,
++    ReadCost = 1,
++    AddCost = 1,
++    MulCost = 1
++  };
++};
++
++template<> struct NumTraits<long double>
++{
++  typedef long double Real;
++  typedef long double FloatingPoint;
++  enum {
++    IsComplex = 0,
++    HasFloatingPoint = 1,
++    ReadCost = 1,
++    AddCost = 2,
++    MulCost = 2
++  };
++};
++
++template<> struct NumTraits<bool>
++{
++  typedef bool Real;
++  typedef float FloatingPoint;
++  enum {
++    IsComplex = 0,
++    HasFloatingPoint = 0,
++    ReadCost = 1,
++    AddCost = 1,
++    MulCost = 1
++  };
++};
++
++#endif // EIGEN_NUMTRAITS_H
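
The traits above can be queried at compile time; a brief sketch (illustrative only, the
function name is hypothetical):

    #include <Eigen/Core>
    #include <complex>
    using namespace Eigen;

    int numtraits_demo()
    {
      // For std::complex<float>, Real is float and the type is flagged as complex.
      typedef NumTraits<std::complex<float> >::Real RealType;               // float
      bool complex_is_complex = NumTraits<std::complex<float> >::IsComplex; // 1
      bool int_has_fp         = NumTraits<int>::HasFloatingPoint;           // 0
      RealType r = 0.5f;
      return complex_is_complex && !int_has_fp && (r > 0.0f);
    }
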
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Part.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Part.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Part.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Part.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,369 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Benoit Jacob <jacob at math.jussieu.fr>
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_PART_H
++#define EIGEN_PART_H
++
++/** \class Part
++  *
++  * \brief Expression of a triangular matrix extracted from a given matrix
++  *
++  * \param MatrixType the type of the object in which we are taking the triangular part
++  * \param Mode the kind of triangular matrix expression to construct. Can be Upper, StrictlyUpper,
++  *             UnitUpper, Lower, StrictlyLower, UnitLower. This is in fact a bit field; it must have either
++  *             UpperTriangularBit or LowerTriangularBit, and additionally it may have either ZeroDiagBit or
++  *             UnitDiagBit.
++  *
++  * This class represents an expression of the upper or lower triangular part of
++  * a square matrix, possibly with a further assumption on the diagonal. It is the return type
++  * of MatrixBase::part() and most of the time this is the only way it is used.
++  *
++  * \sa MatrixBase::part()
++  */
++template<typename MatrixType, unsigned int Mode>
++struct ei_traits<Part<MatrixType, Mode> >
++{
++  typedef typename MatrixType::Scalar Scalar;
++  typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
++  typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
++  enum {
++    RowsAtCompileTime = MatrixType::RowsAtCompileTime,
++    ColsAtCompileTime = MatrixType::ColsAtCompileTime,
++    MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
++    Flags = (_MatrixTypeNested::Flags & (HereditaryBits) & (~(PacketAccessBit | DirectAccessBit | LinearAccessBit))) | Mode,
++    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
++  };
++};
++
++template<typename MatrixType, unsigned int Mode> class Part
++  : public MatrixBase<Part<MatrixType, Mode> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Part)
++
++    inline Part(const MatrixType& matrix) : m_matrix(matrix)
++    { ei_assert(ei_are_flags_consistent<Mode>::ret); }
++
++    /** \sa MatrixBase::operator+=() */
++    template<typename Other> Part&  operator+=(const Other& other);
++    /** \sa MatrixBase::operator-=() */
++    template<typename Other> Part&  operator-=(const Other& other);
++    /** \sa MatrixBase::operator*=() */
++    Part&  operator*=(const typename ei_traits<MatrixType>::Scalar& other);
++    /** \sa MatrixBase::operator/=() */
++    Part&  operator/=(const typename ei_traits<MatrixType>::Scalar& other);
++
++    /** \sa operator=(), MatrixBase::lazyAssign() */
++    template<typename Other> void lazyAssign(const Other& other);
++    /** \sa MatrixBase::operator=() */
++    template<typename Other> Part& operator=(const Other& other);
++
++    inline int rows() const { return m_matrix.rows(); }
++    inline int cols() const { return m_matrix.cols(); }
++    inline int stride() const { return m_matrix.stride(); }
++
++    inline Scalar coeff(int row, int col) const
++    {
++      if(Flags & LowerTriangularBit ? col>row : row>col)
++        return (Flags & SelfAdjointBit) ? ei_conj(m_matrix.coeff(col, row)) : (Scalar)0;
++      if(Flags & UnitDiagBit)
++        return col==row ? (Scalar)1 : m_matrix.coeff(row, col);
++      else if(Flags & ZeroDiagBit)
++        return col==row ? (Scalar)0 : m_matrix.coeff(row, col);
++      else
++        return m_matrix.coeff(row, col);
++    }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      EIGEN_STATIC_ASSERT(!(Flags & UnitDiagBit), writting_to_triangular_part_with_unit_diag_is_not_supported);
++      EIGEN_STATIC_ASSERT(!(Flags & SelfAdjointBit), default_writting_to_selfadjoint_not_supported);
++      ei_assert(   (Mode==Upper && col>=row)
++                || (Mode==Lower && col<=row)
++                || (Mode==StrictlyUpper && col>row)
++                || (Mode==StrictlyLower && col<row));
++      return m_matrix.const_cast_derived().coeffRef(row, col);
++    }
++
++    /** \internal */
++    const MatrixType& _expression() const { return m_matrix; }
++
++    /** discard any writes to a row */
++    const Block<Part, 1, ColsAtCompileTime> row(int i) { return Base::row(i); }
++    const Block<Part, 1, ColsAtCompileTime> row(int i) const { return Base::row(i); }
++    /** discard any writes to a column */
++    const Block<Part, RowsAtCompileTime, 1> col(int i) { return Base::col(i); }
++    const Block<Part, RowsAtCompileTime, 1> col(int i) const { return Base::col(i); }
++
++  protected:
++
++    const typename MatrixType::Nested m_matrix;
++};
++
++/** \returns an expression of a triangular matrix extracted from the current matrix
++  *
++  * The parameter \a Mode can have the following values: \c Upper, \c StrictlyUpper, \c UnitUpper,
++  * \c Lower, \c StrictlyLower, \c UnitLower.
++  *
++  * \addexample PartExample \label How to extract a triangular part of an arbitrary matrix
++  *
++  * Example: \include MatrixBase_extract.cpp
++  * Output: \verbinclude MatrixBase_extract.out
++  *
++  * \sa class Part, part(), marked()
++  */
++template<typename Derived>
++template<unsigned int Mode>
++const Part<Derived, Mode> MatrixBase<Derived>::part() const
++{
++  return derived();
++}
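++
++// A minimal usage sketch (illustrative; MatrixXd is assumed to be the usual dynamic-size
++// double matrix typedef, which is not defined in this file):
++//
++//   MatrixXd m(3,3), u, s;
++//   u = m.part<Upper>();      // copy of m with the part below the diagonal zeroed
++//   s = m.part<UnitUpper>();  // same, but with the diagonal forced to ones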
++
++template<typename MatrixType, unsigned int Mode>
++template<typename Other>
++inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator=(const Other& other)
++{
++  if(Other::Flags & EvalBeforeAssigningBit)
++  {
++    typename ei_eval<Other>::type other_evaluated(other.rows(), other.cols());
++    other_evaluated.template part<Mode>().lazyAssign(other);
++    lazyAssign(other_evaluated);
++  }
++  else
++    lazyAssign(other.derived());
++  return *this;
++}
++
++template<typename Derived1, typename Derived2, unsigned int Mode, int UnrollCount>
++struct ei_part_assignment_impl
++{
++  enum {
++    col = (UnrollCount-1) / Derived1::RowsAtCompileTime,
++    row = (UnrollCount-1) % Derived1::RowsAtCompileTime
++  };
++
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    ei_part_assignment_impl<Derived1, Derived2, Mode, UnrollCount-1>::run(dst, src);
++
++    if(Mode == SelfAdjoint)
++    {
++      if(row == col)
++        dst.coeffRef(row, col) = ei_real(src.coeff(row, col));
++      else if(row < col)
++        dst.coeffRef(col, row) = ei_conj(dst.coeffRef(row, col) = src.coeff(row, col));
++    }
++    else
++    {
++      ei_assert(Mode == Upper || Mode == Lower || Mode == StrictlyUpper || Mode == StrictlyLower);
++      if((Mode == Upper && row <= col)
++      || (Mode == Lower && row >= col)
++      || (Mode == StrictlyUpper && row < col)
++      || (Mode == StrictlyLower && row > col))
++        dst.coeffRef(row, col) = src.coeff(row, col);
++    }
++  }
++};
++
++template<typename Derived1, typename Derived2, unsigned int Mode>
++struct ei_part_assignment_impl<Derived1, Derived2, Mode, 1>
++{
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    if(!(Mode & ZeroDiagBit))
++      dst.coeffRef(0, 0) = src.coeff(0, 0);
++  }
++};
++
++// prevent buggy user code from causing an infinite recursion
++template<typename Derived1, typename Derived2, unsigned int Mode>
++struct ei_part_assignment_impl<Derived1, Derived2, Mode, 0>
++{
++  inline static void run(Derived1 &, const Derived2 &) {}
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_part_assignment_impl<Derived1, Derived2, Upper, Dynamic>
++{
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    for(int j = 0; j < dst.cols(); j++)
++      for(int i = 0; i <= j; i++)
++        dst.coeffRef(i, j) = src.coeff(i, j);
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_part_assignment_impl<Derived1, Derived2, Lower, Dynamic>
++{
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    for(int j = 0; j < dst.cols(); j++)
++      for(int i = j; i < dst.rows(); i++)
++        dst.coeffRef(i, j) = src.coeff(i, j);
++  }
++};
++
++template<typename Derived1, typename Derived2>
++struct ei_part_assignment_impl<Derived1, Derived2, StrictlyUpper, Dynamic>
++{
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    for(int j = 0; j < dst.cols(); j++)
++      for(int i = 0; i < j; i++)
++        dst.coeffRef(i, j) = src.coeff(i, j);
++  }
++};
++template<typename Derived1, typename Derived2>
++struct ei_part_assignment_impl<Derived1, Derived2, StrictlyLower, Dynamic>
++{
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    for(int j = 0; j < dst.cols(); j++)
++      for(int i = j+1; i < dst.rows(); i++)
++        dst.coeffRef(i, j) = src.coeff(i, j);
++  }
++};
++template<typename Derived1, typename Derived2>
++struct ei_part_assignment_impl<Derived1, Derived2, SelfAdjoint, Dynamic>
++{
++  inline static void run(Derived1 &dst, const Derived2 &src)
++  {
++    for(int j = 0; j < dst.cols(); j++)
++    {
++      for(int i = 0; i < j; i++)
++        dst.coeffRef(j, i) = ei_conj(dst.coeffRef(i, j) = src.coeff(i, j));
++      dst.coeffRef(j, j) = ei_real(src.coeff(j, j));
++    }
++  }
++};
++
++template<typename MatrixType, unsigned int Mode>
++template<typename Other>
++void Part<MatrixType, Mode>::lazyAssign(const Other& other)
++{
++  const bool unroll = MatrixType::SizeAtCompileTime * Other::CoeffReadCost / 2 <= EIGEN_UNROLLING_LIMIT;
++  ei_assert(m_matrix.rows() == other.rows() && m_matrix.cols() == other.cols());
++
++  ei_part_assignment_impl
++    <MatrixType, Other, Mode,
++    unroll ? int(MatrixType::SizeAtCompileTime) : Dynamic
++    >::run(m_matrix.const_cast_derived(), other.derived());
++}
++
++/** \returns an lvalue pseudo-expression allowing special operations to be performed on \c *this.
++  *
++  * The \a Mode parameter can have the following values: \c Upper, \c StrictlyUpper, \c Lower,
++  * \c StrictlyLower, \c SelfAdjoint.
++  *
++  * \addexample PartExample \label How to write to a triangular part of a matrix
++  *
++  * Example: \include MatrixBase_part.cpp
++  * Output: \verbinclude MatrixBase_part.out
++  *
++  * \sa class Part, MatrixBase::extract(), MatrixBase::marked()
++  */
++template<typename Derived>
++template<unsigned int Mode>
++inline Part<Derived, Mode> MatrixBase<Derived>::part()
++{
++  return Part<Derived, Mode>(derived());
++}
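++
++// A minimal usage sketch (illustrative, not from the upstream examples): assigning through
++// the returned lvalue expression only writes the selected part of the destination:
++//
++//   MatrixXd m(3,3), other(3,3);
++//   m.part<StrictlyLower>() = other;  // only the strictly lower coefficients of m change
++//   m.part<SelfAdjoint>() = other;    // copies the upper part and mirrors its conjugate below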
++
++/** \returns true if *this is approximately equal to an upper triangular matrix,
++  *          within the precision given by \a prec.
++  *
++  * \sa isLower(), extract(), part(), marked()
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isUpper(RealScalar prec) const
++{
++  if(cols() != rows()) return false;
++  RealScalar maxAbsOnUpperPart = static_cast<RealScalar>(-1);
++  for(int j = 0; j < cols(); j++)
++    for(int i = 0; i <= j; i++)
++    {
++      RealScalar absValue = ei_abs(coeff(i,j));
++      if(absValue > maxAbsOnUpperPart) maxAbsOnUpperPart = absValue;
++    }
++  for(int j = 0; j < cols()-1; j++)
++    for(int i = j+1; i < rows(); i++)
++      if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnUpperPart, prec)) return false;
++  return true;
++}
++
++/** \returns true if *this is approximately equal to a lower triangular matrix,
++  *          within the precision given by \a prec.
++  *
++  * \sa isUpper(), extract(), part(), marked()
++  */
++template<typename Derived>
++bool MatrixBase<Derived>::isLower(RealScalar prec) const
++{
++  if(cols() != rows()) return false;
++  RealScalar maxAbsOnLowerPart = static_cast<RealScalar>(-1);
++  for(int j = 0; j < cols(); j++)
++    for(int i = j; i < rows(); i++)
++    {
++      RealScalar absValue = ei_abs(coeff(i,j));
++      if(absValue > maxAbsOnLowerPart) maxAbsOnLowerPart = absValue;
++    }
++  for(int j = 1; j < cols(); j++)
++    for(int i = 0; i < j; i++)
++      if(!ei_isMuchSmallerThan(coeff(i, j), maxAbsOnLowerPart, prec)) return false;
++  return true;
++}
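++
++// A minimal usage sketch (illustrative): the precision argument bounds how large the
++// coefficients outside the tested triangle may be relative to those inside it:
++//
++//   MatrixXd m(4,4);
++//   bool upper = m.isUpper(1e-12);
++//   bool lower = m.isLower(1e-12);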
++
++template<typename MatrixType, unsigned int Mode>
++template<typename Other>
++inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator+=(const Other& other)
++{
++  return *this = m_matrix + other;
++}
++
++template<typename MatrixType, unsigned int Mode>
++template<typename Other>
++inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator-=(const Other& other)
++{
++  return *this = m_matrix - other;
++}
++
++template<typename MatrixType, unsigned int Mode>
++inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator*=
++(const typename ei_traits<MatrixType>::Scalar& other)
++{
++  return *this = m_matrix * other;
++}
++
++template<typename MatrixType, unsigned int Mode>
++inline Part<MatrixType, Mode>& Part<MatrixType, Mode>::operator/=
++(const typename ei_traits<MatrixType>::Scalar& other)
++{
++  return *this = m_matrix / other;
++}
++
++#endif // EIGEN_PART_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Product.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Product.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Product.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Product.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,771 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_PRODUCT_H
++#define EIGEN_PRODUCT_H
++
++/***************************
++*** Forward declarations ***
++***************************/
++
++template<int VectorizationMode, int Index, typename Lhs, typename Rhs>
++struct ei_product_coeff_impl;
++
++template<int StorageOrder, int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
++struct ei_product_packet_impl;
++
++template<typename T> class ei_product_eval_to_column_major;
++
++/** \class ProductReturnType
++  *
++  * \brief Helper class to get the correct and optimized returned type of operator*
++  *
++  * \param Lhs the type of the left-hand side
++  * \param Rhs the type of the right-hand side
++  * \param ProductMode the type of the product (determined automatically by ei_product_mode)
++  *
++  * This class defines the typename Type representing the optimized product expression
++  * between two matrix expressions. In practice, using ProductReturnType<Lhs,Rhs>::Type
++  * is the recommended way to define the result type of a function returning an expression
++  * which involves a matrix product. The classes Product and DiagonalProduct should never be
++  * used directly.
++  *
++  * \sa class Product, class DiagonalProduct, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
++  */
++template<typename Lhs, typename Rhs, int ProductMode>
++struct ProductReturnType
++{
++  typedef typename ei_nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
++  typedef typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type RhsNested;
++
++  typedef Product<typename ei_unconst<LhsNested>::type,
++                  typename ei_unconst<RhsNested>::type, ProductMode> Type;
++};
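++
++// A minimal sketch (illustrative, not from the upstream documentation) of declaring a
++// function that returns a product expression without evaluating it, as recommended above
++// (myProduct is a made-up name):
++//
++//   template<typename Lhs, typename Rhs>
++//   const typename ProductReturnType<Lhs,Rhs>::Type
++//   myProduct(const MatrixBase<Lhs>& a, const MatrixBase<Rhs>& b)
++//   { return a.derived() * b.derived(); }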
++
++// cache friendly specialization
++template<typename Lhs, typename Rhs>
++struct ProductReturnType<Lhs,Rhs,CacheFriendlyProduct>
++{
++  typedef typename ei_nested<Lhs,Rhs::ColsAtCompileTime>::type LhsNested;
++
++  typedef typename ei_nested<Rhs,Lhs::RowsAtCompileTime,
++              typename ei_product_eval_to_column_major<Rhs>::type
++          >::type RhsNested;
++
++  typedef Product<typename ei_unconst<LhsNested>::type,
++                  typename ei_unconst<RhsNested>::type, CacheFriendlyProduct> Type;
++};
++
++/*  Helper class to determine the type of the product, which can be one of:
++ *    - DiagonalProduct
++ *    - SparseProduct
++ *    - CacheFriendlyProduct
++ *    - NormalProduct
++ */
++template<typename Lhs, typename Rhs> struct ei_product_mode
++{
++  enum{
++
++    value = ((Rhs::Flags&Diagonal)==Diagonal) || ((Lhs::Flags&Diagonal)==Diagonal)
++          ? DiagonalProduct
++          : (Rhs::Flags & Lhs::Flags & SparseBit)
++          ? SparseProduct
++          : Lhs::MaxColsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
++            && ( Lhs::MaxRowsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
++              || Rhs::MaxColsAtCompileTime >= EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD )
++            && (!(Rhs::IsVectorAtCompileTime && (Lhs::Flags&RowMajorBit)  && (!(Lhs::Flags&DirectAccessBit))))
++            && (!(Lhs::IsVectorAtCompileTime && (!(Rhs::Flags&RowMajorBit)) && (!(Rhs::Flags&DirectAccessBit))))
++          ? CacheFriendlyProduct
++          : NormalProduct };
++};
++
++/** \class Product
++  *
++  * \brief Expression of the product of two matrices
++  *
++  * \param LhsNested the type used to store the left-hand side
++  * \param RhsNested the type used to store the right-hand side
++  * \param ProductMode the type of the product
++  *
++  * This class represents an expression of the product of two matrices.
++  * It is the return type of the operator* between matrices. Its template
++  * arguments are determined automatically by ProductReturnType. Therefore,
++  * Product should never be used directly. To determine the result type of a
++  * function which involves a matrix product, use ProductReturnType::Type.
++  *
++  * \sa ProductReturnType, MatrixBase::operator*(const MatrixBase<OtherDerived>&)
++  */
++template<typename LhsNested, typename RhsNested, int ProductMode>
++struct ei_traits<Product<LhsNested, RhsNested, ProductMode> >
++{
++  // clean the nested types:
++  typedef typename ei_unconst<typename ei_unref<LhsNested>::type>::type _LhsNested;
++  typedef typename ei_unconst<typename ei_unref<RhsNested>::type>::type _RhsNested;
++  typedef typename _LhsNested::Scalar Scalar;
++
++  enum {
++    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
++    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
++    LhsFlags = _LhsNested::Flags,
++    RhsFlags = _RhsNested::Flags,
++
++    RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
++    ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
++    InnerSize = EIGEN_ENUM_MIN(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
++
++    MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
++
++    LhsRowMajor = LhsFlags & RowMajorBit,
++    RhsRowMajor = RhsFlags & RowMajorBit,
++
++    CanVectorizeRhs = RhsRowMajor && (RhsFlags & PacketAccessBit)
++                    && (ColsAtCompileTime % ei_packet_traits<Scalar>::size == 0),
++
++    CanVectorizeLhs = (!LhsRowMajor) && (LhsFlags & PacketAccessBit)
++                    && (RowsAtCompileTime % ei_packet_traits<Scalar>::size == 0),
++
++    EvalToRowMajor = RhsRowMajor && (ProductMode==(int)CacheFriendlyProduct ? LhsRowMajor : (!CanVectorizeLhs)),
++
++    RemovedBits = ~((EvalToRowMajor ? 0 : RowMajorBit)
++                | ((RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic) ? 0 : LargeBit)),
++
++    Flags = ((unsigned int)(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
++          | EvalBeforeAssigningBit
++          | EvalBeforeNestingBit
++          | (CanVectorizeLhs || CanVectorizeRhs ? PacketAccessBit : 0),
++
++    CoeffReadCost = InnerSize == Dynamic ? Dynamic
++                  : InnerSize * (NumTraits<Scalar>::MulCost + LhsCoeffReadCost + RhsCoeffReadCost)
++                    + (InnerSize - 1) * NumTraits<Scalar>::AddCost,
++
++    /* CanVectorizeInner deserves special explanation. It does not affect the product flags. It is not used outside
++     * of Product. If the Product itself is not a packet-access expression, there is still a chance that the inner
++     * loop of the product might be vectorized. This is the meaning of CanVectorizeInner. Since it doesn't affect
++     * the Flags, it is safe to make this value depend on ActualPacketAccessBit, that doesn't affect the ABI.
++     */
++    CanVectorizeInner = LhsRowMajor && (!RhsRowMajor) && (LhsFlags & RhsFlags & ActualPacketAccessBit)
++                      && (InnerSize % ei_packet_traits<Scalar>::size == 0)
++  };
++};
++
++template<typename LhsNested, typename RhsNested, int ProductMode> class Product : ei_no_assignment_operator,
++  public MatrixBase<Product<LhsNested, RhsNested, ProductMode> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
++
++  private:
++
++    typedef typename ei_traits<Product>::_LhsNested _LhsNested;
++    typedef typename ei_traits<Product>::_RhsNested _RhsNested;
++
++    enum {
++      PacketSize = ei_packet_traits<Scalar>::size,
++      InnerSize  = ei_traits<Product>::InnerSize,
++      Unroll = CoeffReadCost <= EIGEN_UNROLLING_LIMIT,
++      CanVectorizeInner = ei_traits<Product>::CanVectorizeInner
++    };
++
++    typedef ei_product_coeff_impl<CanVectorizeInner ? InnerVectorization : NoVectorization,
++                                  Unroll ? InnerSize-1 : Dynamic,
++                                  _LhsNested, _RhsNested> ScalarCoeffImpl;
++
++  public:
++
++    template<typename Lhs, typename Rhs>
++    inline Product(const Lhs& lhs, const Rhs& rhs)
++      : m_lhs(lhs), m_rhs(rhs)
++    {
++      ei_assert(lhs.cols() == rhs.rows());
++    }
++
++    /** \internal
++      * compute \a res += \c *this using the cache friendly product.
++      */
++    template<typename DestDerived>
++    void _cacheFriendlyEvalAndAdd(DestDerived& res) const;
++
++    /** \internal
++      * \returns whether it is worth it to use the cache friendly product.
++      */
++    inline bool _useCacheFriendlyProduct() const
++    {
++      return  m_lhs.cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
++              && (  rows()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
++                 || cols()>=EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD);
++    }
++
++    inline int rows() const { return m_lhs.rows(); }
++    inline int cols() const { return m_rhs.cols(); }
++
++    const Scalar coeff(int row, int col) const
++    {
++      Scalar res;
++      ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
++      return res;
++    }
++
++    /* Allow index-based non-packet access. It is impossible though to allow index-based packet access,
++     * which is why we don't set the LinearAccessBit.
++     */
++    const Scalar coeff(int index) const
++    {
++      Scalar res;
++      const int row = RowsAtCompileTime == 1 ? 0 : index;
++      const int col = RowsAtCompileTime == 1 ? index : 0;
++      ScalarCoeffImpl::run(row, col, m_lhs, m_rhs, res);
++      return res;
++    }
++
++    template<int LoadMode>
++    const PacketScalar packet(int row, int col) const
++    {
++      PacketScalar res;
++      ei_product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
++                                   Unroll ? InnerSize-1 : Dynamic,
++                                   _LhsNested, _RhsNested, PacketScalar, LoadMode>
++        ::run(row, col, m_lhs, m_rhs, res);
++      return res;
++    }
++
++    inline const _LhsNested& lhs() const { return m_lhs; }
++    inline const _RhsNested& rhs() const { return m_rhs; }
++
++  protected:
++    const LhsNested m_lhs;
++    const RhsNested m_rhs;
++};
++
++/** \returns the matrix product of \c *this and \a other.
++  *
++  * \note If instead of the matrix product you want the coefficient-wise product, see Cwise::operator*().
++  *
++  * \sa lazy(), operator*=(const MatrixBase&), Cwise::operator*()
++  */
++template<typename Derived>
++template<typename OtherDerived>
++inline const typename ProductReturnType<Derived,OtherDerived>::Type
++MatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
++{
++  return typename ProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
++}
++
++/** replaces \c *this by \c *this * \a other.
++  *
++  * \returns a reference to \c *this
++  */
++template<typename Derived>
++template<typename OtherDerived>
++inline Derived &
++MatrixBase<Derived>::operator*=(const MatrixBase<OtherDerived> &other)
++{
++  return *this = *this * other;
++}
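++
++// A minimal usage sketch (illustrative; MatrixXd is assumed to be the usual dynamic-size
++// double matrix typedef): the matrix product and its in-place form.
++//
++//   MatrixXd a(2,3), b(3,4), c;
++//   c = a * b;            // matrix product, evaluated when assigned to c
++//   MatrixXd d(3,3), e(3,3);
++//   d *= e;               // replaces d by d * e
++//   // for a coefficient-wise product, use the Cwise interface (Cwise::operator*()) as noted above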
++
++/***************************************************************************
++* Normal product .coeff() implementation (with meta-unrolling)
++***************************************************************************/
++
++/**************************************
++*** Scalar path  - no vectorization ***
++**************************************/
++
++template<int Index, typename Lhs, typename Rhs>
++struct ei_product_coeff_impl<NoVectorization, Index, Lhs, Rhs>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
++  {
++    ei_product_coeff_impl<NoVectorization, Index-1, Lhs, Rhs>::run(row, col, lhs, rhs, res);
++    res += lhs.coeff(row, Index) * rhs.coeff(Index, col);
++  }
++};
++
++template<typename Lhs, typename Rhs>
++struct ei_product_coeff_impl<NoVectorization, 0, Lhs, Rhs>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
++  {
++    res = lhs.coeff(row, 0) * rhs.coeff(0, col);
++  }
++};
++
++template<typename Lhs, typename Rhs>
++struct ei_product_coeff_impl<NoVectorization, Dynamic, Lhs, Rhs>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar& res)
++  {
++    res = lhs.coeff(row, 0) * rhs.coeff(0, col);
++      for(int i = 1; i < lhs.cols(); i++)
++        res += lhs.coeff(row, i) * rhs.coeff(i, col);
++  }
++};
++
++// prevent buggy user code from causing an infinite recursion
++template<typename Lhs, typename Rhs>
++struct ei_product_coeff_impl<NoVectorization, -1, Lhs, Rhs>
++{
++  inline static void run(int, int, const Lhs&, const Rhs&, typename Lhs::Scalar&) {}
++};
++
++/*******************************************
++*** Scalar path with inner vectorization ***
++*******************************************/
++
++template<int Index, typename Lhs, typename Rhs, typename PacketScalar>
++struct ei_product_coeff_vectorized_unroller
++{
++  enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size };
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
++  {
++    ei_product_coeff_vectorized_unroller<Index-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
++    pres = ei_padd(pres, ei_pmul( lhs.template packet<Aligned>(row, Index) , rhs.template packet<Aligned>(Index, col) ));
++  }
++};
++
++template<typename Lhs, typename Rhs, typename PacketScalar>
++struct ei_product_coeff_vectorized_unroller<0, Lhs, Rhs, PacketScalar>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::PacketScalar &pres)
++  {
++    pres = ei_pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col));
++  }
++};
++
++template<int Index, typename Lhs, typename Rhs>
++struct ei_product_coeff_impl<InnerVectorization, Index, Lhs, Rhs>
++{
++  typedef typename Lhs::PacketScalar PacketScalar;
++  enum { PacketSize = ei_packet_traits<typename Lhs::Scalar>::size };
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
++  {
++    PacketScalar pres;
++    ei_product_coeff_vectorized_unroller<Index+1-PacketSize, Lhs, Rhs, PacketScalar>::run(row, col, lhs, rhs, pres);
++    ei_product_coeff_impl<NoVectorization,Index,Lhs,Rhs>::run(row, col, lhs, rhs, res);
++    res = ei_predux(pres);
++  }
++};
++
++template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime>
++struct ei_product_coeff_vectorized_dyn_selector
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
++  {
++    res = ei_dot_impl<
++      Block<Lhs, 1, ei_traits<Lhs>::ColsAtCompileTime>,
++      Block<Rhs, ei_traits<Rhs>::RowsAtCompileTime, 1>,
++      LinearVectorization, NoUnrolling>::run(lhs.row(row), rhs.col(col));
++  }
++};
++
++// NOTE the 3 following specializations are because taking .col(0) on a vector is a bit slower
++// NOTE maybe they are now useless since we have a specialization for Block<Matrix>
++template<typename Lhs, typename Rhs, int RhsCols>
++struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
++{
++  inline static void run(int /*row*/, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
++  {
++    res = ei_dot_impl<
++      Lhs,
++      Block<Rhs, ei_traits<Rhs>::RowsAtCompileTime, 1>,
++      LinearVectorization, NoUnrolling>::run(lhs, rhs.col(col));
++  }
++};
++
++template<typename Lhs, typename Rhs, int LhsRows>
++struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
++{
++  inline static void run(int row, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
++  {
++    res = ei_dot_impl<
++      Block<Lhs, 1, ei_traits<Lhs>::ColsAtCompileTime>,
++      Rhs,
++      LinearVectorization, NoUnrolling>::run(lhs.row(row), rhs);
++  }
++};
++
++template<typename Lhs, typename Rhs>
++struct ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
++{
++  inline static void run(int /*row*/, int /*col*/, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
++  {
++    res = ei_dot_impl<
++      Lhs,
++      Rhs,
++      LinearVectorization, NoUnrolling>::run(lhs, rhs);
++  }
++};
++
++template<typename Lhs, typename Rhs>
++struct ei_product_coeff_impl<InnerVectorization, Dynamic, Lhs, Rhs>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, typename Lhs::Scalar &res)
++  {
++    ei_product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, res);
++  }
++};
++
++/*******************
++*** Packet path  ***
++*******************/
++
++template<int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
++struct ei_product_packet_impl<RowMajor, Index, Lhs, Rhs, PacketScalar, LoadMode>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
++  {
++    ei_product_packet_impl<RowMajor, Index-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
++    res =  ei_pmadd(ei_pset1(lhs.coeff(row, Index)), rhs.template packet<LoadMode>(Index, col), res);
++  }
++};
++
++template<int Index, typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
++struct ei_product_packet_impl<ColMajor, Index, Lhs, Rhs, PacketScalar, LoadMode>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
++  {
++    ei_product_packet_impl<ColMajor, Index-1, Lhs, Rhs, PacketScalar, LoadMode>::run(row, col, lhs, rhs, res);
++    res =  ei_pmadd(lhs.template packet<LoadMode>(row, Index), ei_pset1(rhs.coeff(Index, col)), res);
++  }
++};
++
++template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
++struct ei_product_packet_impl<RowMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
++  {
++    res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
++  }
++};
++
++template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
++struct ei_product_packet_impl<ColMajor, 0, Lhs, Rhs, PacketScalar, LoadMode>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar &res)
++  {
++    res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col)));
++  }
++};
++
++template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
++struct ei_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
++  {
++    res = ei_pmul(ei_pset1(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
++      for(int i = 1; i < lhs.cols(); i++)
++        res =  ei_pmadd(ei_pset1(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
++  }
++};
++
++template<typename Lhs, typename Rhs, typename PacketScalar, int LoadMode>
++struct ei_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, PacketScalar, LoadMode>
++{
++  inline static void run(int row, int col, const Lhs& lhs, const Rhs& rhs, PacketScalar& res)
++  {
++    res = ei_pmul(lhs.template packet<LoadMode>(row, 0), ei_pset1(rhs.coeff(0, col)));
++      for(int i = 1; i < lhs.cols(); i++)
++        res =  ei_pmadd(lhs.template packet<LoadMode>(row, i), ei_pset1(rhs.coeff(i, col)), res);
++  }
++};
++
++/***************************************************************************
++* Cache friendly product callers and specific nested evaluation strategies
++***************************************************************************/
++
++template<typename Scalar, typename RhsType>
++static void ei_cache_friendly_product_colmajor_times_vector(
++  int size, const Scalar* lhs, int lhsStride, const RhsType& rhs, Scalar* res);
++
++template<typename Scalar, typename ResType>
++static void ei_cache_friendly_product_rowmajor_times_vector(
++  const Scalar* lhs, int lhsStride, const Scalar* rhs, int rhsSize, ResType& res);
++
++template<typename ProductType,
++  int LhsRows  = ei_traits<ProductType>::RowsAtCompileTime,
++  int LhsOrder = int(ei_traits<ProductType>::LhsFlags)&RowMajorBit ? RowMajor : ColMajor,
++  int LhsHasDirectAccess = int(ei_traits<ProductType>::LhsFlags)&DirectAccessBit? HasDirectAccess : NoDirectAccess,
++  int RhsCols  = ei_traits<ProductType>::ColsAtCompileTime,
++  int RhsOrder = int(ei_traits<ProductType>::RhsFlags)&RowMajorBit ? RowMajor : ColMajor,
++  int RhsHasDirectAccess = int(ei_traits<ProductType>::RhsFlags)&DirectAccessBit? HasDirectAccess : NoDirectAccess>
++struct ei_cache_friendly_product_selector
++{
++  template<typename DestDerived>
++  inline static void run(DestDerived& res, const ProductType& product)
++  {
++    product._cacheFriendlyEvalAndAdd(res);
++  }
++};
++
++// optimized colmajor * vector path
++template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
++struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,NoDirectAccess,1,RhsOrder,RhsAccess>
++{
++  template<typename DestDerived>
++  inline static void run(DestDerived& res, const ProductType& product)
++  {
++    const int size = product.rhs().rows();
++    for (int k=0; k<size; ++k)
++        res += product.rhs().coeff(k) * product.lhs().col(k);
++  }
++};
++
++// optimized cache friendly colmajor * vector path for matrix with direct access flag
++// NOTE this path could also be enabled for expressions if we add runtime align queries
++template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
++struct ei_cache_friendly_product_selector<ProductType,LhsRows,ColMajor,HasDirectAccess,1,RhsOrder,RhsAccess>
++{
++  typedef typename ProductType::Scalar Scalar;
++
++  template<typename DestDerived>
++  inline static void run(DestDerived& res, const ProductType& product)
++  {
++    enum {
++      EvalToRes = (ei_packet_traits<Scalar>::size==1)
++                ||((DestDerived::Flags&ActualPacketAccessBit) && (!(DestDerived::Flags & RowMajorBit))) };
++    Scalar* EIGEN_RESTRICT _res;
++    if (EvalToRes)
++       _res = &res.coeffRef(0);
++    else
++    {
++      _res = ei_alloc_stack(Scalar,res.size());
++      Map<Matrix<Scalar,DestDerived::RowsAtCompileTime,1> >(_res, res.size()) = res;
++    }
++    ei_cache_friendly_product_colmajor_times_vector(res.size(),
++      &product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(),
++      product.rhs(), _res);
++
++    if (!EvalToRes)
++    {
++      res = Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size());
++      ei_free_stack(_res, Scalar, res.size());
++    }
++  }
++};
++
++// optimized vector * rowmajor path
++template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
++struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,RowMajor,NoDirectAccess>
++{
++  template<typename DestDerived>
++  inline static void run(DestDerived& res, const ProductType& product)
++  {
++    const int cols = product.lhs().cols();
++    for (int j=0; j<cols; ++j)
++      res += product.lhs().coeff(j) * product.rhs().row(j);
++  }
++};
++
++// optimized cache friendly vector * rowmajor path for matrix with direct access flag
++// NOTE this path could also be enabled for expressions if we add runtime align queries
++template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
++struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,RowMajor,HasDirectAccess>
++{
++  typedef typename ProductType::Scalar Scalar;
++
++  template<typename DestDerived>
++  inline static void run(DestDerived& res, const ProductType& product)
++  {
++    enum {
++      EvalToRes = (ei_packet_traits<Scalar>::size==1)
++                ||((DestDerived::Flags & ActualPacketAccessBit) && (DestDerived::Flags & RowMajorBit)) };
++    Scalar* EIGEN_RESTRICT _res;
++    if (EvalToRes)
++       _res = &res.coeffRef(0);
++    else
++    {
++      _res = ei_alloc_stack(Scalar, res.size());
++      Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size()) = res;
++    }
++    ei_cache_friendly_product_colmajor_times_vector(res.size(),
++      &product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(),
++      product.lhs().transpose(), _res);
++
++    if (!EvalToRes)
++    {
++      res = Map<Matrix<Scalar,DestDerived::SizeAtCompileTime,1> >(_res, res.size());
++      ei_free_stack(_res, Scalar, res.size());
++    }
++  }
++};
++
++// optimized rowmajor - vector product
++template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
++struct ei_cache_friendly_product_selector<ProductType,LhsRows,RowMajor,HasDirectAccess,1,RhsOrder,RhsAccess>
++{
++  typedef typename ProductType::Scalar Scalar;
++  typedef typename ei_traits<ProductType>::_RhsNested Rhs;
++  enum {
++      UseRhsDirectly = ((ei_packet_traits<Scalar>::size==1) || (Rhs::Flags&ActualPacketAccessBit))
++                     && (!(Rhs::Flags & RowMajorBit)) };
++
++  template<typename DestDerived>
++  inline static void run(DestDerived& res, const ProductType& product)
++  {
++    Scalar* EIGEN_RESTRICT _rhs;
++    if (UseRhsDirectly)
++       _rhs = &product.rhs().const_cast_derived().coeffRef(0);
++    else
++    {
++      _rhs = ei_alloc_stack(Scalar, product.rhs().size());
++      Map<Matrix<Scalar,Rhs::SizeAtCompileTime,1> >(_rhs, product.rhs().size()) = product.rhs();
++    }
++    ei_cache_friendly_product_rowmajor_times_vector(&product.lhs().const_cast_derived().coeffRef(0,0), product.lhs().stride(),
++                                                    _rhs, product.rhs().size(), res);
++
++    if (!UseRhsDirectly) ei_free_stack(_rhs, Scalar, product.rhs().size());
++  }
++};
++
++// optimized vector - colmajor product
++template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
++struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,ColMajor,HasDirectAccess>
++{
++  typedef typename ProductType::Scalar Scalar;
++  typedef typename ei_traits<ProductType>::_LhsNested Lhs;
++  enum {
++      UseLhsDirectly = ((ei_packet_traits<Scalar>::size==1) || (Lhs::Flags&ActualPacketAccessBit))
++                     && (Lhs::Flags & RowMajorBit) };
++
++  template<typename DestDerived>
++  inline static void run(DestDerived& res, const ProductType& product)
++  {
++    Scalar* EIGEN_RESTRICT _lhs;
++    if (UseLhsDirectly)
++       _lhs = &product.lhs().const_cast_derived().coeffRef(0);
++    else
++    {
++      _lhs = ei_alloc_stack(Scalar, product.lhs().size());
++      Map<Matrix<Scalar,Lhs::SizeAtCompileTime,1> >(_lhs, product.lhs().size()) = product.lhs();
++    }
++    ei_cache_friendly_product_rowmajor_times_vector(&product.rhs().const_cast_derived().coeffRef(0,0), product.rhs().stride(),
++                                                    _lhs, product.lhs().size(), res);
++
++    if(!UseLhsDirectly) ei_free_stack(_lhs, Scalar, product.lhs().size());
++  }
++};
++
++// discard this case which has to be handled by the default path
++// (we keep it to be sure to hit a compilation error if this is not the case)
++template<typename ProductType, int LhsRows, int RhsOrder, int RhsAccess>
++struct ei_cache_friendly_product_selector<ProductType,LhsRows,RowMajor,NoDirectAccess,1,RhsOrder,RhsAccess>
++{};
++
++// discard this case which has to be handled by the default path
++// (we keep it to be sure to hit a compilation error if this is not the case)
++template<typename ProductType, int LhsOrder, int LhsAccess, int RhsCols>
++struct ei_cache_friendly_product_selector<ProductType,1,LhsOrder,LhsAccess,RhsCols,ColMajor,NoDirectAccess>
++{};
++
++
++/** \internal */
++template<typename Derived>
++template<typename Lhs,typename Rhs>
++inline Derived&
++MatrixBase<Derived>::operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other)
++{
++  if (other._expression()._useCacheFriendlyProduct())
++    ei_cache_friendly_product_selector<Product<Lhs,Rhs,CacheFriendlyProduct> >::run(const_cast_derived(), other._expression());
++  else
++    lazyAssign(derived() + other._expression());
++  return derived();
++}
++
++template<typename Derived>
++template<typename Lhs, typename Rhs>
++inline Derived& MatrixBase<Derived>::lazyAssign(const Product<Lhs,Rhs,CacheFriendlyProduct>& product)
++{
++  if (product._useCacheFriendlyProduct())
++  {
++    setZero();
++    ei_cache_friendly_product_selector<Product<Lhs,Rhs,CacheFriendlyProduct> >::run(const_cast_derived(), product);
++  }
++  else
++  {
++    lazyAssign<Product<Lhs,Rhs,CacheFriendlyProduct> >(product);
++  }
++  return derived();
++}
++
++template<typename T> class ei_product_eval_to_column_major
++{
++    typedef typename ei_traits<T>::Scalar _Scalar;
++    enum {
++          _Rows = ei_traits<T>::RowsAtCompileTime,
++          _Cols = ei_traits<T>::ColsAtCompileTime,
++          _MaxRows = ei_traits<T>::MaxRowsAtCompileTime,
++          _MaxCols = ei_traits<T>::MaxColsAtCompileTime,
++          _Flags = ei_traits<T>::Flags
++    };
++
++  public:
++    typedef Matrix<_Scalar,
++                  _Rows, _Cols, _MaxRows, _MaxCols,
++                  ei_corrected_matrix_flags<
++                      _Scalar,
++                      _Rows, _Cols, _MaxRows, _MaxCols,
++                      _Flags
++                  >::ret & ~RowMajorBit
++            > type;
++};
++
++template<typename T> struct ei_product_copy_rhs
++{
++  typedef typename ei_meta_if<
++         (ei_traits<T>::Flags & RowMajorBit)
++      || (!(ei_traits<T>::Flags & DirectAccessBit)),
++      typename ei_product_eval_to_column_major<T>::type,
++      const T&
++    >::ret type;
++};
++
++template<typename T> struct ei_product_copy_lhs
++{
++  typedef typename ei_meta_if<
++      (!(int(ei_traits<T>::Flags) & DirectAccessBit)),
++      typename ei_eval<T>::type,
++      const T&
++    >::ret type;
++};
++
++template<typename Lhs, typename Rhs, int ProductMode>
++template<typename DestDerived>
++inline void Product<Lhs,Rhs,ProductMode>::_cacheFriendlyEvalAndAdd(DestDerived& res) const
++{
++  typedef typename ei_product_copy_lhs<_LhsNested>::type LhsCopy;
++  typedef typename ei_unref<LhsCopy>::type _LhsCopy;
++  typedef typename ei_product_copy_rhs<_RhsNested>::type RhsCopy;
++  typedef typename ei_unref<RhsCopy>::type _RhsCopy;
++  LhsCopy lhs(m_lhs);
++  RhsCopy rhs(m_rhs);
++  ei_cache_friendly_product<Scalar>(
++    rows(), cols(), lhs.cols(),
++    _LhsCopy::Flags&RowMajorBit, (const Scalar*)&(lhs.const_cast_derived().coeffRef(0,0)), lhs.stride(),
++    _RhsCopy::Flags&RowMajorBit, (const Scalar*)&(rhs.const_cast_derived().coeffRef(0,0)), rhs.stride(),
++    Flags&RowMajorBit, (Scalar*)&(res.coeffRef(0,0)), res.stride()
++  );
++}
++
++#endif // EIGEN_PRODUCT_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Redux.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Redux.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Redux.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Redux.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,117 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_REDUX_H
++#define EIGEN_REDUX_H
++
++template<typename BinaryOp, typename Derived, int Start, int Length>
++struct ei_redux_impl
++{
++  enum {
++    HalfLength = Length/2
++  };
++
++  typedef typename ei_result_of<BinaryOp(typename Derived::Scalar)>::type Scalar;
++
++  static Scalar run(const Derived &mat, const BinaryOp& func)
++  {
++    return func(
++      ei_redux_impl<BinaryOp, Derived, Start, HalfLength>::run(mat, func),
++      ei_redux_impl<BinaryOp, Derived, Start+HalfLength, Length - HalfLength>::run(mat, func));
++  }
++};
++
++template<typename BinaryOp, typename Derived, int Start>
++struct ei_redux_impl<BinaryOp, Derived, Start, 1>
++{
++  enum {
++    col = Start / Derived::RowsAtCompileTime,
++    row = Start % Derived::RowsAtCompileTime
++  };
++
++  typedef typename ei_result_of<BinaryOp(typename Derived::Scalar)>::type Scalar;
++
++  static Scalar run(const Derived &mat, const BinaryOp &)
++  {
++    return mat.coeff(row, col);
++  }
++};
++
++template<typename BinaryOp, typename Derived, int Start>
++struct ei_redux_impl<BinaryOp, Derived, Start, Dynamic>
++{
++  typedef typename ei_result_of<BinaryOp(typename Derived::Scalar)>::type Scalar;
++  static Scalar run(const Derived& mat, const BinaryOp& func)
++  {
++    Scalar res;
++    res = mat.coeff(0,0);
++    for(int i = 1; i < mat.rows(); i++)
++      res = func(res, mat.coeff(i, 0));
++    for(int j = 1; j < mat.cols(); j++)
++      for(int i = 0; i < mat.rows(); i++)
++        res = func(res, mat.coeff(i, j));
++    return res;
++  }
++};
++
++/** \returns the result of a full redux operation on the whole matrix or vector using \a func
++  *
++  * The template parameter \a BinaryOp is the type of the functor \a func which must be
++  * an associative operator. Both current STL and TR1 functor styles are handled.
++  *
++  * \sa MatrixBase::sum(), MatrixBase::minCoeff(), MatrixBase::maxCoeff(), MatrixBase::colwise(), MatrixBase::rowwise()
++  */
++template<typename Derived>
++template<typename BinaryOp>
++typename ei_result_of<BinaryOp(typename ei_traits<Derived>::Scalar)>::type
++MatrixBase<Derived>::redux(const BinaryOp& func) const
++{
++  const bool unroll = SizeAtCompileTime * CoeffReadCost
++                    + (SizeAtCompileTime-1) * ei_functor_traits<BinaryOp>::Cost
++                    <= EIGEN_UNROLLING_LIMIT;
++  return ei_redux_impl<BinaryOp, Derived, 0,
++                            unroll ? int(SizeAtCompileTime) : Dynamic>
++          ::run(derived(), func);
++}
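++
++// A minimal usage sketch (illustrative): per the note above, plain STL binary functors are
++// accepted, so std::plus<> from <functional> is assumed to be usable here:
++//
++//   MatrixXf m(2,2);
++//   float total = m.redux(std::plus<float>());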
++
++/** \returns the minimum of all coefficients of *this
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar
++MatrixBase<Derived>::minCoeff() const
++{
++  return this->redux(Eigen::ei_scalar_min_op<Scalar>());
++}
++
++/** \returns the maximum of all coefficients of *this
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar
++MatrixBase<Derived>::maxCoeff() const
++{
++  return this->redux(Eigen::ei_scalar_max_op<Scalar>());
++}
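++
++// A minimal usage sketch (illustrative): the two reductions above are redux() applied to
++// the library's internal min/max functors:
++//
++//   MatrixXd m(3,3);
++//   double lo = m.minCoeff();
++//   double hi = m.maxCoeff();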
++
++#endif // EIGEN_REDUX_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/SolveTriangular.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/SolveTriangular.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/SolveTriangular.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/SolveTriangular.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,272 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SOLVETRIANGULAR_H
++#define EIGEN_SOLVETRIANGULAR_H
++
++template<typename XprType> struct ei_is_part { enum {value=false}; };
++template<typename XprType, unsigned int Mode> struct ei_is_part<Part<XprType,Mode> > { enum {value=true}; };
++
++template<typename Lhs, typename Rhs,
++  int TriangularPart = (int(Lhs::Flags) & LowerTriangularBit)
++                     ? Lower
++                     : (int(Lhs::Flags) & UpperTriangularBit)
++                     ? Upper
++                     : -1,
++  int StorageOrder = ei_is_part<Lhs>::value ? -1  // this is to solve ambiguous specializations
++                   : int(Lhs::Flags) & RowMajorBit ? RowMajor : ColMajor
++  >
++struct ei_solve_triangular_selector;
++
++// transform a Part xpr to a Flagged xpr
++template<typename Lhs, unsigned int LhsMode, typename Rhs, int UpLo, int StorageOrder>
++struct ei_solve_triangular_selector<Part<Lhs,LhsMode>,Rhs,UpLo,StorageOrder>
++{
++  static void run(const Part<Lhs,LhsMode>& lhs, Rhs& other)
++  {
++    ei_solve_triangular_selector<Flagged<Lhs,LhsMode,0>,Rhs>::run(lhs._expression(), other);
++  }
++};
++
++// forward substitution, row-major
++template<typename Lhs, typename Rhs, int UpLo>
++struct ei_solve_triangular_selector<Lhs,Rhs,UpLo,RowMajor>
++{
++  typedef typename Rhs::Scalar Scalar;
++  static void run(const Lhs& lhs, Rhs& other)
++  {
++    const bool IsLower = (UpLo==Lower);
++    const int size = lhs.cols();
++    /* We perform the inverse product per block of 4 rows such that we perfectly match
++     * our optimized matrix * vector product. blockyStart represents the number of rows
++     * we have to process first using the non-block version.
++     */
++    int blockyStart = (std::max(size-5,0)/4)*4;
++    if (IsLower)
++      blockyStart = size - blockyStart;
++    else
++      blockyStart -= 1;
++    for(int c=0 ; c<other.cols() ; ++c)
++    {
++      // process the first rows using the non-block version
++      if(!(Lhs::Flags & UnitDiagBit))
++      {
++        if (IsLower)
++          other.coeffRef(0,c) = other.coeff(0,c)/lhs.coeff(0, 0);
++        else
++          other.coeffRef(size-1,c) = other.coeff(size-1, c)/lhs.coeff(size-1, size-1);
++      }
++      for(int i=(IsLower ? 1 : size-2); IsLower ? i<blockyStart : i>blockyStart; i += (IsLower ? 1 : -1) )
++      {
++        Scalar tmp = other.coeff(i,c)
++          - (IsLower ? ((lhs.row(i).start(i)) * other.col(c).start(i)).coeff(0,0)
++                     : ((lhs.row(i).end(size-i-1)) * other.col(c).end(size-i-1)).coeff(0,0));
++        if (Lhs::Flags & UnitDiagBit)
++          other.coeffRef(i,c) = tmp;
++        else
++          other.coeffRef(i,c) = tmp/lhs.coeff(i,i);
++      }
++
++      // now let's process the remaining rows 4 at a time
++      for(int i=blockyStart; IsLower ? i<size : i>0; )
++      {
++        int startBlock = i;
++        int endBlock = startBlock + (IsLower ? 4 : -4);
++        
++        /* Process the block of 4 rows by i columns, and keep the result in a temporary vector */
++        Matrix<Scalar,4,1> btmp;
++        if (IsLower)
++          btmp = lhs.block(startBlock,0,4,i) * other.col(c).start(i);
++        else
++          btmp = lhs.block(i-3,i+1,4,size-1-i) * other.col(c).end(size-1-i);
++        
++        /* Let's process the 4x4 sub-matrix as usual.
++         * btmp stores the diagonal coefficients used to update the remaining part of the result.
++         */
++        {
++          Scalar tmp = other.coeff(startBlock,c)-btmp.coeff(IsLower?0:3);
++          if (Lhs::Flags & UnitDiagBit)
++            other.coeffRef(i,c) = tmp;
++          else
++            other.coeffRef(i,c) = tmp/lhs.coeff(i,i);
++        }
++
++        i += IsLower ? 1 : -1;
++        for (;IsLower ? i<endBlock : i>endBlock; i += IsLower ? 1 : -1)
++        {
++          int remainingSize = IsLower ? i-startBlock : startBlock-i;
++          Scalar tmp = other.coeff(i,c)
++            - btmp.coeff(IsLower ? remainingSize : 3-remainingSize)
++            - (   lhs.row(i).block(IsLower ? startBlock : i+1, remainingSize)
++              * other.col(c).block(IsLower ? startBlock : i+1, remainingSize)).coeff(0,0);
++
++          if (Lhs::Flags & UnitDiagBit)
++            other.coeffRef(i,c) = tmp;
++          else
++            other.coeffRef(i,c) = tmp/lhs.coeff(i,i);
++        }
++      }
++    }
++  }
++};
++
++// Implements the following configurations:
++//  - inv(Lower,         ColMajor) * Column vector
++//  - inv(Lower,UnitDiag,ColMajor) * Column vector
++//  - inv(Upper,         ColMajor) * Column vector
++//  - inv(Upper,UnitDiag,ColMajor) * Column vector
++template<typename Lhs, typename Rhs, int UpLo>
++struct ei_solve_triangular_selector<Lhs,Rhs,UpLo,ColMajor>
++{
++  typedef typename Rhs::Scalar Scalar;
++  typedef typename ei_packet_traits<Scalar>::type Packet;
++  enum { PacketSize =  ei_packet_traits<Scalar>::size };
++
++  static void run(const Lhs& lhs, Rhs& other)
++  {
++    static const bool IsLower = (UpLo==Lower);
++    const int size = lhs.cols();
++    for(int c=0 ; c<other.cols() ; ++c)
++    {
++      /* let's perform the inverse product per block of 4 columns such that we perfectly match
++       * our optimized matrix * vector product. blockyEnd represents the number of rows
++       * we can process using the block version.
++       */
++      int blockyEnd = (std::max(size-5,0)/4)*4;
++      if (!IsLower)
++        blockyEnd = size-1 - blockyEnd;
++      for(int i=IsLower ? 0 : size-1; IsLower ? i<blockyEnd : i>blockyEnd;)
++      {
++        /* Let's process the 4x4 sub-matrix as usual.
++         * btmp stores the diagonal coefficients used to update the remaining part of the result.
++         */
++        int startBlock = i;
++        int endBlock = startBlock + (IsLower ? 4 : -4);
++        Matrix<Scalar,4,1> btmp;
++        for (;IsLower ? i<endBlock : i>endBlock;
++             i += IsLower ? 1 : -1)
++        {
++          if(!(Lhs::Flags & UnitDiagBit))
++            other.coeffRef(i,c) /= lhs.coeff(i,i);
++          int remainingSize = IsLower ? endBlock-i-1 : i-endBlock-1;
++          if (remainingSize>0)
++            other.col(c).block((IsLower ? i : endBlock) + 1, remainingSize) -=
++                other.coeffRef(i,c)
++              * Block<Lhs,Dynamic,1>(lhs, (IsLower ? i : endBlock) + 1, i, remainingSize, 1);
++          btmp.coeffRef(IsLower ? i-startBlock : remainingSize) = -other.coeffRef(i,c);
++        }
++
++        /* Now we can efficiently update the remaining part of the result as a matrix * vector product.
++         * NOTE in order to reduce both compilation time and binary size, let's directly call
++         * the fast product implementation. It is equivalent to the following code:
++         *   other.col(c).end(size-endBlock) += (lhs.block(endBlock, startBlock, size-endBlock, endBlock-startBlock)
++         *                                       * other.col(c).block(startBlock,endBlock-startBlock)).lazy();
++         */
++        // FIXME this is cool but what about conjugate/adjoint expressions? do we want to evaluate them?
++        // this is a more general problem though.
++        ei_cache_friendly_product_colmajor_times_vector(
++          IsLower ? size-endBlock : endBlock+1,
++          &(lhs.const_cast_derived().coeffRef(IsLower ? endBlock : 0, IsLower ? startBlock : endBlock+1)),
++          lhs.stride(),
++          btmp, &(other.coeffRef(IsLower ? endBlock : 0, c)));
++      }
++
++      /* Now we have to process the remaining part as usual */
++      int i;
++      for(i=blockyEnd; IsLower ? i<size-1 : i>0; i += (IsLower ? 1 : -1) )
++      {
++        if(!(Lhs::Flags & UnitDiagBit))
++          other.coeffRef(i,c) /= lhs.coeff(i,i);
++
++        /* NOTE we cannot use lhs.col(i).end(size-i-1) because Part::coeffRef gets called by .col() to
++         * get the address of the start of the row
++         */
++        if(IsLower)
++          other.col(c).end(size-i-1) -= other.coeffRef(i,c) * Block<Lhs,Dynamic,1>(lhs, i+1,i, size-i-1,1);
++        else
++          other.col(c).start(i) -= other.coeffRef(i,c) * Block<Lhs,Dynamic,1>(lhs, 0,i, i, 1);
++      }
++      if(!(Lhs::Flags & UnitDiagBit))
++        other.coeffRef(i,c) /= lhs.coeff(i,i);
++    }
++  }
++};
++
++/** "in-place" version of MatrixBase::solveTriangular() where the result is written in \a other
++  *
++  * See MatrixBase::solveTriangular() for the details.
++  */
++template<typename Derived>
++template<typename OtherDerived>
++void MatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
++{
++  ei_assert(derived().cols() == derived().rows());
++  ei_assert(derived().cols() == other.rows());
++  ei_assert(!(Flags & ZeroDiagBit));
++  ei_assert(Flags & (UpperTriangularBit|LowerTriangularBit));
++
++  ei_solve_triangular_selector<Derived, OtherDerived>::run(derived(), other.derived());
++}
++
++/** \returns the product of the inverse of \c *this with \a other, \a *this being triangular.
++  *
++  * This function computes the inverse-matrix matrix product inverse(\c *this) * \a other.
++  * The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
++  * diagonal must be non-zero). It works as a backward (resp. forward) substitution if \c *this
++  * is an upper (resp. lower) triangular matrix.
++  *
++  * It is required that \c *this be marked as either an upper or a lower triangular matrix, which
++  * can be done by marked(), and that is automatically the case with expressions such as those returned
++  * by extract().
++  * 
++  * \addexample SolveTriangular \label How to solve a triangular system (aka. how to multiply the inverse of a triangular matrix by another one)
++  * 
++  * Example: \include MatrixBase_marked.cpp
++  * Output: \verbinclude MatrixBase_marked.out
++  * 
++  * This function is essentially a wrapper around the faster solveTriangularInPlace() function: it creates
++  * a temporary copy of \a other, calls solveTriangularInPlace() on the copy and returns it.
++  * Therefore, if \a other is not needed anymore, it is noticeably faster to call solveTriangularInPlace()
++  * instead of solveTriangular().
++  * 
++  * For users coming from BLAS, this function (and more specifically solveTriangularInPlace()) offers
++  * all the operations supported by the \c *TRSV and \c *TRSM BLAS routines.
++  *
++  * \b Tip: to perform a \em "right-inverse-multiply" you can simply transpose the operation, e.g.:
++  * \code
++  * M * T^-1  <=>  T.transpose().solveTriangularInPlace(M.transpose());
++  * \endcode
++  * 
++  * \sa solveTriangularInPlace(), marked(), extract()
++  */
++template<typename Derived>
++template<typename OtherDerived>
++typename OtherDerived::Eval MatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
++{
++  typename OtherDerived::Eval res(other);
++  solveTriangularInPlace(res);
++  return res;
++}
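++
++// A minimal usage sketch (not part of the original sources; assumes
++// #include <Eigen/Core>, using namespace Eigen, and the marked() API described above):
++//   Matrix3d T;   // assumed lower triangular with a non-zero diagonal
++//   Vector3d b;
++//   Vector3d x = T.marked<Lower>().solveTriangular(b); // solves T * x = b
++//   T.marked<Lower>().solveTriangularInPlace(b);       // same result, written into b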
++
++#endif // EIGEN_SOLVETRIANGULAR_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Sum.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Sum.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Sum.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Sum.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,267 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SUM_H
++#define EIGEN_SUM_H
++
++/***************************************************************************
++* Part 1 : the logic deciding a strategy for vectorization and unrolling
++***************************************************************************/
++
++template<typename Derived>
++struct ei_sum_traits
++{
++private:
++  enum {
++    PacketSize = ei_packet_traits<typename Derived::Scalar>::size
++  };
++
++public:
++  enum {
++    Vectorization = (int(Derived::Flags)&ActualPacketAccessBit)
++                 && (int(Derived::Flags)&LinearAccessBit)
++                 && (int(Derived::SizeAtCompileTime)>2*PacketSize)
++                  ? LinearVectorization
++                  : NoVectorization
++  };
++
++private:
++  enum {
++    Cost = Derived::SizeAtCompileTime * Derived::CoeffReadCost
++           + (Derived::SizeAtCompileTime-1) * NumTraits<typename Derived::Scalar>::AddCost,
++    UnrollingLimit = EIGEN_UNROLLING_LIMIT * (int(Vectorization) == int(NoVectorization) ? 1 : int(PacketSize))
++  };
++
++public:
++  enum {
++    Unrolling = Cost <= UnrollingLimit
++              ? CompleteUnrolling
++              : NoUnrolling
++  };
++};
++
++/***************************************************************************
++* Part 2 : unrollers
++***************************************************************************/
++
++/*** no vectorization ***/
++
++template<typename Derived, int Start, int Length>
++struct ei_sum_novec_unroller
++{
++  enum {
++    HalfLength = Length/2
++  };
++
++  typedef typename Derived::Scalar Scalar;
++
++  inline static Scalar run(const Derived &mat)
++  {
++    return ei_sum_novec_unroller<Derived, Start, HalfLength>::run(mat)
++         + ei_sum_novec_unroller<Derived, Start+HalfLength, Length-HalfLength>::run(mat);
++  }
++};
++
++template<typename Derived, int Start>
++struct ei_sum_novec_unroller<Derived, Start, 1>
++{
++  enum {
++    col = Start / Derived::RowsAtCompileTime,
++    row = Start % Derived::RowsAtCompileTime
++  };
++
++  typedef typename Derived::Scalar Scalar;
++
++  inline static Scalar run(const Derived &mat)
++  {
++    return mat.coeff(row, col);
++  }
++};
++
++/*** vectorization ***/
++
++template<typename Derived, int Index, int Stop,
++         bool LastPacket = (Stop-Index == ei_packet_traits<typename Derived::Scalar>::size)>
++struct ei_sum_vec_unroller
++{
++  enum {
++    row = int(Derived::Flags)&RowMajorBit
++        ? Index / int(Derived::ColsAtCompileTime)
++        : Index % Derived::RowsAtCompileTime,
++    col = int(Derived::Flags)&RowMajorBit
++        ? Index % int(Derived::ColsAtCompileTime)
++        : Index / Derived::RowsAtCompileTime
++  };
++
++  typedef typename Derived::Scalar Scalar;
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++
++  inline static PacketScalar run(const Derived &mat)
++  {
++    return ei_padd(
++      mat.template packet<Aligned>(row, col),
++      ei_sum_vec_unroller<Derived, Index+ei_packet_traits<typename Derived::Scalar>::size, Stop>::run(mat)
++    );
++  }
++};
++
++template<typename Derived, int Index, int Stop>
++struct ei_sum_vec_unroller<Derived, Index, Stop, true>
++{
++  enum {
++    row = int(Derived::Flags)&RowMajorBit
++        ? Index / int(Derived::ColsAtCompileTime)
++        : Index % Derived::RowsAtCompileTime,
++    col = int(Derived::Flags)&RowMajorBit
++        ? Index % int(Derived::ColsAtCompileTime)
++        : Index / Derived::RowsAtCompileTime,
++    alignment = (Derived::Flags & AlignedBit) ? Aligned : Unaligned
++  };
++
++  typedef typename Derived::Scalar Scalar;
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++
++  inline static PacketScalar run(const Derived &mat)
++  {
++    return mat.template packet<alignment>(row, col);
++  }
++};
++
++/***************************************************************************
++* Part 3 : implementation of all cases
++***************************************************************************/
++
++template<typename Derived,
++         int Vectorization = ei_sum_traits<Derived>::Vectorization,
++         int Unrolling = ei_sum_traits<Derived>::Unrolling
++>
++struct ei_sum_impl;
++
++template<typename Derived>
++struct ei_sum_impl<Derived, NoVectorization, NoUnrolling>
++{
++  typedef typename Derived::Scalar Scalar;
++  static Scalar run(const Derived& mat)
++  {
++    Scalar res;
++    res = mat.coeff(0, 0);
++    for(int i = 1; i < mat.rows(); i++)
++      res += mat.coeff(i, 0);
++    for(int j = 1; j < mat.cols(); j++)
++      for(int i = 0; i < mat.rows(); i++)
++        res += mat.coeff(i, j);
++    return res;
++  }
++};
++
++template<typename Derived>
++struct ei_sum_impl<Derived, NoVectorization, CompleteUnrolling>
++  : public ei_sum_novec_unroller<Derived, 0, Derived::SizeAtCompileTime>
++{};
++
++template<typename Derived>
++struct ei_sum_impl<Derived, LinearVectorization, NoUnrolling>
++{
++  typedef typename Derived::Scalar Scalar;
++  typedef typename ei_packet_traits<Scalar>::type PacketScalar;
++
++  static Scalar run(const Derived& mat)
++  {
++    const int size = mat.size();
++    const int packetSize = ei_packet_traits<Scalar>::size;
++    const int alignedStart =  (Derived::Flags & AlignedBit)
++                           || !(Derived::Flags & DirectAccessBit)
++                           ? 0
++                           : ei_alignmentOffset(&mat.const_cast_derived().coeffRef(0), size);
++    const int alignment = (Derived::Flags & DirectAccessBit) || (Derived::Flags & AlignedBit)
++                        ? Aligned : Unaligned;
++    const int alignedSize = ((size-alignedStart)/packetSize)*packetSize;
++    const int alignedEnd = alignedStart + alignedSize;
++    Scalar res;
++
++    if(alignedSize)
++    {
++      PacketScalar packet_res = mat.template packet<alignment>(alignedStart);
++      for(int index = alignedStart + packetSize; index < alignedEnd; index += packetSize)
++        packet_res = ei_padd(packet_res, mat.template packet<alignment>(index));
++      res = ei_predux(packet_res);
++    }
++    else // too small to vectorize anything.
++         // since this is dynamic-size hence inefficient anyway for such small sizes, don't try to optimize.
++    {
++      res = Scalar(0);
++    }
++
++    for(int index = 0; index < alignedStart; index++)
++      res += mat.coeff(index);
++
++    for(int index = alignedEnd; index < size; index++)
++      res += mat.coeff(index);
++
++    return res;
++  }
++};
++
++template<typename Derived>
++struct ei_sum_impl<Derived, LinearVectorization, CompleteUnrolling>
++{
++  typedef typename Derived::Scalar Scalar;
++  static Scalar run(const Derived& mat)
++  {
++    return ei_predux(
++      ei_sum_vec_unroller<Derived, 0, Derived::SizeAtCompileTime>::run(mat)
++    );
++  }
++};
++
++/***************************************************************************
++* Part 4 : implementation of MatrixBase methods
++***************************************************************************/
++
++/** \returns the sum of all coefficients of *this
++  *
++  * \sa trace()
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar
++MatrixBase<Derived>::sum() const
++{
++  return ei_sum_impl<Derived>::run(derived());
++}
++
++/** \returns the trace of \c *this, i.e. the sum of the coefficients on the main diagonal.
++  *
++  * \c *this can be any matrix, not necessarily square.
++  *
++  * \sa diagonal(), sum()
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar
++MatrixBase<Derived>::trace() const
++{
++  return diagonal().sum();
++}
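++
++// A brief sketch of sum() and trace() (illustrative only; assumes the Matrix2d typedef):
++//   Matrix2d m;
++//   m(0,0) = 1; m(0,1) = 2;
++//   m(1,0) = 3; m(1,1) = 4;
++//   double s = m.sum();    // 1+2+3+4 = 10
++//   double t = m.trace();  // 1+4 = 5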
++
++#endif // EIGEN_SUM_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Swap.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Swap.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Swap.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Swap.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,142 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SWAP_H
++#define EIGEN_SWAP_H
++
++/** \class SwapWrapper
++  *
++  * \internal
++  *
++  * \brief Internal helper class for swapping two expressions
++  */
++template<typename ExpressionType>
++struct ei_traits<SwapWrapper<ExpressionType> >
++{
++  typedef typename ExpressionType::Scalar Scalar;
++  enum {
++    RowsAtCompileTime = ExpressionType::RowsAtCompileTime,
++    ColsAtCompileTime = ExpressionType::ColsAtCompileTime,
++    MaxRowsAtCompileTime = ExpressionType::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = ExpressionType::MaxColsAtCompileTime,
++    Flags = ExpressionType::Flags,
++    CoeffReadCost = ExpressionType::CoeffReadCost
++  };
++};
++
++template<typename ExpressionType> class SwapWrapper
++  : public MatrixBase<SwapWrapper<ExpressionType> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(SwapWrapper)
++    typedef typename ei_packet_traits<Scalar>::type Packet;
++
++    inline SwapWrapper(ExpressionType& xpr) : m_expression(xpr) {}
++
++    inline int rows() const { return m_expression.rows(); }
++    inline int cols() const { return m_expression.cols(); }
++    inline int stride() const { return m_expression.stride(); }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      return m_expression.const_cast_derived().coeffRef(row, col);
++    }
++
++    inline Scalar& coeffRef(int index)
++    {
++      return m_expression.const_cast_derived().coeffRef(index);
++    }
++
++    template<typename OtherDerived>
++    void copyCoeff(int row, int col, const MatrixBase<OtherDerived>& other)
++    {
++      OtherDerived& _other = other.const_cast_derived();
++      ei_internal_assert(row >= 0 && row < rows()
++                         && col >= 0 && col < cols());
++      Scalar tmp = m_expression.coeff(row, col);
++      m_expression.coeffRef(row, col) = _other.coeff(row, col);
++      _other.coeffRef(row, col) = tmp;
++    }
++
++    template<typename OtherDerived>
++    void copyCoeff(int index, const MatrixBase<OtherDerived>& other)
++    {
++      OtherDerived& _other = other.const_cast_derived();
++      ei_internal_assert(index >= 0 && index < m_expression.size());
++      Scalar tmp = m_expression.coeff(index);
++      m_expression.coeffRef(index) = _other.coeff(index);
++      _other.coeffRef(index) = tmp;
++    }
++
++    template<typename OtherDerived, int StoreMode, int LoadMode>
++    void copyPacket(int row, int col, const MatrixBase<OtherDerived>& other)
++    {
++      OtherDerived& _other = other.const_cast_derived();
++      ei_internal_assert(row >= 0 && row < rows()
++                        && col >= 0 && col < cols());
++      Packet tmp = m_expression.template packet<StoreMode>(row, col);
++      m_expression.template writePacket<StoreMode>(row, col,
++        _other.template packet<LoadMode>(row, col)
++      );
++      _other.template writePacket<LoadMode>(row, col, tmp);
++    }
++
++    template<typename OtherDerived, int StoreMode, int LoadMode>
++    void copyPacket(int index, const MatrixBase<OtherDerived>& other)
++    {
++      OtherDerived& _other = other.const_cast_derived();
++      ei_internal_assert(index >= 0 && index < m_expression.size());
++      Packet tmp = m_expression.template packet<StoreMode>(index);
++      m_expression.template writePacket<StoreMode>(index,
++        _other.template packet<LoadMode>(index)
++      );
++      _other.template writePacket<LoadMode>(index, tmp);
++    }
++
++  protected:
++    ExpressionType& m_expression;
++};
++
++/** swaps *this with the expression \a other.
++  *
++  * \note \a other is only marked \c const for internal reasons, but of course
++  * it gets const-casted. One reason is that one will often call swap
++  * on temporary objects (hence non-const references are forbidden).
++  * Another reason is that lazyAssign takes a const argument anyway.
++  */
++template<typename Derived>
++template<typename OtherDerived>
++void MatrixBase<Derived>::swap(const MatrixBase<OtherDerived>& other)
++{
++  SwapWrapper<Derived>(derived()).lazyAssign(other);
++}
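++
++// A small sketch (the vector names are illustrative assumptions):
++//   Vector3d a, b;
++//   a.swap(b);   // exchanges the coefficients of a and b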
++
++#endif // EIGEN_SWAP_H
++
++
++
++
++
++
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Transpose.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Transpose.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Transpose.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Transpose.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,159 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_TRANSPOSE_H
++#define EIGEN_TRANSPOSE_H
++
++/** \class Transpose
++  *
++  * \brief Expression of the transpose of a matrix
++  *
++  * \param MatrixType the type of the object of which we are taking the transpose
++  *
++  * This class represents an expression of the transpose of a matrix.
++  * It is the return type of MatrixBase::transpose() and MatrixBase::adjoint()
++  * and most of the time this is the only way it is used.
++  *
++  * \sa MatrixBase::transpose(), MatrixBase::adjoint()
++  */
++template<typename MatrixType>
++struct ei_traits<Transpose<MatrixType> >
++{
++  typedef typename MatrixType::Scalar Scalar;
++  typedef typename ei_nested<MatrixType>::type MatrixTypeNested;
++  typedef typename ei_unref<MatrixTypeNested>::type _MatrixTypeNested;
++  enum {
++    RowsAtCompileTime = MatrixType::ColsAtCompileTime,
++    ColsAtCompileTime = MatrixType::RowsAtCompileTime,
++    MaxRowsAtCompileTime = MatrixType::MaxColsAtCompileTime,
++    MaxColsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
++    Flags = ((int(_MatrixTypeNested::Flags) ^ RowMajorBit)
++          & ~(LowerTriangularBit | UpperTriangularBit))
++          | (int(_MatrixTypeNested::Flags)&UpperTriangularBit ? LowerTriangularBit : 0)
++          | (int(_MatrixTypeNested::Flags)&LowerTriangularBit ? UpperTriangularBit : 0),
++    CoeffReadCost = _MatrixTypeNested::CoeffReadCost
++  };
++};
++
++template<typename MatrixType> class Transpose
++  : public MatrixBase<Transpose<MatrixType> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Transpose)
++
++    class InnerIterator;
++
++    inline Transpose(const MatrixType& matrix) : m_matrix(matrix) {}
++
++    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Transpose)
++
++    inline int rows() const { return m_matrix.cols(); }
++    inline int cols() const { return m_matrix.rows(); }
++    inline int nonZeros() const { return m_matrix.nonZeros(); }
++    inline int stride(void) const { return m_matrix.stride(); }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      return m_matrix.const_cast_derived().coeffRef(col, row);
++    }
++
++    inline const Scalar coeff(int row, int col) const
++    {
++      return m_matrix.coeff(col, row);
++    }
++
++    inline const Scalar coeff(int index) const
++    {
++      return m_matrix.coeff(index);
++    }
++
++    inline Scalar& coeffRef(int index)
++    {
++      return m_matrix.const_cast_derived().coeffRef(index);
++    }
++
++    template<int LoadMode>
++    inline const PacketScalar packet(int row, int col) const
++    {
++      return m_matrix.template packet<LoadMode>(col, row);
++    }
++
++    template<int LoadMode>
++    inline void writePacket(int row, int col, const PacketScalar& x)
++    {
++      m_matrix.const_cast_derived().template writePacket<LoadMode>(col, row, x);
++    }
++
++    template<int LoadMode>
++    inline const PacketScalar packet(int index) const
++    {
++      return m_matrix.template packet<LoadMode>(index);
++    }
++
++    template<int LoadMode>
++    inline void writePacket(int index, const PacketScalar& x)
++    {
++      m_matrix.const_cast_derived().template writePacket<LoadMode>(index, x);
++    }
++
++  protected:
++    const typename MatrixType::Nested m_matrix;
++};
++
++/** \returns an expression of the transpose of *this.
++  *
++  * Example: \include MatrixBase_transpose.cpp
++  * Output: \verbinclude MatrixBase_transpose.out
++  *
++  * \sa adjoint(), class DiagonalCoeffs */
++template<typename Derived>
++inline Transpose<Derived>
++MatrixBase<Derived>::transpose()
++{
++  return derived();
++}
++
++/** This is the const version of transpose(). \sa adjoint() */
++template<typename Derived>
++inline const Transpose<Derived>
++MatrixBase<Derived>::transpose() const
++{
++  return derived();
++}
++
++/** \returns an expression of the adjoint (i.e. conjugate transpose) of *this.
++  *
++  * Example: \include MatrixBase_adjoint.cpp
++  * Output: \verbinclude MatrixBase_adjoint.out
++  *
++  * \sa transpose(), conjugate(), class Transpose, class ei_scalar_conjugate_op */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::AdjointReturnType
++MatrixBase<Derived>::adjoint() const
++{
++  return conjugate().nestByValue();
++}
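++
++// A short sketch (the Matrix2cf typedef and names are illustrative assumptions):
++//   Matrix2cf m;                  // complex-valued 2x2 matrix
++//   Matrix2cf t = m.transpose();  // rows and columns exchanged
++//   Matrix2cf a = m.adjoint();    // conjugate transpose, i.e. m.conjugate().transpose()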
++
++#endif // EIGEN_TRANSPOSE_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/Constants.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/Constants.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/Constants.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/Constants.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,229 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_CONSTANTS_H
++#define EIGEN_CONSTANTS_H
++
++const int Dynamic = 10000;
++
++/** \defgroup flags
++  *
++  * These are the possible bits which can be OR'ed to constitute the flags of a matrix or
++  * expression.
++  *
++  * \sa MatrixBase::Flags
++  */
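++
++// For instance, generic code typically tests a single bit, e.g. a sketch:
++//   if (int(Derived::Flags) & RowMajorBit) { /* row-major storage */ }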
++
++/** \ingroup flags
++  *
++  * for a matrix, this means that the storage order is row-major.
++  * If this bit is not set, the storage order is column-major.
++  * For an expression, this determines the storage order of
++  * the matrix created by evaluation of that expression. */
++const unsigned int RowMajorBit = 0x1;
++
++/** \ingroup flags
++  *
++  * means the expression should be evaluated by the calling expression */
++const unsigned int EvalBeforeNestingBit = 0x2;
++
++/** \ingroup flags
++  *
++  * means the expression should be evaluated before any assignment */
++const unsigned int EvalBeforeAssigningBit = 0x4;
++
++/** \ingroup flags
++  *
++  * Short version: means the expression might be vectorized
++  *
++  * Long version: means that the coefficients can be handled by packets
++  * and start at a memory location whose alignment meets the requirements
++  * of the present CPU architecture for optimized packet access. In the fixed-size
++  * case, there is the additional condition that the total size of the coefficients
++  * array is a multiple of the packet size, so that it is possible to access all the
++  * coefficients by packets. In the dynamic-size case, there is no such condition
++  * on the total size, so it might not be possible to access the last few coefficients
++  * by packets.
++  *
++  * \note This bit can be set regardless of whether vectorization is actually enabled.
++  *       To check for actual vectorizability, see \a ActualPacketAccessBit.
++  */
++const unsigned int PacketAccessBit = 0x8;
++
++#ifdef EIGEN_VECTORIZE
++/** \ingroup flags
++  *
++  * If vectorization is enabled (EIGEN_VECTORIZE is defined) this constant
++  * is set to the value \a PacketAccessBit.
++  *
++  * If vectorization is not enabled (EIGEN_VECTORIZE is not defined) this constant
++  * is set to the value 0.
++  */
++const unsigned int ActualPacketAccessBit = PacketAccessBit;
++#else
++const unsigned int ActualPacketAccessBit = 0x0;
++#endif
++
++/** \ingroup flags
++  *
++  * Short version: means the expression can be seen as a 1D vector.
++  *
++  * Long version: means that one can access the coefficients
++  * of this expression by coeff(int), and coeffRef(int) in the case of an lvalue expression. These
++  * index-based access methods are guaranteed
++  * to not have to do any runtime computation of a (row, col)-pair from the index, so that it
++  * is guaranteed that whenever it is available, index-based access is at least as fast as
++  * (row,col)-based access. Expressions for which that isn't possible don't have the LinearAccessBit.
++  *
++  * If both PacketAccessBit and LinearAccessBit are set, then the
++  * packets of this expression can be accessed by packet(int), and writePacket(int) in the case of an
++  * lvalue expression.
++  *
++  * Typically, all vector expressions have the LinearAccessBit, but there is one exception:
++  * Product expressions don't have it, because it would be troublesome for vectorization, even when the
++  * Product is a vector expression. Thus, vector Product expressions allow index-based coefficient access but
++  * not index-based packet access, so they don't have the LinearAccessBit.
++  */
++const unsigned int LinearAccessBit = 0x10;
++
++/** \ingroup flags
++  *
++  * Means that the underlying array of coefficients can be directly accessed. This means two things.
++  * First, references to the coefficients must be available through coeffRef(int, int). This rules out read-only
++  * expressions whose coefficients are computed on demand by coeff(int, int). Second, the memory layout of the
++  * array of coefficients must be exactly the natural one suggested by rows(), cols(), stride(), and the RowMajorBit.
++  * This rules out expressions such as DiagonalCoeffs, whose coefficients, though referenceable, do not have
++  * such a regular memory layout.
++  */
++const unsigned int DirectAccessBit = 0x20;
++
++/** \ingroup flags
++  *
++  * means the first coefficient packet is guaranteed to be aligned */
++const unsigned int AlignedBit = 0x40;
++
++/** \ingroup flags
++  *
++  * means all diagonal coefficients are equal to 0 */
++const unsigned int ZeroDiagBit = 0x80;
++
++/** \ingroup flags
++  *
++  * means all diagonal coefficients are equal to 1 */
++const unsigned int UnitDiagBit = 0x100;
++
++/** \ingroup flags
++  *
++  * means the matrix is selfadjoint (M=M*). */
++const unsigned int SelfAdjointBit = 0x200;
++
++/** \ingroup flags
++  *
++  * means the strictly lower triangular part is 0 */
++const unsigned int UpperTriangularBit = 0x400;
++
++/** \ingroup flags
++  *
++  * means the strictly upper triangular part is 0 */
++const unsigned int LowerTriangularBit = 0x800;
++
++/** \ingroup flags
++  *
++  * means the expression includes sparse matrices and the sparse path has to be taken. */
++const unsigned int SparseBit = 0x1000;
++
++/** \ingroup flags
++  *
++  * currently unused. Means the matrix probably has a very big size.
++  * Could eventually be used as a hint to determine which algorithms
++  * to use. */
++const unsigned int LargeBit = 0x2000;
++
++// list of flags that are inherited by default
++const unsigned int HereditaryBits = RowMajorBit
++                                  | EvalBeforeNestingBit
++                                  | EvalBeforeAssigningBit
++                                  | LargeBit
++                                  | SparseBit;
++
++// Possible values for the Mode parameter of part() and of extract()
++const unsigned int Upper = UpperTriangularBit;
++const unsigned int StrictlyUpper = UpperTriangularBit | ZeroDiagBit;
++const unsigned int Lower = LowerTriangularBit;
++const unsigned int StrictlyLower = LowerTriangularBit | ZeroDiagBit;
++const unsigned int SelfAdjoint = SelfAdjointBit;
++
++// additional possible values for the Mode parameter of extract()
++const unsigned int UnitUpper = UpperTriangularBit | UnitDiagBit;
++const unsigned int UnitLower = LowerTriangularBit | UnitDiagBit;
++const unsigned int Diagonal = Upper | Lower;
++
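++// For example (a sketch; m is a hypothetical square matrix expression):
++//   m.part<StrictlyLower>()  // the strictly lower triangular part
++//   m.extract<UnitUpper>()   // upper triangular part with an implicit unit diagonal
++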
++enum { Aligned, Unaligned };
++enum { ForceAligned, AsRequested };
++enum { ConditionalJumpCost = 5 };
++enum CornerType { TopLeft, TopRight, BottomLeft, BottomRight };
++enum DirectionType { Vertical, Horizontal };
++enum ProductEvaluationMode { NormalProduct, CacheFriendlyProduct, DiagonalProduct, SparseProduct };
++
++enum {
++  /** \internal Equivalent to a slice vectorization for fixed-size matrices having good alignment
++    * and good size */
++  InnerVectorization,
++  /** \internal Vectorization path using a single loop plus scalar loops for the
++    * unaligned boundaries */
++  LinearVectorization,
++  /** \internal Generic vectorization path using one vectorized loop per row/column with some
++    * scalar loops to handle the unaligned boundaries */
++  SliceVectorization,
++  NoVectorization
++};
++
++enum {
++  CompleteUnrolling,
++  InnerUnrolling,
++  NoUnrolling
++};
++
++enum {
++  Dense   = 0,
++  Sparse  = SparseBit
++};
++
++enum {
++  ColMajor = 0,
++  RowMajor = RowMajorBit
++};
++
++enum {
++  NoDirectAccess = 0,
++  HasDirectAccess = DirectAccessBit
++};
++
++const int FullyCoherentAccessPattern  = 0x1;
++const int InnerCoherentAccessPattern  = 0x2 | FullyCoherentAccessPattern;
++const int OuterCoherentAccessPattern  = 0x4 | InnerCoherentAccessPattern;
++const int RandomAccessPattern         = 0x8 | OuterCoherentAccessPattern;
++
++#endif // EIGEN_CONSTANTS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/ForwardDeclarations.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/ForwardDeclarations.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/ForwardDeclarations.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/ForwardDeclarations.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,112 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_FORWARDDECLARATIONS_H
++#define EIGEN_FORWARDDECLARATIONS_H
++
++template<typename T> struct ei_traits;
++template<typename T> struct NumTraits;
++template<typename Scalar, int Rows, int Cols, int MaxRows, int MaxCols, unsigned int SuggestedFlags> class ei_corrected_matrix_flags;
++
++template<typename _Scalar, int _Rows, int _Cols,
++         int _MaxRows = _Rows, int _MaxCols = _Cols,
++         unsigned int _Flags = ei_corrected_matrix_flags<
++                                   _Scalar,
++                                   _Rows, _Cols, _MaxRows, _MaxCols,
++                                   EIGEN_DEFAULT_MATRIX_FLAGS
++                               >::ret
++>
++class Matrix;
++
++template<typename ExpressionType, unsigned int Added, unsigned int Removed> class Flagged;
++template<typename ExpressionType> class NestByValue;
++template<typename ExpressionType> class SwapWrapper;
++template<typename MatrixType> class Minor;
++template<typename MatrixType, int BlockRows=Dynamic, int BlockCols=Dynamic, int PacketAccess=AsRequested,
++         int _DirectAccessStatus = ei_traits<MatrixType>::Flags&DirectAccessBit> class Block;
++template<typename MatrixType> class Transpose;
++template<typename MatrixType> class Conjugate;
++template<typename NullaryOp, typename MatrixType>         class CwiseNullaryOp;
++template<typename UnaryOp,   typename MatrixType>         class CwiseUnaryOp;
++template<typename BinaryOp,  typename Lhs, typename Rhs>  class CwiseBinaryOp;
++template<typename Lhs, typename Rhs, int ProductMode> class Product;
++template<typename CoeffsVectorType> class DiagonalMatrix;
++template<typename MatrixType> class DiagonalCoeffs;
++template<typename MatrixType, int PacketAccess = AsRequested> class Map;
++template<typename MatrixType, unsigned int Mode> class Part;
++template<typename MatrixType, unsigned int Mode> class Extract;
++template<typename ExpressionType> class Cwise;
++template<typename ExpressionType, int Direction> class PartialRedux;
++template<typename MatrixType, typename BinaryOp, int Direction> class PartialReduxExpr;
++
++template<typename Lhs, typename Rhs> struct ei_product_mode;
++template<typename Lhs, typename Rhs, int ProductMode = ei_product_mode<Lhs,Rhs>::value> struct ProductReturnType;
++
++template<typename Scalar> struct ei_scalar_sum_op;
++template<typename Scalar> struct ei_scalar_difference_op;
++template<typename Scalar> struct ei_scalar_product_op;
++template<typename Scalar> struct ei_scalar_quotient_op;
++template<typename Scalar> struct ei_scalar_opposite_op;
++template<typename Scalar> struct ei_scalar_conjugate_op;
++template<typename Scalar> struct ei_scalar_real_op;
++template<typename Scalar> struct ei_scalar_abs_op;
++template<typename Scalar> struct ei_scalar_abs2_op;
++template<typename Scalar> struct ei_scalar_sqrt_op;
++template<typename Scalar> struct ei_scalar_exp_op;
++template<typename Scalar> struct ei_scalar_log_op;
++template<typename Scalar> struct ei_scalar_cos_op;
++template<typename Scalar> struct ei_scalar_sin_op;
++template<typename Scalar> struct ei_scalar_pow_op;
++template<typename Scalar> struct ei_scalar_inverse_op;
++template<typename Scalar> struct ei_scalar_square_op;
++template<typename Scalar> struct ei_scalar_cube_op;
++template<typename Scalar, typename NewType> struct ei_scalar_cast_op;
++template<typename Scalar> struct ei_scalar_multiple_op;
++template<typename Scalar> struct ei_scalar_quotient1_op;
++template<typename Scalar> struct ei_scalar_min_op;
++template<typename Scalar> struct ei_scalar_max_op;
++template<typename Scalar> struct ei_scalar_random_op;
++template<typename Scalar> struct ei_scalar_add_op;
++
++template<typename Scalar>
++void ei_cache_friendly_product(
++  int _rows, int _cols, int depth,
++  bool _lhsRowMajor, const Scalar* _lhs, int _lhsStride,
++  bool _rhsRowMajor, const Scalar* _rhs, int _rhsStride,
++  bool resRowMajor, Scalar* res, int resStride);
++
++template<typename MatrixType> class LU;
++template<typename MatrixType> class QR;
++template<typename MatrixType> class SVD;
++template<typename MatrixType> class Cholesky;
++template<typename MatrixType> class CholeskyWithoutSquareRoot;
++
++// Geometry module:
++template<typename Lhs, typename Rhs> class Cross;
++template<typename Scalar> class Quaternion;
++template<typename Scalar> class Rotation2D;
++template<typename Scalar> class AngleAxis;
++template<typename Scalar,int Dim> class Transform;
++
++#endif // EIGEN_FORWARDDECLARATIONS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/Macros.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/Macros.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/Macros.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/Macros.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,165 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_MACROS_H
++#define EIGEN_MACROS_H
++
++#undef minor
++
++#ifdef EIGEN_DONT_USE_UNROLLED_LOOPS
++#define EIGEN_UNROLLING_LIMIT 0
++#endif
++
++/** Defines the maximal loop size to enable meta unrolling of loops */
++#ifndef EIGEN_UNROLLING_LIMIT
++#define EIGEN_UNROLLING_LIMIT 100
++#endif
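++
++// For instance, a user may tune this before including any Eigen header (a sketch):
++//   #define EIGEN_UNROLLING_LIMIT 50       // lower the meta-unrolling threshold
++//   #define EIGEN_DONT_USE_UNROLLED_LOOPS  // or disable meta-unrolling entirely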
++
++#define EIGEN_DEFAULT_MATRIX_FLAGS 0
++
++/** Defines a hint size used when dealing with large matrices and L2 cache friendliness.
++  * More precisely, its square value represents the number of bytes which can be assumed to stay in the L2 cache.
++  */
++#ifndef EIGEN_TUNE_FOR_L2_CACHE_SIZE
++#define EIGEN_TUNE_FOR_L2_CACHE_SIZE 1024
++#endif
++
++#define USING_PART_OF_NAMESPACE_EIGEN \
++EIGEN_USING_MATRIX_TYPEDEFS \
++using Eigen::Matrix; \
++using Eigen::MatrixBase;
++
++#ifdef NDEBUG
++# ifndef EIGEN_NO_DEBUG
++#  define EIGEN_NO_DEBUG
++# endif
++#endif
++
++#ifndef ei_assert
++#ifdef EIGEN_NO_DEBUG
++#define ei_assert(x)
++#else
++#define ei_assert(x) assert(x)
++#endif
++#endif
++
++#ifdef EIGEN_INTERNAL_DEBUGGING
++#define ei_internal_assert(x) ei_assert(x);
++#else
++#define ei_internal_assert(x)
++#endif
++
++#ifdef EIGEN_NO_DEBUG
++#define EIGEN_ONLY_USED_FOR_DEBUG(x) (void)x
++#else
++#define EIGEN_ONLY_USED_FOR_DEBUG(x)
++#endif
++
++// FIXME with the always_inline attribute,
++// gcc 3.4.x reports the following compilation error:
++//   Eval.h:91: sorry, unimplemented: inlining failed in call to 'const Eigen::Eval<Derived> Eigen::MatrixBase<Scalar, Derived>::eval() const'
++//    : function body not available
++#if EIGEN_GNUC_AT_LEAST(4,0)
++#define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
++#else
++#define EIGEN_ALWAYS_INLINE inline
++#endif
++
++#if (defined __GNUC__)
++#define EIGEN_DONT_INLINE __attribute__((noinline))
++#else
++#define EIGEN_DONT_INLINE
++#endif
++
++#if (defined __GNUC__)
++#define EIGEN_ALIGN_128 __attribute__ ((aligned(16)))
++#else
++#define EIGEN_ALIGN_128
++#endif
++
++#define EIGEN_RESTRICT __restrict
++
++#define EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
++template<typename OtherDerived> \
++Derived& operator Op(const MatrixBase<OtherDerived>& other) \
++{ \
++  return Eigen::MatrixBase<Derived>::operator Op(other.derived()); \
++} \
++Derived& operator Op(const Derived& other) \
++{ \
++  return Eigen::MatrixBase<Derived>::operator Op(other); \
++}
++
++#define EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
++template<typename Other> \
++Derived& operator Op(const Other& scalar) \
++{ \
++  return Eigen::MatrixBase<Derived>::operator Op(scalar); \
++}
++
++#define EIGEN_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
++EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
++EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
++EIGEN_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
++EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
++EIGEN_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
++
++#define _EIGEN_GENERIC_PUBLIC_INTERFACE(Derived, BaseClass) \
++typedef BaseClass Base; \
++typedef typename Eigen::ei_traits<Derived>::Scalar Scalar; \
++typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
++typedef typename Base::PacketScalar PacketScalar; \
++typedef typename Eigen::ei_nested<Derived>::type Nested; \
++typedef typename Eigen::ei_eval<Derived>::type Eval; \
++enum { RowsAtCompileTime = Eigen::ei_traits<Derived>::RowsAtCompileTime, \
++       ColsAtCompileTime = Eigen::ei_traits<Derived>::ColsAtCompileTime, \
++       MaxRowsAtCompileTime = Eigen::ei_traits<Derived>::MaxRowsAtCompileTime, \
++       MaxColsAtCompileTime = Eigen::ei_traits<Derived>::MaxColsAtCompileTime, \
++       Flags = Eigen::ei_traits<Derived>::Flags, \
++       CoeffReadCost = Eigen::ei_traits<Derived>::CoeffReadCost, \
++       SizeAtCompileTime = Base::SizeAtCompileTime, \
++       MaxSizeAtCompileTime = Base::MaxSizeAtCompileTime, \
++       IsVectorAtCompileTime = Base::IsVectorAtCompileTime };
++
++#define EIGEN_GENERIC_PUBLIC_INTERFACE(Derived) \
++_EIGEN_GENERIC_PUBLIC_INTERFACE(Derived, Eigen::MatrixBase<Derived>) \
++friend class Eigen::MatrixBase<Derived>;
++
++#define EIGEN_ENUM_MIN(a,b) (((int)a <= (int)b) ? (int)a : (int)b)
++#define EIGEN_ENUM_MAX(a,b) (((int)a >= (int)b) ? (int)a : (int)b)
++
++/*  ei_alloc_stack(TYPE,SIZE) allocates sizeof(TYPE)*SIZE bytes on the stack if sizeof(TYPE)*SIZE is smaller
++ *  than EIGEN_STACK_ALLOCATION_LIMIT. Otherwise the memory is allocated using operator new.
++ *  Data allocated with ei_alloc_stack must be freed by calling ei_free_stack(PTR,TYPE,SIZE).
++ */
++#ifdef __linux__
++# define ei_alloc_stack(TYPE,SIZE) ((sizeof(TYPE)*(SIZE)>16000000) ? new TYPE[SIZE] : (TYPE*)alloca(sizeof(TYPE)*(SIZE)))
++# define ei_free_stack(PTR,TYPE,SIZE) if (sizeof(TYPE)*SIZE>16000000) delete[] PTR
++#else
++# define ei_alloc_stack(TYPE,SIZE) new TYPE[SIZE]
++# define ei_free_stack(PTR,TYPE,SIZE) delete[] PTR
++#endif
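++
++// Illustrative use of the pair above (the buffer name and size are hypothetical):
++//   float* buf = ei_alloc_stack(float, n);
++//   // ... use buf[0] .. buf[n-1] ...
++//   ei_free_stack(buf, float, n);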
++
++#endif // EIGEN_MACROS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/Meta.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/Meta.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/Meta.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/Meta.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,258 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_META_H
++#define EIGEN_META_H
++
++// just a workaround because GCC seems to not really like empty structs
++#ifdef __GNUG__
++  struct ei_empty_struct{char _ei_dummy_;};
++  #define EIGEN_EMPTY_STRUCT : Eigen::ei_empty_struct
++#else
++  #define EIGEN_EMPTY_STRUCT
++#endif
++
++// classes inheriting ei_no_assignment_operator don't generate a default operator=.
++class ei_no_assignment_operator
++{
++  private:
++    ei_no_assignment_operator& operator=(const ei_no_assignment_operator&);
++};
++
++template<int Value> class ei_int_if_dynamic EIGEN_EMPTY_STRUCT
++{
++  public:
++    ei_int_if_dynamic() {}
++    explicit ei_int_if_dynamic(int) {}
++    static int value() { return Value; }
++    void setValue(int) {}
++};
++
++template<> class ei_int_if_dynamic<Dynamic>
++{
++    int m_value;
++    ei_int_if_dynamic() {}
++  public:
++    explicit ei_int_if_dynamic(int value) : m_value(value) {}
++    int value() const { return m_value; }
++    void setValue(int value) { m_value = value; }
++};
++
++
++template<bool Condition, typename Then, typename Else>
++struct ei_meta_if { typedef Then ret; };
++
++template<typename Then, typename Else>
++struct ei_meta_if <false, Then, Else> { typedef Else ret; };
++
++template<typename T, typename U> struct ei_is_same_type { enum { ret = 0 }; };
++template<typename T> struct ei_is_same_type<T,T> { enum { ret = 1 }; };
++
++struct ei_meta_true {};
++struct ei_meta_false {};
++
++/** \internal
++  * Convenient struct to get the result type of a unary or binary functor.
++  *
++  * It supports both the current STL mechanism (using the result_type member) and the
++  * upcoming next STL generation (using a templated result member).
++  * If none of these members is provided, then the type of the first argument is returned.
++  */
++template<typename T> struct ei_result_of {};
++
++struct ei_has_none {int a[1];};
++struct ei_has_std_result_type {int a[2];};
++struct ei_has_tr1_result {int a[3];};
++
++template<typename Func, typename ArgType, int SizeOf=sizeof(ei_has_none)>
++struct ei_unary_result_of_select {typedef ArgType type;};
++
++template<typename Func, typename ArgType>
++struct ei_unary_result_of_select<Func, ArgType, sizeof(ei_has_std_result_type)> {typedef typename Func::result_type type;};
++
++template<typename Func, typename ArgType>
++struct ei_unary_result_of_select<Func, ArgType, sizeof(ei_has_tr1_result)> {typedef typename Func::template result<Func(ArgType)>::type type;};
++
++template<typename Func, typename ArgType>
++struct ei_result_of<Func(ArgType)> {
++    template<typename T>
++    static ei_has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
++    template<typename T>
++    static ei_has_tr1_result      testFunctor(T const *, typename T::template result<T(ArgType)>::type const * = 0);
++    static ei_has_none            testFunctor(...);
++
++    // note that the following indirection is needed for gcc-3.3
++    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
++    typedef typename ei_unary_result_of_select<Func, ArgType, FunctorType>::type type;
++};
++
++template<typename Func, typename ArgType0, typename ArgType1, int SizeOf=sizeof(ei_has_none)>
++struct ei_binary_result_of_select {typedef ArgType0 type;};
++
++template<typename Func, typename ArgType0, typename ArgType1>
++struct ei_binary_result_of_select<Func, ArgType0, ArgType1, sizeof(ei_has_std_result_type)>
++{typedef typename Func::result_type type;};
++
++template<typename Func, typename ArgType0, typename ArgType1>
++struct ei_binary_result_of_select<Func, ArgType0, ArgType1, sizeof(ei_has_tr1_result)>
++{typedef typename Func::template result<Func(ArgType0,ArgType1)>::type type;};
++
++template<typename Func, typename ArgType0, typename ArgType1>
++struct ei_result_of<Func(ArgType0,ArgType1)> {
++    template<typename T>
++    static ei_has_std_result_type testFunctor(T const *, typename T::result_type const * = 0);
++    template<typename T>
++    static ei_has_tr1_result      testFunctor(T const *, typename T::template result<T(ArgType0,ArgType1)>::type const * = 0);
++    static ei_has_none            testFunctor(...);
++
++    // note that the following indirection is needed for gcc-3.3
++    enum {FunctorType = sizeof(testFunctor(static_cast<Func*>(0)))};
++    typedef typename ei_binary_result_of_select<Func, ArgType0, ArgType1, FunctorType>::type type;
++};
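++
++// For illustration (a hypothetical functor, not part of the original sources):
++//   struct plus_functor {
++//     typedef double result_type;
++//     double operator()(double a, double b) const { return a + b; }
++//   };
++//   // ei_result_of<plus_functor(double,double)>::type is then double,
++//   // detected through the result_type member as implemented above.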
++
++template<typename T> struct ei_functor_traits
++{
++  enum
++  {
++    Cost = 10,
++    PacketAccess = false
++  };
++};
++
++template<typename T> struct ei_packet_traits
++{
++  typedef T type;
++  enum {size=1};
++};
++
++template<typename T> struct ei_unpacket_traits
++{
++  typedef T type;
++  enum {size=1};
++};
++
++
++template<typename Scalar, int Rows, int Cols, int MaxRows, int MaxCols, unsigned int SuggestedFlags>
++class ei_corrected_matrix_flags
++{
++    enum { row_major_bit = (Rows != 1 && Cols != 1)  // if this is not a vector,
++                                                     // then the storage order really matters,
++                                                     // so let us strictly honor the user's choice.
++                         ? SuggestedFlags&RowMajorBit
++                         : Cols > 1 ? RowMajorBit : 0,
++           inner_max_size = row_major_bit ? MaxCols : MaxRows,
++           is_big = inner_max_size == Dynamic,
++           linear_size = Cols * Rows,
++           packet_access_bit
++            = ei_packet_traits<Scalar>::size > 1
++              && (is_big || linear_size%ei_packet_traits<Scalar>::size==0)
++              ? PacketAccessBit : 0,
++           aligned_bit = packet_access_bit
++                         && (is_big || linear_size%ei_packet_traits<Scalar>::size==0) ? AlignedBit : 0
++    };
++
++  public:
++    enum { ret = (SuggestedFlags & ~(EvalBeforeNestingBit | EvalBeforeAssigningBit | PacketAccessBit | RowMajorBit))
++                                    | LinearAccessBit | DirectAccessBit | packet_access_bit | row_major_bit | aligned_bit
++    };
++};
++
++template<int _Rows, int _Cols> struct ei_size_at_compile_time
++{
++  enum { ret = (_Rows==Dynamic || _Cols==Dynamic) ? Dynamic : _Rows * _Cols };
++};
++
++template<typename T, int Sparseness = ei_traits<T>::Flags&SparseBit> class ei_eval;
++
++template<typename T> class ei_eval<T,Dense>
++{
++    typedef typename ei_traits<T>::Scalar _Scalar;
++    enum {_Rows = ei_traits<T>::RowsAtCompileTime,
++          _Cols = ei_traits<T>::ColsAtCompileTime,
++          _MaxRows = ei_traits<T>::MaxRowsAtCompileTime,
++          _MaxCols = ei_traits<T>::MaxColsAtCompileTime,
++          _Flags = ei_traits<T>::Flags
++    };
++
++  public:
++    typedef Matrix<_Scalar,
++                  _Rows, _Cols, _MaxRows, _MaxCols,
++                  ei_corrected_matrix_flags<
++                      _Scalar,
++                      _Rows, _Cols, _MaxRows, _MaxCols,
++                      _Flags
++                  >::ret
++            > type;
++};
++
++template<typename T> struct ei_unref { typedef T type; };
++template<typename T> struct ei_unref<T&> { typedef T type; };
++
++template<typename T> struct ei_unconst { typedef T type; };
++template<typename T> struct ei_unconst<const T> { typedef T type; };
++
++template<typename T> struct ei_cleantype { typedef T type; };
++template<typename T> struct ei_cleantype<const T> { typedef T type; };
++template<typename T> struct ei_cleantype<const T&> { typedef T type; };
++template<typename T> struct ei_cleantype<T&> { typedef T type; };
++
++template<typename T> struct ei_must_nest_by_value { enum { ret = false }; };
++template<typename T> struct ei_must_nest_by_value<NestByValue<T> > { enum { ret = true }; };
++
++
++template<typename T, int n=1, typename EvalType = typename ei_eval<T>::type> struct ei_nested
++{
++  enum {
++    CostEval   = (n+1) * int(NumTraits<typename ei_traits<T>::Scalar>::ReadCost),
++    CostNoEval = (n-1) * int(ei_traits<T>::CoeffReadCost)
++  };
++  typedef typename ei_meta_if<
++    ei_must_nest_by_value<T>::ret,
++    T,
++    typename ei_meta_if<
++      (int(ei_traits<T>::Flags) & EvalBeforeNestingBit)
++      || ( int(CostEval) <= int(CostNoEval) ),
++      EvalType,
++      const T&
++    >::ret
++  >::ret type;
++};
++
++template<unsigned int Flags> struct ei_are_flags_consistent
++{
++  enum { ret = !( (Flags&UnitDiagBit && Flags&ZeroDiagBit) )
++  };
++};
++
++/** \internal Gives the type of a sub-matrix or sub-vector of a matrix of type \a ExpressionType and size \a Size
++  * TODO: it could be a good idea to define one big ReturnType struct ??
++  */
++template<typename ExpressionType, int RowsOrSize=Dynamic, int Cols=Dynamic> struct BlockReturnType {
++  typedef Block<ExpressionType, (ei_traits<ExpressionType>::RowsAtCompileTime == 1 ? 1 : RowsOrSize),
++                                (ei_traits<ExpressionType>::ColsAtCompileTime == 1 ? 1 : RowsOrSize)> SubVectorType;
++  typedef Block<ExpressionType, RowsOrSize, Cols> Type;
++};
++
++#endif // EIGEN_META_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/StaticAssert.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/StaticAssert.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/util/StaticAssert.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/util/StaticAssert.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,125 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_STATIC_ASSERT_H
++#define EIGEN_STATIC_ASSERT_H
++
++/* Some notes on Eigen's static assertion mechanism:
++ *
++ *  - in EIGEN_STATIC_ASSERT(CONDITION,MSG) the parameter CONDITION must be a compile time boolean
++ *    expression, and MSG an enum listed in struct ei_static_assert<true>
++ *
++ *  - define EIGEN_NO_STATIC_ASSERT to disable them (and save compilation time)
++ *    in that case, the static assertion is converted to the following runtime assert:
++ *      ei_assert(CONDITION && "MSG")
++ *
++ *  - currently EIGEN_STATIC_ASSERT can only be used in function scope
++ *
++ */
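++
++// Hypothetical usage sketch (illustrative only, not part of the original sources;
++// the function name squaredLength3 is made up): a function template that refuses
++// to compile for anything but 3D vectors.
++// \code
++// template<typename Derived>
++// typename Derived::Scalar squaredLength3(const MatrixBase<Derived>& v)
++// {
++//   EIGEN_STATIC_ASSERT(Derived::SizeAtCompileTime == 3,
++//                       this_method_is_only_for_vectors_of_a_specific_size);
++//   return v.norm2();
++// }
++// \endcode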
++
++#ifndef EIGEN_NO_STATIC_ASSERT
++
++  #ifdef __GXX_EXPERIMENTAL_CXX0X__
++
++    // if native static_assert is enabled, let's use it
++    #define EIGEN_STATIC_ASSERT(X,MSG) static_assert(X,#MSG)
++
++  #else // CXX0X
++
++    template<bool condition>
++    struct ei_static_assert {};
++
++    template<>
++    struct ei_static_assert<true>
++    {
++      enum {
++        you_tried_calling_a_vector_method_on_a_matrix,
++        you_mixed_vectors_of_different_sizes,
++        you_mixed_matrices_of_different_sizes,
++        this_method_is_only_for_vectors_of_a_specific_size,
++        this_method_is_only_for_matrices_of_a_specific_size,
++        you_did_a_programming_error,
++        you_called_a_fixed_size_method_on_a_dynamic_size_matrix_or_vector,
++        unaligned_load_and_store_operations_unimplemented_on_AltiVec,
++        scalar_type_must_be_floating_point,
++        default_writting_to_selfadjoint_not_supported,
++        writting_to_triangular_part_with_unit_diag_is_not_supported,
++        this_method_is_only_for_fixed_size
++      };
++    };
++
++    #define EIGEN_STATIC_ASSERT(CONDITION,MSG) \
++      if (ei_static_assert<CONDITION ? true : false>::MSG) {}
++
++  #endif // CXX0X
++
++#else // EIGEN_NO_STATIC_ASSERT
++
++  #define EIGEN_STATIC_ASSERT(CONDITION,MSG) ei_assert((CONDITION) && #MSG)
++
++#endif // EIGEN_NO_STATIC_ASSERT
++
++
++// static assertion failing if the type \a TYPE is not a vector type
++#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE) \
++  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime, \
++                      you_tried_calling_a_vector_method_on_a_matrix)
++
++// static assertion failing if the type \a TYPE is not fixed-size
++#define EIGEN_STATIC_ASSERT_FIXED_SIZE(TYPE) \
++  EIGEN_STATIC_ASSERT(TYPE::SizeAtCompileTime!=Eigen::Dynamic, \
++                      you_called_a_fixed_size_method_on_a_dynamic_size_matrix_or_vector)
++
++// static assertion failing if the type \a TYPE is not a vector type of the given size
++#define EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(TYPE, SIZE) \
++  EIGEN_STATIC_ASSERT(TYPE::IsVectorAtCompileTime && TYPE::SizeAtCompileTime==SIZE, \
++                      this_method_is_only_for_vectors_of_a_specific_size)
++
++// static assertion failing if the type \a TYPE is not a matrix type with the given numbers of rows and columns
++#define EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(TYPE, ROWS, COLS) \
++  EIGEN_STATIC_ASSERT(TYPE::RowsAtCompileTime==ROWS && TYPE::ColsAtCompileTime==COLS, \
++                      this_method_is_only_for_matrices_of_a_specific_size)
++
++// static assertion failing if the two vector expression types are not compatible (same fixed-size or dynamic size)
++#define EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(TYPE0,TYPE1) \
++  EIGEN_STATIC_ASSERT( \
++      (int(TYPE0::SizeAtCompileTime)==Eigen::Dynamic \
++    || int(TYPE1::SizeAtCompileTime)==Eigen::Dynamic \
++    || int(TYPE0::SizeAtCompileTime)==int(TYPE1::SizeAtCompileTime)),\
++    you_mixed_vectors_of_different_sizes)
++
++// static assertion failing if the two matrix expression types are not compatible (same fixed-size or dynamic size)
++#define EIGEN_STATIC_ASSERT_SAME_MATRIX_SIZE(TYPE0,TYPE1) \
++  EIGEN_STATIC_ASSERT( \
++     ((int(TYPE0::RowsAtCompileTime)==Eigen::Dynamic \
++    || int(TYPE1::RowsAtCompileTime)==Eigen::Dynamic \
++    || int(TYPE0::RowsAtCompileTime)==int(TYPE1::RowsAtCompileTime)) \
++   && (int(TYPE0::ColsAtCompileTime)==Eigen::Dynamic \
++    || int(TYPE1::ColsAtCompileTime)==Eigen::Dynamic \
++    || int(TYPE0::ColsAtCompileTime)==int(TYPE1::ColsAtCompileTime))),\
++    you_mixed_matrices_of_different_sizes)
++
++
++#endif // EIGEN_STATIC_ASSERT_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Visitor.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Visitor.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Core/Visitor.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Core/Visitor.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,197 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_VISITOR_H
++#define EIGEN_VISITOR_H
++
++template<typename Visitor, typename Derived, int UnrollCount>
++struct ei_visitor_impl
++{
++  enum {
++    col = (UnrollCount-1) / Derived::RowsAtCompileTime,
++    row = (UnrollCount-1) % Derived::RowsAtCompileTime
++  };
++
++  inline static void run(const Derived &mat, Visitor& visitor)
++  {
++    ei_visitor_impl<Visitor, Derived, UnrollCount-1>::run(mat, visitor);
++    visitor(mat.coeff(row, col), row, col);
++  }
++};
++
++template<typename Visitor, typename Derived>
++struct ei_visitor_impl<Visitor, Derived, 1>
++{
++  inline static void run(const Derived &mat, Visitor& visitor)
++  {
++    return visitor.init(mat.coeff(0, 0), 0, 0);
++  }
++};
++
++template<typename Visitor, typename Derived>
++struct ei_visitor_impl<Visitor, Derived, Dynamic>
++{
++  inline static void run(const Derived& mat, Visitor& visitor)
++  {
++    visitor.init(mat.coeff(0,0), 0, 0);
++    for(int i = 1; i < mat.rows(); i++)
++      visitor(mat.coeff(i, 0), i, 0);
++    for(int j = 1; j < mat.cols(); j++)
++      for(int i = 0; i < mat.rows(); i++)
++        visitor(mat.coeff(i, j), i, j);
++  }
++};
++
++
++/** Applies the visitor \a visitor to all the coefficients of the matrix or vector.
++  *
++  * The template parameter \a Visitor is the type of the visitor and provides the following interface:
++  * \code
++  * struct MyVisitor {
++  *   // called for the first coefficient
++  *   void init(const Scalar& value, int i, int j);
++  *   // called for all other coefficients
++  *   void operator() (const Scalar& value, int i, int j);
++  * };
++  * \endcode
++  *
++  * \note compared to one or two \em for \em loops, visitors offer automatic
++  * unrolling for small fixed size matrices.
++  *
++  * \sa minCoeff(int*,int*), maxCoeff(int*,int*), MatrixBase::redux()
++  */
++template<typename Derived>
++template<typename Visitor>
++void MatrixBase<Derived>::visit(Visitor& visitor) const
++{
++  const bool unroll = SizeAtCompileTime * CoeffReadCost
++                    + (SizeAtCompileTime-1) * ei_functor_traits<Visitor>::Cost
++                    <= EIGEN_UNROLLING_LIMIT;
++  return ei_visitor_impl<Visitor, Derived,
++      unroll ? int(SizeAtCompileTime) : Dynamic
++    >::run(derived(), visitor);
++}
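++
++// Hypothetical example (illustrative only, not part of the original sources):
++// a visitor counting the strictly positive coefficients of a float matrix.
++// \code
++// struct CountPositiveVisitor {
++//   int count;
++//   void init(const float& value, int, int)       { count = (value > 0.f) ? 1 : 0; }
++//   void operator()(const float& value, int, int) { if (value > 0.f) ++count; }
++// };
++// // usage: CountPositiveVisitor v; m.visit(v); // v.count then holds the result
++// \endcode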
++
++/** \internal
++  * \brief Base class to implement min and max visitors
++  */
++template <typename Scalar>
++struct ei_coeff_visitor
++{
++  int row, col;
++  Scalar res;
++  inline void init(const Scalar& value, int i, int j)
++  {
++    res = value;
++    row = i;
++    col = j;
++  }
++};
++
++/** \internal
++  * \brief Visitor computing the min coefficient with its value and coordinates
++  *
++  * \sa MatrixBase::minCoeff(int*, int*)
++  */
++template <typename Scalar>
++struct ei_min_coeff_visitor : ei_coeff_visitor<Scalar>
++{
++  void operator() (const Scalar& value, int i, int j)
++  {
++    if(value < this->res)
++    {
++      this->res = value;
++      this->row = i;
++      this->col = j;
++    }
++  }
++};
++
++template<typename Scalar>
++struct ei_functor_traits<ei_min_coeff_visitor<Scalar> > {
++  enum {
++    Cost = NumTraits<Scalar>::AddCost
++  };
++};
++
++/** \internal
++  * \brief Visitor computing the max coefficient with its value and coordinates
++  *
++  * \sa MatrixBase::maxCoeff(int*, int*)
++  */
++template <typename Scalar>
++struct ei_max_coeff_visitor : ei_coeff_visitor<Scalar>
++{
++  void operator() (const Scalar& value, int i, int j)
++  {
++    if(value > this->res)
++    {
++      this->res = value;
++      this->row = i;
++      this->col = j;
++    }
++  }
++};
++
++template<typename Scalar>
++struct ei_functor_traits<ei_max_coeff_visitor<Scalar> > {
++  enum {
++    Cost = NumTraits<Scalar>::AddCost
++  };
++};
++
++/** \returns the minimum of all coefficients of *this
++  * and puts its location in *row and, if \a col is not null, in *col.
++  *
++  * \sa MatrixBase::maxCoeff(int*,int*), MatrixBase::visit(), MatrixBase::minCoeff()
++  */
++template<typename Derived>
++typename ei_traits<Derived>::Scalar
++MatrixBase<Derived>::minCoeff(int* row, int* col) const
++{
++  ei_min_coeff_visitor<Scalar> minVisitor;
++  this->visit(minVisitor);
++  *row = minVisitor.row;
++  if (col) *col = minVisitor.col;
++  return minVisitor.res;
++}
++
++/** \returns the maximum of all coefficients of *this
++  * and puts its location in *row and, if \a col is not null, in *col.
++  *
++  * \sa MatrixBase::minCoeff(int*,int*), MatrixBase::visit(), MatrixBase::maxCoeff()
++  */
++template<typename Derived>
++typename ei_traits<Derived>::Scalar
++MatrixBase<Derived>::maxCoeff(int* row, int* col) const
++{
++  ei_max_coeff_visitor<Scalar> maxVisitor;
++  this->visit(maxVisitor);
++  *row = maxVisitor.row;
++  if (col) *col = maxVisitor.col;
++  return maxVisitor.res;
++}
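++
++// Hypothetical usage sketch (illustrative only, not part of the original sources):
++// locating the extreme entries of a small matrix.
++// \code
++// Matrix3f m = Matrix3f::Random();
++// int r, c;
++// float smallest = m.minCoeff(&r, &c);   // smallest == m(r,c)
++// float largest  = m.maxCoeff(&r, &c);
++// \endcode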
++
++
++#endif // EIGEN_VISITOR_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/AngleAxis.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/AngleAxis.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/AngleAxis.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/AngleAxis.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,212 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_ANGLEAXIS_H
++#define EIGEN_ANGLEAXIS_H
++
++/** \geometry_module \ingroup Geometry
++  *
++  * \class AngleAxis
++  *
++  * \brief Represents a 3D rotation as a rotation angle around an arbitrary 3D axis
++  *
++  * \param _Scalar the scalar type, i.e., the type of the coefficients.
++  *
++  * The following two typedefs are provided for convenience:
++  * \li \c AngleAxisf for \c float
++  * \li \c AngleAxisd for \c double
++  *
++  * \addexample AngleAxisForEuler \label How to define a rotation from Euler-angles
++  *
++  * Combined with MatrixBase::Unit{X,Y,Z}, AngleAxis can be used to easily
++  * mimic Euler-angles. Here is an example:
++  * \include AngleAxis_mimic_euler.cpp
++  * Output: \verbinclude AngleAxis_mimic_euler.out
++  *
++  * \sa class Quaternion, class Transform, MatrixBase::UnitX()
++  */
++template<typename _Scalar>
++class AngleAxis
++{
++public:
++  enum { Dim = 3 };
++  /** the scalar type of the coefficients */
++  typedef _Scalar Scalar;
++  typedef Matrix<Scalar,3,3> Matrix3;
++  typedef Matrix<Scalar,3,1> Vector3;
++  typedef Quaternion<Scalar> QuaternionType;
++
++protected:
++
++  Vector3 m_axis;
++  Scalar m_angle;
++
++public:
++
++  /** Default constructor without initialization. */
++  AngleAxis() {}
++  /** Constructs and initializes the angle-axis rotation from an \a angle in radians
++    * and an \a axis which must be normalized. */
++  template<typename Derived>
++  inline AngleAxis(Scalar angle, const MatrixBase<Derived>& axis) : m_axis(axis), m_angle(angle) {}
++  /** Constructs and initializes the angle-axis rotation from a quaternion \a q. */
++  inline AngleAxis(const QuaternionType& q) { *this = q; }
++  /** Constructs and initializes the angle-axis rotation from a 3x3 rotation matrix. */
++  template<typename Derived>
++  inline explicit AngleAxis(const MatrixBase<Derived>& m) { *this = m; }
++
++  Scalar angle() const { return m_angle; }
++  Scalar& angle() { return m_angle; }
++
++  const Vector3& axis() const { return m_axis; }
++  Vector3& axis() { return m_axis; }
++
++  /** Concatenates two rotations */
++  inline QuaternionType operator* (const AngleAxis& other) const
++  { return QuaternionType(*this) * QuaternionType(other); }
++
++  /** Concatenates two rotations */
++  inline QuaternionType operator* (const QuaternionType& other) const
++  { return QuaternionType(*this) * other; }
++
++  /** Concatenates two rotations */
++  friend inline QuaternionType operator* (const QuaternionType& a, const AngleAxis& b)
++  { return a * QuaternionType(b); }
++
++  /** Concatenates two rotations */
++  inline typename ProductReturnType<Matrix3,Matrix3>::Type
++  operator* (const Matrix3& other) const
++  { return toRotationMatrix() * other; }
++
++  /** Concatenates two rotations */
++  inline friend typename ProductReturnType<Matrix3,Matrix3>::Type
++  operator* (const Matrix3& a, const AngleAxis& b)
++  { return a * b.toRotationMatrix(); }
++
++  /** \returns the inverse rotation, i.e., an angle-axis with opposite rotation angle */
++  AngleAxis inverse() const
++  { return AngleAxis(-m_angle, m_axis); }
++
++  AngleAxis& operator=(const QuaternionType& q);
++  template<typename Derived>
++  AngleAxis& operator=(const MatrixBase<Derived>& m);
++
++  template<typename Derived>
++  AngleAxis& fromRotationMatrix(const MatrixBase<Derived>& m);
++  Matrix3 toRotationMatrix(void) const;
++};
++
++/** \ingroup Geometry
++  * single precision angle-axis type */
++typedef AngleAxis<float> AngleAxisf;
++/** \ingroup Geometry
++  * double precision angle-axis type */
++typedef AngleAxis<double> AngleAxisd;
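++
++// Hypothetical usage sketch (illustrative only, in the spirit of the
++// AngleAxis_mimic_euler example referenced above): composing rotations about
++// the unit axes into a rotation matrix.
++// \code
++// Matrix3f m;
++// m = AngleAxisf(0.25f*M_PI, Vector3f::UnitX())
++//   * AngleAxisf(0.50f*M_PI, Vector3f::UnitZ());
++// \endcode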
++
++/** Set \c *this from a quaternion.
++  * The axis is normalized.
++  */
++template<typename Scalar>
++AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const QuaternionType& q)
++{
++  Scalar n2 = q.vec().norm2();
++  if (ei_isMuchSmallerThan(n2,Scalar(1)))
++  {
++    m_angle = 0;
++    m_axis << 1, 0, 0;
++  }
++  else
++  {
++    m_angle = 2*std::acos(q.w());
++    m_axis = q.vec() / ei_sqrt(n2);
++  }
++  return *this;
++}
++
++/** Set \c *this from a 3x3 rotation matrix \a mat.
++  */
++template<typename Scalar>
++template<typename Derived>
++AngleAxis<Scalar>& AngleAxis<Scalar>::operator=(const MatrixBase<Derived>& mat)
++{
++  // Since a direct conversion would not be really faster,
++  // let's use the robust Quaternion implementation:
++  return *this = QuaternionType(mat);
++}
++
++/** Constructs and \returns an equivalent 3x3 rotation matrix.
++  */
++template<typename Scalar>
++typename AngleAxis<Scalar>::Matrix3
++AngleAxis<Scalar>::toRotationMatrix(void) const
++{
++  Matrix3 res;
++  Vector3 sin_axis  = ei_sin(m_angle) * m_axis;
++  Scalar c = ei_cos(m_angle);
++  Vector3 cos1_axis = (Scalar(1)-c) * m_axis;
++
++  Scalar tmp;
++  tmp = cos1_axis.x() * m_axis.y();
++  res.coeffRef(0,1) = tmp - sin_axis.z();
++  res.coeffRef(1,0) = tmp + sin_axis.z();
++
++  tmp = cos1_axis.x() * m_axis.z();
++  res.coeffRef(0,2) = tmp + sin_axis.y();
++  res.coeffRef(2,0) = tmp - sin_axis.y();
++
++  tmp = cos1_axis.y() * m_axis.z();
++  res.coeffRef(1,2) = tmp - sin_axis.x();
++  res.coeffRef(2,1) = tmp + sin_axis.x();
++
++  res.diagonal() = (cos1_axis.cwise() * m_axis).cwise() + c;
++
++  return res;
++}
++
++/** \geometry_module
++  *
++  * Constructs a 3x3 rotation matrix from the angle-axis \a aa
++  *
++  * \sa Matrix(const Quaternion&)
++  */
++template<typename _Scalar, int _Rows, int _Cols, int _MaxRows, int _MaxCols, unsigned int _Flags>
++Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags>::Matrix(const AngleAxis<Scalar>& aa)
++{
++  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,3,3);
++  *this = aa.toRotationMatrix();
++}
++
++/** \geometry_module
++  *
++  * Set a 3x3 rotation matrix from the angle-axis \a aa
++  */
++template<typename _Scalar, int _Rows, int _Cols, int _MaxRows, int _MaxCols, unsigned int _Flags>
++Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags>&
++Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags>::operator=(const AngleAxis<Scalar>& aa)
++{
++  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,3,3);
++  return *this = aa.toRotationMatrix();
++}
++
++#endif // EIGEN_ANGLEAXIS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/OrthoMethods.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/OrthoMethods.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/OrthoMethods.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/OrthoMethods.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,111 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_ORTHOMETHODS_H
++#define EIGEN_ORTHOMETHODS_H
++
++/** \geometry_module
++  * \returns the cross product of \c *this and \a other */
++template<typename Derived>
++template<typename OtherDerived>
++inline typename MatrixBase<Derived>::EvalType
++MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
++{
++  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3);
++
++  // Note that there is no need for an expression here since the compiler
++  // optimizes such a small temporary very well (even within a complex expression)
++  const typename ei_nested<Derived,2>::type lhs(derived());
++  const typename ei_nested<OtherDerived,2>::type rhs(other.derived());
++  return typename ei_eval<Derived>::type(
++    lhs.coeff(1) * rhs.coeff(2) - lhs.coeff(2) * rhs.coeff(1),
++    lhs.coeff(2) * rhs.coeff(0) - lhs.coeff(0) * rhs.coeff(2),
++    lhs.coeff(0) * rhs.coeff(1) - lhs.coeff(1) * rhs.coeff(0)
++  );
++}
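++
++// Hypothetical usage sketch (illustrative only, not part of the original sources):
++// the cross product of the unit x and y axes gives the unit z axis.
++// \code
++// Vector3f z = Vector3f::UnitX().cross(Vector3f::UnitY());
++// \endcode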
++
++template<typename Derived, int Size = Derived::SizeAtCompileTime>
++struct ei_someOrthogonal_selector
++{
++  typedef typename ei_eval<Derived>::type VectorType;
++  typedef typename ei_traits<Derived>::Scalar Scalar;
++  typedef typename NumTraits<Scalar>::Real RealScalar;
++  inline static VectorType run(const Derived& src)
++  {
++    VectorType perp;
++    /* Let us compute the cross product of *this with a vector
++     * that is not too close to being collinear to *this.
++     */
++
++    /* unless the x and y coords are both close to zero, we can
++     * simply take ( -y, x, 0 ) and normalize it.
++     */
++    if((!ei_isMuchSmallerThan(src.x(), src.z()))
++    || (!ei_isMuchSmallerThan(src.y(), src.z())))
++    {
++      RealScalar invnm = Scalar(1)/src.template start<2>().norm();
++      perp.template start<3>() << -ei_conj(src.y())*invnm, ei_conj(src.x())*invnm, 0;
++    }
++    /* if both x and y are close to zero, then the vector is close
++     * to the z-axis, so it's far from collinear to the x-axis, for instance.
++     * So we take the cross product with (1,0,0) and normalize it.
++     */
++    else
++    {
++      RealScalar invnm = Scalar(1)/src.template end<2>().norm();
++      perp.template start<3>() << 0, -ei_conj(src.z())*invnm, ei_conj(src.y())*invnm;
++    }
++    if (Derived::SizeAtCompileTime>3
++    || (Derived::SizeAtCompileTime==Dynamic && src.size()>3))
++      perp.end(src.size()-3).setZero();
++
++    return perp;
++   }
++};
++
++template<typename Derived>
++struct ei_someOrthogonal_selector<Derived,2>
++{
++  typedef typename ei_eval<Derived>::type VectorType;
++  inline static VectorType run(const Derived& src)
++  { return VectorType(-ei_conj(src.y()), ei_conj(src.x())).normalized(); }
++};
++
++/** \returns an orthogonal vector of \c *this
++  *
++  * The size of \c *this must be at least 2. If the size is exactly 2,
++  * then the returned vector is a counter-clockwise rotation of \c *this, i.e., (-y,x).
++  *
++  * \sa cross()
++  */
++template<typename Derived>
++typename MatrixBase<Derived>::EvalType
++MatrixBase<Derived>::someOrthogonal() const
++{
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
++  return ei_someOrthogonal_selector<Derived>::run(derived());
++}
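++
++// Hypothetical usage sketch (illustrative only, not part of the original sources):
++// building a vector orthogonal to a given direction.
++// \code
++// Vector3f n(0.f, 0.f, 2.f);
++// Vector3f t = n.someOrthogonal();   // n.dot(t) is approximately zero
++// \endcode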
++
++#endif // EIGEN_ORTHOMETHODS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/Quaternion.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/Quaternion.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/Quaternion.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/Quaternion.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,452 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_QUATERNION_H
++#define EIGEN_QUATERNION_H
++
++template<typename Other,
++         int OtherRows=Other::RowsAtCompileTime,
++         int OtherCols=Other::ColsAtCompileTime>
++struct ei_quaternion_assign_impl;
++
++/** \geometry_module \ingroup Geometry
++  *
++  * \class Quaternion
++  *
++  * \brief The quaternion class used to represent 3D orientations and rotations
++  *
++  * \param _Scalar the scalar type, i.e., the type of the coefficients
++  *
++  * This class represents a quaternion \f$ w+xi+yj+zk \f$ that is a convenient representation of
++  * orientations and rotations of objects in three dimensions. Compared to other representations
++  * like Euler angles or 3x3 matrices, quaternions offer the following advantages:
++  * \li \b compact storage (4 scalars)
++  * \li \b efficient to compose (28 flops),
++  * \li \b stable spherical interpolation
++  *
++  * The following two typedefs are provided for convenience:
++  * \li \c Quaternionf for \c float
++  * \li \c Quaterniond for \c double
++  *
++  * \sa  class AngleAxis, class Transform
++  */
++template<typename _Scalar>
++class Quaternion
++{
++  typedef Matrix<_Scalar, 4, 1> Coefficients;
++  Coefficients m_coeffs;
++
++public:
++
++  /** the scalar type of the coefficients */
++  typedef _Scalar Scalar;
++
++  /** the type of a 3D vector */
++  typedef Matrix<Scalar,3,1> Vector3;
++  /** the equivalent rotation matrix type */
++  typedef Matrix<Scalar,3,3> Matrix3;
++  /** the equivalent angle-axis type */
++  typedef AngleAxis<Scalar> AngleAxisType;
++
++  /** \returns the \c x coefficient */
++  inline Scalar x() const { return m_coeffs.coeff(0); }
++  /** \returns the \c y coefficient */
++  inline Scalar y() const { return m_coeffs.coeff(1); }
++  /** \returns the \c z coefficient */
++  inline Scalar z() const { return m_coeffs.coeff(2); }
++  /** \returns the \c w coefficient */
++  inline Scalar w() const { return m_coeffs.coeff(3); }
++
++  /** \returns a reference to the \c x coefficient */
++  inline Scalar& x() { return m_coeffs.coeffRef(0); }
++  /** \returns a reference to the \c y coefficient */
++  inline Scalar& y() { return m_coeffs.coeffRef(1); }
++  /** \returns a reference to the \c z coefficient */
++  inline Scalar& z() { return m_coeffs.coeffRef(2); }
++  /** \returns a reference to the \c w coefficient */
++  inline Scalar& w() { return m_coeffs.coeffRef(3); }
++
++  /** \returns a read-only vector expression of the imaginary part (x,y,z) */
++  inline const Block<Coefficients,3,1> vec() const { return m_coeffs.template start<3>(); }
++
++  /** \returns a vector expression of the imaginary part (x,y,z) */
++  inline Block<Coefficients,3,1> vec() { return m_coeffs.template start<3>(); }
++
++  /** \returns a read-only vector expression of the coefficients (x,y,z,w) */
++  inline const Coefficients& coeffs() const { return m_coeffs; }
++
++  /** \returns a vector expression of the coefficients (x,y,z,w) */
++  inline Coefficients& coeffs() { return m_coeffs; }
++
++  /** Default constructor, initializing to the identity quaternion. */
++  inline Quaternion()
++  { m_coeffs << 0, 0, 0, 1; }
++
++  /** Constructs and initializes the quaternion \f$ w+xi+yj+zk \f$ from
++    * its four coefficients \a w, \a x, \a y and \a z.
++    * 
++    * \warning Note the order of the arguments: the real \a w coefficient first,
++    * while internally the coefficients are stored in the following order:
++    * [\c x, \c y, \c z, \c w]
++    */
++  inline Quaternion(Scalar w, Scalar x, Scalar y, Scalar z)
++  { m_coeffs << x, y, z, w; }
++
++  /** Copy constructor */
++  inline Quaternion(const Quaternion& other) { m_coeffs = other.m_coeffs; }
++
++  /** Constructs and initializes a quaternion from the angle-axis \a aa */
++  explicit inline Quaternion(const AngleAxisType& aa) { *this = aa; }
++
++  /** Constructs and initializes a quaternion from either:
++    *  - a rotation matrix expression,
++    *  - a 4D vector expression representing quaternion coefficients.
++    * \sa operator=(MatrixBase<Derived>)
++    */
++  template<typename Derived>
++  explicit inline Quaternion(const MatrixBase<Derived>& other) { *this = other; }
++
++  Quaternion& operator=(const Quaternion& other);
++  Quaternion& operator=(const AngleAxisType& aa);
++  template<typename Derived>
++  Quaternion& operator=(const MatrixBase<Derived>& m);
++
++  /** \returns a quaternion representing an identity rotation
++    * \sa MatrixBase::Identity()
++    */
++  inline static Quaternion Identity() { return Quaternion(1, 0, 0, 0); }
++
++  /** \sa Quaternion::Identity(), MatrixBase::setIdentity()
++    */
++  inline Quaternion& setIdentity() { m_coeffs << 0, 0, 0, 1; return *this; }
++
++  /** \returns the squared norm of the quaternion's coefficients
++    * \sa Quaternion::norm(), MatrixBase::norm2()
++    */
++  inline Scalar norm2() const { return m_coeffs.norm2(); }
++
++  /** \returns the norm of the quaternion's coefficients
++    * \sa Quaternion::norm2(), MatrixBase::norm()
++    */
++  inline Scalar norm() const { return m_coeffs.norm(); }
++
++  Matrix3 toRotationMatrix(void) const;
++
++  template<typename Derived1, typename Derived2>
++  Quaternion& setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b);
++
++  inline Quaternion operator* (const Quaternion& q) const;
++  inline Quaternion& operator*= (const Quaternion& q);
++
++  Quaternion inverse(void) const;
++  Quaternion conjugate(void) const;
++
++  Quaternion slerp(Scalar t, const Quaternion& other) const;
++
++  template<typename Derived>
++  Vector3 operator* (const MatrixBase<Derived>& vec) const;
++
++};
++
++/** \ingroup Geometry
++  * single precision quaternion type */
++typedef Quaternion<float> Quaternionf;
++/** \ingroup Geometry
++  * double precision quaternion type */
++typedef Quaternion<double> Quaterniond;
++
++/** \returns the concatenation of two rotations as a quaternion-quaternion product */
++template <typename Scalar>
++inline Quaternion<Scalar> Quaternion<Scalar>::operator* (const Quaternion& other) const
++{
++  return Quaternion
++  (
++    this->w() * other.w() - this->x() * other.x() - this->y() * other.y() - this->z() * other.z(),
++    this->w() * other.x() + this->x() * other.w() + this->y() * other.z() - this->z() * other.y(),
++    this->w() * other.y() + this->y() * other.w() + this->z() * other.x() - this->x() * other.z(),
++    this->w() * other.z() + this->z() * other.w() + this->x() * other.y() - this->y() * other.x()
++  );
++}
++
++/** \sa operator*(Quaternion) */
++template <typename Scalar>
++inline Quaternion<Scalar>& Quaternion<Scalar>::operator*= (const Quaternion& other)
++{
++  return (*this = *this * other);
++}
++
++/** Rotation of a vector by a quaternion.
++  * \remarks If the quaternion is used to rotate several points (>1)
++  * then it is much more efficient to first convert it to a 3x3 Matrix.
++  * Comparison of the operation cost for n transformations:
++  *   - Quaternion:    30n
++  *   - Via a Matrix3: 24 + 15n
++  */
++template <typename Scalar>
++template<typename Derived>
++inline typename Quaternion<Scalar>::Vector3
++Quaternion<Scalar>::operator* (const MatrixBase<Derived>& v) const
++{
++    // Note that this algorithm comes from the optimization by hand
++    // of the conversion to a Matrix followed by a Matrix/Vector product.
++    // It appears to be much faster than the common algorithm found
++    // in the literature (30 versus 39 flops). It also requires two
++    // Vector3 as temporaries.
++    Vector3 uv;
++    uv = 2 * this->vec().cross(v);
++    return v + this->w() * uv + this->vec().cross(uv);
++}
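++
++// Hypothetical sketch of the remark above (illustrative only, not part of the
++// original sources): when many points must be rotated, converting once to a
++// 3x3 matrix is cheaper overall.
++// \code
++// Quaternionf q(AngleAxisf(0.1f, Vector3f::UnitZ()));
++// Matrix3f R = q.toRotationMatrix();
++// // then, for each point p: rotated = R * p;   (24 + 15n flops instead of 30n)
++// \endcode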
++
++template<typename Scalar>
++inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const Quaternion& other)
++{
++  m_coeffs = other.m_coeffs;
++  return *this;
++}
++
++/** Set \c *this from an angle-axis \a aa and returns a reference to \c *this
++  */
++template<typename Scalar>
++inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const AngleAxisType& aa)
++{
++  Scalar ha = 0.5*aa.angle();
++  this->w() = ei_cos(ha);
++  this->vec() = ei_sin(ha) * aa.axis();
++  return *this;
++}
++
++/** Set \c *this from the expression \a xpr:
++  *   - if \a xpr is a 4x1 vector, then \a xpr is assumed to be a quaternion
++  *   - if \a xpr is a 3x3 matrix, then \a xpr is assumed to be rotation matrix
++  *     and \a xpr is converted to a quaternion
++  */
++template<typename Scalar>
++template<typename Derived>
++inline Quaternion<Scalar>& Quaternion<Scalar>::operator=(const MatrixBase<Derived>& xpr)
++{
++  ei_quaternion_assign_impl<Derived>::run(*this, xpr.derived());
++  return *this;
++}
++
++/** Convert the quaternion to a 3x3 rotation matrix */
++template<typename Scalar>
++inline typename Quaternion<Scalar>::Matrix3
++Quaternion<Scalar>::toRotationMatrix(void) const
++{
++  // NOTE if inlined, then gcc 4.2 and 4.4 get rid of the temporary (not gcc 4.3 !!)
++  // if not inlined then the cost of the return by value is huge ~ +35%,
++  // however, not inlining this function is an order of magnitude slower, so
++  // it has to be inlined, and so the return by value is not an issue
++  Matrix3 res;
++
++  Scalar tx  = 2*this->x();
++  Scalar ty  = 2*this->y();
++  Scalar tz  = 2*this->z();
++  Scalar twx = tx*this->w();
++  Scalar twy = ty*this->w();
++  Scalar twz = tz*this->w();
++  Scalar txx = tx*this->x();
++  Scalar txy = ty*this->x();
++  Scalar txz = tz*this->x();
++  Scalar tyy = ty*this->y();
++  Scalar tyz = tz*this->y();
++  Scalar tzz = tz*this->z();
++
++  res.coeffRef(0,0) = 1-(tyy+tzz);
++  res.coeffRef(0,1) = txy-twz;
++  res.coeffRef(0,2) = txz+twy;
++  res.coeffRef(1,0) = txy+twz;
++  res.coeffRef(1,1) = 1-(txx+tzz);
++  res.coeffRef(1,2) = tyz-twx;
++  res.coeffRef(2,0) = txz-twy;
++  res.coeffRef(2,1) = tyz+twx;
++  res.coeffRef(2,2) = 1-(txx+tyy);
++
++  return res;
++}
++
++/** Makes a quaternion representing the rotation between two vectors \a a and \a b.
++  * \returns a reference to the actual quaternion
++  * Note that the two input vectors do \b not have to be normalized.
++  */
++template<typename Scalar>
++template<typename Derived1, typename Derived2>
++inline Quaternion<Scalar>& Quaternion<Scalar>::setFromTwoVectors(const MatrixBase<Derived1>& a, const MatrixBase<Derived2>& b)
++{
++  Vector3 v0 = a.normalized();
++  Vector3 v1 = b.normalized();
++  Vector3 axis = v0.cross(v1);
++  Scalar c = v0.dot(v1);
++
++  // if dot == 1, vectors are the same
++  if (ei_isApprox(c,Scalar(1)))
++  {
++    // set to identity and return early; without the early return, the
++    // computation below would overwrite the identity we just set
++    this->w() = 1; this->vec().setZero();
++    return *this;
++  }
++  Scalar s = ei_sqrt((1+c)*2);
++  Scalar invs = 1./s;
++  this->vec() = axis * invs;
++  this->w() = s * 0.5;
++
++  return *this;
++}
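++
++// Hypothetical usage sketch (illustrative only, not part of the original sources):
++// computing the rotation that maps one direction onto another.
++// \code
++// Vector3f from(0.f, 0.f, 1.f), to(1.f, 1.f, 0.f);
++// Quaternionf q;
++// q.setFromTwoVectors(from, to);   // q * from.normalized() is close to to.normalized()
++// \endcode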
++
++/** \returns the multiplicative inverse of \c *this
++  * Note that in most cases, i.e., if you simply want the opposite rotation,
++  * and/or the quaternion is normalized, then it is enough to use the conjugate.
++  *
++  * \sa Quaternion::conjugate()
++  */
++template <typename Scalar>
++inline Quaternion<Scalar> Quaternion<Scalar>::inverse() const
++{
++  // FIXME should this function be called multiplicativeInverse and conjugate() be called inverse() or opposite()  ??
++  Scalar n2 = this->norm2();
++  if (n2 > 0)
++    return Quaternion(conjugate().coeffs() / n2);
++  else
++  {
++    // return an invalid result to flag the error
++    return Quaternion(Coefficients::Zero());
++  }
++}
++
++/** \returns the conjugate of \c *this, which is equal to the multiplicative inverse
++  * if the quaternion is normalized.
++  * The conjugate of a quaternion represents the opposite rotation.
++  *
++  * \sa Quaternion::inverse()
++  */
++template <typename Scalar>
++inline Quaternion<Scalar> Quaternion<Scalar>::conjugate() const
++{
++  return Quaternion(this->w(),-this->x(),-this->y(),-this->z());
++}
++
++/** \returns the spherical linear interpolation between the two quaternions
++  * \c *this and \a other at the parameter \a t
++  */
++template <typename Scalar>
++Quaternion<Scalar> Quaternion<Scalar>::slerp(Scalar t, const Quaternion& other) const
++{
++  // FIXME options for this function would be:
++  // 1 - Quaternion& fromSlerp(Scalar t, const Quaternion& q0, const Quaternion& q1);
++  //     which set *this from the s-lerp and returns *this
++  // 2 - Quaternion slerp(Scalar t, const Quaternion& other) const
++  //     which returns the s-lerp between this and other
++  // ??
++  if (m_coeffs == other.m_coeffs)
++    return *this;
++
++  Scalar d = m_coeffs.dot(other.m_coeffs);
++
++  // theta is the angle between the 2 quaternions
++  Scalar theta = std::acos(ei_abs(d));
++  Scalar sinTheta = ei_sin(theta);
++
++  Scalar scale0 = ei_sin( ( 1 - t ) * theta) / sinTheta;
++  Scalar scale1 = ei_sin( ( t * theta) ) / sinTheta;
++  if (d<0)
++    scale1 = -scale1;
++
++  return Quaternion(scale0 * m_coeffs + scale1 * other.m_coeffs);
++}
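++
++// Hypothetical usage sketch (illustrative only, not part of the original sources):
++// interpolating half-way between two orientations.
++// \code
++// Quaternionf qa(AngleAxisf(0.f,       Vector3f::UnitZ()));
++// Quaternionf qb(AngleAxisf(0.5f*M_PI, Vector3f::UnitZ()));
++// Quaternionf qm = qa.slerp(0.5f, qb); // a quarter turn about the z axis
++// \endcode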
++
++// set from a rotation matrix
++template<typename Other>
++struct ei_quaternion_assign_impl<Other,3,3>
++{
++  typedef typename Other::Scalar Scalar;
++  inline static void run(Quaternion<Scalar>& q, const Other& mat)
++  {
++    // This algorithm comes from  "Quaternion Calculus and Fast Animation",
++    // Ken Shoemake, 1987 SIGGRAPH course notes
++    Scalar t = mat.trace();
++    if (t > 0)
++    {
++      t = ei_sqrt(t + 1.0);
++      q.w() = 0.5*t;
++      t = 0.5/t;
++      q.x() = (mat.coeff(2,1) - mat.coeff(1,2)) * t;
++      q.y() = (mat.coeff(0,2) - mat.coeff(2,0)) * t;
++      q.z() = (mat.coeff(1,0) - mat.coeff(0,1)) * t;
++    }
++    else
++    {
++      int i = 0;
++      if (mat.coeff(1,1) > mat.coeff(0,0))
++        i = 1;
++      if (mat.coeff(2,2) > mat.coeff(i,i))
++        i = 2;
++      int j = (i+1)%3;
++      int k = (j+1)%3;
++
++      t = ei_sqrt(mat.coeff(i,i)-mat.coeff(j,j)-mat.coeff(k,k) + 1.0);
++      q.coeffs().coeffRef(i) = 0.5 * t;
++      t = 0.5/t;
++      q.w() = (mat.coeff(k,j)-mat.coeff(j,k))*t;
++      q.coeffs().coeffRef(j) = (mat.coeff(j,i)+mat.coeff(i,j))*t;
++      q.coeffs().coeffRef(k) = (mat.coeff(k,i)+mat.coeff(i,k))*t;
++    }
++  }
++};
++
++// set from a vector of coefficients assumed to be a quaternion
++template<typename Other>
++struct ei_quaternion_assign_impl<Other,4,1>
++{
++  typedef typename Other::Scalar Scalar;
++  inline static void run(Quaternion<Scalar>& q, const Other& vec)
++  {
++    q.coeffs() = vec;
++  }
++};
++
++/** \geometry_module
++  *
++  * Constructs a 3x3 rotation matrix from the quaternion \a q
++  *
++  * \sa Matrix(const AngleAxis&)
++  */
++template<typename _Scalar, int _Rows, int _Cols, int _MaxRows, int _MaxCols, unsigned int _Flags>
++Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags>::Matrix(const Quaternion<Scalar>& q)
++{
++  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,3,3);
++  *this = q.toRotationMatrix();
++}
++
++/** \geometry_module
++  *
++  * Set a 3x3 rotation matrix from the quaternion \a q
++  */
++template<typename _Scalar, int _Rows, int _Cols, int _MaxRows, int _MaxCols, unsigned int _Flags>
++Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags>&
++Matrix<_Scalar, _Rows, _Cols, _MaxRows, _MaxCols, _Flags>::operator=(const Quaternion<Scalar>& q)
++{
++  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix,3,3);
++  return *this = q.toRotationMatrix();
++}
++
++#endif // EIGEN_QUATERNION_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/Rotation.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/Rotation.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/Rotation.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/Rotation.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,203 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_ROTATION_H
++#define EIGEN_ROTATION_H
++
++// this file aims to contain the various representations of rotation/orientation
++// in 2D and 3D space, except Matrix and Quaternion.
++
++/** \internal
++  *
++  * \class ToRotationMatrix
++  *
++  * \brief Template static struct to convert any rotation representation to a matrix form
++  *
++  * \param Scalar the numeric type of the matrix coefficients
++  * \param Dim the dimension of the current space
++  * \param RotationType the input type of the rotation
++  *
++  * This class defines a single static member with the following prototype:
++  * \code
++  * static <MatrixExpression> convert(const RotationType& r);
++  * \endcode
++  * where \c <MatrixExpression> must be a fixed-size matrix expression of size Dim x Dim and
++  * coefficient type Scalar.
++  *
++  * Default specializations are provided for:
++  *   - any scalar type (2D),
++  *   - any matrix expression,
++  *   - Quaternion,
++  *   - AngleAxis.
++  *
++  * Currently ToRotationMatrix is only used by Transform.
++  *
++  * \sa class Transform, class Rotation2D, class Quaternion, class AngleAxis
++  *
++  */
++template<typename Scalar, int Dim, typename RotationType>
++struct ToRotationMatrix;
++
++// 2D rotation to matrix
++template<typename Scalar, typename OtherScalarType>
++struct ToRotationMatrix<Scalar, 2, OtherScalarType>
++{
++  inline static Matrix<Scalar,2,2> convert(const OtherScalarType& r)
++  { return Rotation2D<Scalar>(r).toRotationMatrix(); }
++};
++
++// 2D rotation to rotation matrix
++template<typename Scalar, typename OtherScalarType>
++struct ToRotationMatrix<Scalar, 2, Rotation2D<OtherScalarType> >
++{
++  inline static Matrix<Scalar,2,2> convert(const Rotation2D<OtherScalarType>& r)
++  { return Rotation2D<Scalar>(r).toRotationMatrix(); }
++};
++
++// quaternion to rotation matrix
++template<typename Scalar, typename OtherScalarType>
++struct ToRotationMatrix<Scalar, 3, Quaternion<OtherScalarType> >
++{
++  inline static Matrix<Scalar,3,3> convert(const Quaternion<OtherScalarType>& q)
++  { return q.toRotationMatrix(); }
++};
++
++// angle axis to rotation matrix
++template<typename Scalar, typename OtherScalarType>
++struct ToRotationMatrix<Scalar, 3, AngleAxis<OtherScalarType> >
++{
++  inline static Matrix<Scalar,3,3> convert(const AngleAxis<OtherScalarType>& aa)
++  { return aa.toRotationMatrix(); }
++};
++
++// matrix xpr to matrix xpr
++template<typename Scalar, int Dim, typename OtherDerived>
++struct ToRotationMatrix<Scalar, Dim, MatrixBase<OtherDerived> >
++{
++  inline static const MatrixBase<OtherDerived>& convert(const MatrixBase<OtherDerived>& mat)
++  {
++    EIGEN_STATIC_ASSERT(OtherDerived::RowsAtCompileTime==Dim && OtherDerived::ColsAtCompileTime==Dim,
++      you_did_a_programming_error);
++    return mat;
++  }
++};
++
++/** \geometry_module \ingroup Geometry
++  *
++  * \class Rotation2D
++  *
++  * \brief Represents a rotation/orientation in a 2 dimensional space.
++  *
++  * \param _Scalar the scalar type, i.e., the type of the coefficients
++  *
++  * This class is equivalent to a single scalar representing a counter-clockwise rotation
++  * as a single angle in radians. It provides some additional features such as the automatic
++  * conversion from/to a 2x2 rotation matrix. Moreover, this class aims to provide a similar
++  * interface to Quaternion in order to facilitate the writing of generic algorithms
++  * dealing with rotations.
++  *
++  * \sa class Quaternion, class Transform
++  */
++template<typename _Scalar>
++class Rotation2D
++{
++public:
++  enum { Dim = 2 };
++  /** the scalar type of the coefficients */
++  typedef _Scalar Scalar;
++  typedef Matrix<Scalar,2,1> Vector2;
++  typedef Matrix<Scalar,2,2> Matrix2;
++
++protected:
++
++  Scalar m_angle;
++
++public:
++
++  /** Constructs a 2D counter-clockwise rotation from the angle \a a in radians. */
++  inline Rotation2D(Scalar a) : m_angle(a) {}
++
++  /** \returns the rotation angle */
++  inline Scalar angle() const { return m_angle; }
++
++  /** \returns a read-write reference to the rotation angle */
++  inline Scalar& angle() { return m_angle; }
++
++  /** Automatic conversion to a 2D rotation matrix.
++    * \sa toRotationMatrix()
++    */
++  inline operator Matrix2() const { return toRotationMatrix(); }
++
++  /** \returns the inverse rotation */
++  inline Rotation2D inverse() const { return -m_angle; }
++
++  /** Concatenates two rotations */
++  inline Rotation2D operator*(const Rotation2D& other) const
++  { return m_angle + other.m_angle; }
++
++  /** Concatenates two rotations */
++  inline Rotation2D& operator*=(const Rotation2D& other)
++  { m_angle += other.m_angle; return *this; }
++
++  /** Applies the rotation to a 2D vector */
++  template<typename Derived>
++  Vector2 operator* (const MatrixBase<Derived>& vec) const
++  { return toRotationMatrix() * vec; }
++
++  template<typename Derived>
++  Rotation2D& fromRotationMatrix(const MatrixBase<Derived>& m);
++  Matrix2 toRotationMatrix(void) const;
++
++  /** \returns the spherical interpolation between \c *this and \a other using
++    * parameter \a t. It is in fact equivalent to a linear interpolation.
++    */
++  inline Rotation2D slerp(Scalar t, const Rotation2D& other) const
++  { return m_angle * (1-t) + t * other.angle(); }
++};
++
++/** Set \c *this from a 2x2 rotation matrix \a mat.
++  * In other words, this function extracts the rotation angle
++  * from the rotation matrix.
++  */
++template<typename Scalar>
++template<typename Derived>
++Rotation2D<Scalar>& Rotation2D<Scalar>::fromRotationMatrix(const MatrixBase<Derived>& mat)
++{
++  EIGEN_STATIC_ASSERT(Derived::RowsAtCompileTime==2 && Derived::ColsAtCompileTime==2,you_did_a_programming_error);
++  m_angle = ei_atan2(mat.coeff(1,0), mat.coeff(0,0));
++  return *this;
++}
++
++/** Constructs and \returns an equivalent 2x2 rotation matrix.
++  */
++template<typename Scalar>
++typename Rotation2D<Scalar>::Matrix2
++Rotation2D<Scalar>::toRotationMatrix(void) const
++{
++  Scalar sinA = ei_sin(m_angle);
++  Scalar cosA = ei_cos(m_angle);
++  return (Matrix2() << cosA, -sinA, sinA, cosA).finished();
++}
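++
++// Hypothetical usage sketch (illustrative only, not part of the original sources):
++// rotating a 2D vector by a quarter turn.
++// \code
++// Rotation2D<float> rot(0.5f*M_PI);
++// Vector2f v(1.f, 0.f);
++// Vector2f w = rot * v;   // approximately (0, 1)
++// \endcode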
++
++#endif // EIGEN_ROTATION_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/Transform.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/Transform.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Geometry/Transform.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Geometry/Transform.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,438 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_TRANSFORM_H
++#define EIGEN_TRANSFORM_H
++
++// Note that we have to pass Dim and HDim because it is not allowed to use a template
++// parameter to define a template specialization. To be more precise, in the following
++// specializations, it is not allowed to use Dim+1 instead of HDim.
++template< typename Other,
++          int Dim,
++          int HDim,
++          int OtherRows=Other::RowsAtCompileTime,
++          int OtherCols=Other::ColsAtCompileTime>
++struct ei_transform_product_impl;
++
++/** \geometry_module \ingroup Geometry
++  *
++  * \class Transform
++  *
++  * \brief Represents a homogeneous transformation in an N dimensional space
++  *
++  * \param _Scalar the scalar type, i.e., the type of the coefficients
++  * \param _Dim the dimension of the space
++  *
++  * The homography is internally represented and stored as a (Dim+1)^2 matrix which
++  * is available through the matrix() method.
++  *
++  * Conversion methods from/to Qt's QMatrix are available if the preprocessor token
++  * EIGEN_QT_SUPPORT is defined.
++  *
++  * \sa class Matrix, class Quaternion
++  */
++template<typename _Scalar, int _Dim>
++class Transform
++{
++public:
++
++  enum {
++    Dim = _Dim,     ///< space dimension in which the transformation holds
++    HDim = _Dim+1   ///< size of a respective homogeneous vector
++  };
++  /** the scalar type of the coefficients */
++  typedef _Scalar Scalar;
++  /** type of the matrix used to represent the transformation */
++  typedef Matrix<Scalar,HDim,HDim> MatrixType;
++  /** type of the matrix used to represent the affine part of the transformation */
++  typedef Matrix<Scalar,Dim,Dim> AffineMatrixType;
++  /** type of read/write reference to the affine part of the transformation */
++  typedef Block<MatrixType,Dim,Dim> AffinePart;
++  /** type of a vector */
++  typedef Matrix<Scalar,Dim,1> VectorType;
++  /** type of a read/write reference to the translation part of the transformation */
++  typedef Block<MatrixType,Dim,1> TranslationPart;
++
++protected:
++
++  MatrixType m_matrix;
++
++public:
++
++  /** Default constructor without initialization of the coefficients. */
++  Transform() { }
++
++  inline Transform(const Transform& other)
++  { m_matrix = other.m_matrix; }
++
++  inline Transform& operator=(const Transform& other)
++  { m_matrix = other.m_matrix; return *this; }
++
++  /** Constructs and initializes a transformation from a (Dim+1)^2 matrix. */
++  template<typename OtherDerived>
++  inline explicit Transform(const MatrixBase<OtherDerived>& other)
++  { m_matrix = other; }
++
++  /** Set \c *this from a (Dim+1)^2 matrix. */
++  template<typename OtherDerived>
++  inline Transform& operator=(const MatrixBase<OtherDerived>& other)
++  { m_matrix = other; return *this; }
++
++  #ifdef EIGEN_QT_SUPPORT
++  inline Transform(const QMatrix& other);
++  inline Transform& operator=(const QMatrix& other);
++  inline QMatrix toQMatrix(void) const;
++  #endif
++
++  /** \returns a read-only expression of the transformation matrix */
++  inline const MatrixType& matrix() const { return m_matrix; }
++  /** \returns a writable expression of the transformation matrix */
++  inline MatrixType& matrix() { return m_matrix; }
++
++  /** \returns a read-only expression of the affine (linear) part of the transformation */
++  inline const AffinePart affine() const { return m_matrix.template block<Dim,Dim>(0,0); }
++  /** \returns a writable expression of the affine (linear) part of the transformation */
++  inline AffinePart affine() { return m_matrix.template block<Dim,Dim>(0,0); }
++
++  /** \returns a read-only expression of the translation vector of the transformation */
++  inline const TranslationPart translation() const { return m_matrix.template block<Dim,1>(0,Dim); }
++  /** \returns a writable expression of the translation vector of the transformation */
++  inline TranslationPart translation() { return m_matrix.template block<Dim,1>(0,Dim); }
++
++  /** \returns an expression of the product between the transform \c *this and a matrix expression \a other
++  *
++  * The right-hand side \a other may be either:
++  * \li a vector of size Dim,
++  * \li a homogeneous vector of size Dim+1,
++  * \li a transformation matrix of size (Dim+1) x (Dim+1).
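++  *
++  * For instance (illustrative sketch):
++  * \code
++  * Transform3f t;
++  * t.setIdentity();
++  * Vector3f v  = t * Vector3f(1,2,3);   // vector of size Dim
++  * Vector4f hv = t * Vector4f(1,2,3,1); // homogeneous vector of size Dim+1
++  * \endcode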
++  */
++  // note: this function is defined here because some compilers cannot find the respective declaration
++  template<typename OtherDerived>
++  const typename ei_transform_product_impl<OtherDerived,_Dim,_Dim+1>::ResultType
++  operator * (const MatrixBase<OtherDerived> &other) const
++  { return ei_transform_product_impl<OtherDerived,Dim,HDim>::run(*this,other.derived()); }
++
++  /** Concatenates two transformations */
++  const typename ProductReturnType<MatrixType,MatrixType>::Type
++  operator * (const Transform& other) const
++  { return m_matrix * other.matrix(); }
++
++  /** \sa MatrixBase::setIdentity() */
++  void setIdentity() { m_matrix.setIdentity(); }
++
++  template<typename OtherDerived>
++  Transform& scale(const MatrixBase<OtherDerived> &other);
++
++  template<typename OtherDerived>
++  Transform& prescale(const MatrixBase<OtherDerived> &other);
++
++  template<typename OtherDerived>
++  Transform& translate(const MatrixBase<OtherDerived> &other);
++
++  template<typename OtherDerived>
++  Transform& pretranslate(const MatrixBase<OtherDerived> &other);
++
++  template<typename RotationType>
++  Transform& rotate(const RotationType& rotation);
++
++  template<typename RotationType>
++  Transform& prerotate(const RotationType& rotation);
++
++  Transform& shear(Scalar sx, Scalar sy);
++  Transform& preshear(Scalar sx, Scalar sy);
++
++  AffineMatrixType extractRotation() const;
++  AffineMatrixType extractRotationNoShear() const;
++
++  template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
++  Transform& fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
++    const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale);
++
++  /** \sa MatrixBase::inverse() */
++  const MatrixType inverse() const
++  { return m_matrix.inverse(); }
++
++protected:
++
++};
++
++/** \ingroup Geometry */
++typedef Transform<float,2> Transform2f;
++/** \ingroup Geometry */
++typedef Transform<float,3> Transform3f;
++/** \ingroup Geometry */
++typedef Transform<double,2> Transform2d;
++/** \ingroup Geometry */
++typedef Transform<double,3> Transform3d;
++
++#ifdef EIGEN_QT_SUPPORT
++/** Initialises \c *this from a QMatrix assuming the dimension is 2.
++  *
++  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
++  */
++template<typename Scalar, int Dim>
++Transform<Scalar,Dim>::Transform(const QMatrix& other)
++{
++  *this = other;
++}
++
++/** Set \c *this from a QMatrix assuming the dimension is 2.
++  *
++  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
++  */
++template<typename Scalar, int Dim>
++Transform<Scalar,Dim>& Transform<Scalar,Dim>::operator=(const QMatrix& other)
++{
++  EIGEN_STATIC_ASSERT(Dim==2, you_did_a_programming_error);
++  m_matrix << other.m11(), other.m21(), other.dx(),
++              other.m12(), other.m22(), other.dy(),
++              0, 0, 1;
++   return *this;
++}
++
++/** \returns a QMatrix from \c *this assuming the dimension is 2.
++  *
++  * This function is available only if the token EIGEN_QT_SUPPORT is defined.
++  */
++template<typename Scalar, int Dim>
++QMatrix Transform<Scalar,Dim>::toQMatrix(void) const
++{
++  EIGEN_STATIC_ASSERT(Dim==2, you_did_a_programming_error);
++  return QMatrix(m_matrix.coeff(0,0), m_matrix.coeff(1,0),
++                 m_matrix.coeff(0,1), m_matrix.coeff(1,1),
++                 m_matrix.coeff(0,2), m_matrix.coeff(1,2));
++}
++#endif
++
++/** Applies on the right the non-uniform scale transformation represented
++  * by the vector \a other to \c *this and returns a reference to \c *this.
++  * \sa prescale()
++  */
++template<typename Scalar, int Dim>
++template<typename OtherDerived>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::scale(const MatrixBase<OtherDerived> &other)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim));
++  affine() = (affine() * other.asDiagonal()).lazy();
++  return *this;
++}
++
++/** Applies on the left the non-uniform scale transformation represented
++  * by the vector \a other to \c *this and returns a reference to \c *this.
++  * \sa scale()
++  */
++template<typename Scalar, int Dim>
++template<typename OtherDerived>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::prescale(const MatrixBase<OtherDerived> &other)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim));
++  m_matrix.template block<Dim,HDim>(0,0) = (other.asDiagonal() * m_matrix.template block<Dim,HDim>(0,0)).lazy();
++  return *this;
++}
++
++/** Applies on the right the translation matrix represented by the vector \a other
++  * to \c *this and returns a reference to \c *this.
++  * \sa pretranslate()
++  */
++template<typename Scalar, int Dim>
++template<typename OtherDerived>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::translate(const MatrixBase<OtherDerived> &other)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim));
++  translation() += affine() * other;
++  return *this;
++}
++
++/** Applies on the left the translation matrix represented by the vector \a other
++  * to \c *this and returns a reference to \c *this.
++  * \sa translate()
++  */
++template<typename Scalar, int Dim>
++template<typename OtherDerived>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::pretranslate(const MatrixBase<OtherDerived> &other)
++{
++  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(OtherDerived,int(Dim));
++  translation() += other;
++  return *this;
++}
++
++/** Applies on the right the rotation represented by the rotation \a rotation
++  * to \c *this and returns a reference to \c *this.
++  *
++  * The template parameter \a RotationType is the type of the rotation which
++  * must be registered by ToRotationMatrix<>.
++  *
++  * Natively supported types include:
++  *   - any scalar (2D),
++  *   - a Dim x Dim matrix expression,
++  *   - a Quaternion (3D),
++  *   - an AngleAxis (3D).
++  *
++  * This mechanism is easily extensible to support user-defined types such as Euler angles,
++  * or a pair of Quaternions for 4D rotations.
++  *
++  * \sa rotate(Scalar), class Quaternion, class AngleAxis, class ToRotationMatrix, prerotate(RotationType)
++  */
++template<typename Scalar, int Dim>
++template<typename RotationType>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::rotate(const RotationType& rotation)
++{
++  affine() *= ToRotationMatrix<Scalar,Dim,RotationType>::convert(rotation);
++  return *this;
++}
++
++/** Applies on the left the rotation represented by the rotation \a rotation
++  * to \c *this and returns a reference to \c *this.
++  *
++  * See rotate() for further details.
++  *
++  * \sa rotate()
++  */
++template<typename Scalar, int Dim>
++template<typename RotationType>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::prerotate(const RotationType& rotation)
++{
++  m_matrix.template block<Dim,HDim>(0,0) = ToRotationMatrix<Scalar,Dim,RotationType>::convert(rotation)
++                                         * m_matrix.template block<Dim,HDim>(0,0);
++  return *this;
++}
++
++/** Applies on the right the shear transformation represented
++  * by the shear factors \a sx and \a sy to \c *this and returns a reference to \c *this.
++  * \warning 2D only.
++  * \sa preshear()
++  */
++template<typename Scalar, int Dim>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::shear(Scalar sx, Scalar sy)
++{
++  EIGEN_STATIC_ASSERT(int(Dim)==2, you_did_a_programming_error);
++  VectorType tmp = affine().col(0)*sy + affine().col(1);
++  affine() << affine().col(0) + affine().col(1)*sx, tmp;
++  return *this;
++}
++
++/** Applies on the left the shear transformation represented
++  * by the shear factors \a sx and \a sy to \c *this and returns a reference to \c *this.
++  * \warning 2D only.
++  * \sa shear()
++  */
++template<typename Scalar, int Dim>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::preshear(Scalar sx, Scalar sy)
++{
++  EIGEN_STATIC_ASSERT(int(Dim)==2, you_did_a_programming_error);
++  m_matrix.template block<Dim,HDim>(0,0) = AffineMatrixType(1, sx, sy, 1) * m_matrix.template block<Dim,HDim>(0,0);
++  return *this;
++}
++
++/** \returns the rotation part of the transformation using a QR decomposition.
++  * \sa extractRotationNoShear(), class QR
++  */
++template<typename Scalar, int Dim>
++typename Transform<Scalar,Dim>::AffineMatrixType
++Transform<Scalar,Dim>::extractRotation() const
++{
++  return affine().qr().matrixQ();
++}
++
++/** \returns the rotation part of the transformation assuming no shear in
++  * the affine part.
++  * \sa extractRotation()
++  */
++template<typename Scalar, int Dim>
++typename Transform<Scalar,Dim>::AffineMatrixType
++Transform<Scalar,Dim>::extractRotationNoShear() const
++{
++  return affine().cwise().abs2()
++            .verticalRedux(ei_scalar_sum_op<Scalar>()).cwise().sqrt();
++}
++
++/** Convenience method to set \c *this from a position, orientation and scale
++  * of a 3D object.
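++  *
++  * For example (illustrative; an identity orientation is used for brevity):
++  * \code
++  * Transform3f t;
++  * t.fromPositionOrientationScale(Vector3f(1,2,3), Matrix3f::Identity(), Vector3f(2,2,2));
++  * \endcode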
++  */
++template<typename Scalar, int Dim>
++template<typename PositionDerived, typename OrientationType, typename ScaleDerived>
++Transform<Scalar,Dim>&
++Transform<Scalar,Dim>::fromPositionOrientationScale(const MatrixBase<PositionDerived> &position,
++  const OrientationType& orientation, const MatrixBase<ScaleDerived> &scale)
++{
++  affine() = ToRotationMatrix<Scalar,Dim,OrientationType>::convert(orientation);
++  translation() = position;
++  m_matrix(Dim,Dim) = 1.;
++  m_matrix.template block<1,Dim>(Dim,0).setZero();
++  affine() *= scale.asDiagonal();
++  return *this;
++}
++
++/***********************************
++*** Specializations of operator* ***
++***********************************/
++
++template<typename Other, int Dim, int HDim>
++struct ei_transform_product_impl<Other,Dim,HDim, HDim,HDim>
++{
++  typedef Transform<typename Other::Scalar,Dim> TransformType;
++  typedef typename TransformType::MatrixType MatrixType;
++  typedef typename ProductReturnType<MatrixType,Other>::Type ResultType;
++  static ResultType run(const TransformType& tr, const Other& other)
++  { return tr.matrix() * other; }
++};
++
++template<typename Other, int Dim, int HDim>
++struct ei_transform_product_impl<Other,Dim,HDim, HDim,1>
++{
++  typedef Transform<typename Other::Scalar,Dim> TransformType;
++  typedef typename TransformType::MatrixType MatrixType;
++  typedef typename ProductReturnType<MatrixType,Other>::Type ResultType;
++  static ResultType run(const TransformType& tr, const Other& other)
++  { return tr.matrix() * other; }
++};
++
++template<typename Other, int Dim, int HDim>
++struct ei_transform_product_impl<Other,Dim,HDim, Dim,1>
++{
++  typedef typename Other::Scalar Scalar;
++  typedef Transform<Scalar,Dim> TransformType;
++  typedef typename TransformType::AffinePart MatrixType;
++  typedef const CwiseUnaryOp<
++      ei_scalar_multiple_op<Scalar>,
++      NestByValue<CwiseBinaryOp<
++        ei_scalar_sum_op<Scalar>,
++        NestByValue<typename ProductReturnType<NestByValue<MatrixType>,Other>::Type >,
++        NestByValue<typename TransformType::TranslationPart> > >
++      > ResultType;
++  // FIXME should we offer an optimized version when the last row is known to be 0,0...,0,1 ?
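++  // The run() below applies the full projective action to a Dim-sized vector:
++  // (affine * v + translation), scaled by the inverse of the homogeneous coordinate
++  // (the bottom row of the matrix dotted with [v;1]).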
++  static ResultType run(const TransformType& tr, const Other& other)
++  { return ((tr.affine().nestByValue() * other).nestByValue() + tr.translation().nestByValue()).nestByValue()
++          * (Scalar(1) / ( (tr.matrix().template block<1,Dim>(Dim,0) * other).coeff(0) + tr.matrix().coeff(Dim,Dim))); }
++};
++
++#endif // EIGEN_TRANSFORM_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/LU/Determinant.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/LU/Determinant.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/LU/Determinant.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/LU/Determinant.h	2008-08-20 18:52:53.000000000 +0200
+@@ -0,0 +1,122 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_DETERMINANT_H
++#define EIGEN_DETERMINANT_H
++
++template<typename Derived>
++inline const typename Derived::Scalar ei_bruteforce_det3_helper
++(const MatrixBase<Derived>& matrix, int a, int b, int c)
++{
++  return matrix.coeff(0,a)
++         * (matrix.coeff(1,b) * matrix.coeff(2,c) - matrix.coeff(1,c) * matrix.coeff(2,b));
++}
++
++template<typename Derived>
++const typename Derived::Scalar ei_bruteforce_det4_helper
++(const MatrixBase<Derived>& matrix, int j, int k, int m, int n)
++{
++  return (matrix.coeff(j,0) * matrix.coeff(k,1) - matrix.coeff(k,0) * matrix.coeff(j,1))
++       * (matrix.coeff(m,2) * matrix.coeff(n,3) - matrix.coeff(n,2) * matrix.coeff(m,3));
++}
++
++const int TriangularDeterminant = 0;
++
++template<typename Derived,
++         int DeterminantType =
++           (Derived::Flags & (UpperTriangularBit | LowerTriangularBit))
++           ? TriangularDeterminant : Derived::RowsAtCompileTime
++> struct ei_determinant_impl
++{
++  static inline typename ei_traits<Derived>::Scalar run(const Derived& m)
++  {
++    return m.lu().determinant();
++  }
++};
++
++template<typename Derived> struct ei_determinant_impl<Derived, TriangularDeterminant>
++{
++  static inline typename ei_traits<Derived>::Scalar run(const Derived& m)
++  {
++    if (Derived::Flags & UnitDiagBit)
++      return 1;
++    else if (Derived::Flags & ZeroDiagBit)
++      return 0;
++    else
++      return m.diagonal().redux(ei_scalar_product_op<typename ei_traits<Derived>::Scalar>());
++  }
++};
++
++template<typename Derived> struct ei_determinant_impl<Derived, 1>
++{
++  static inline typename ei_traits<Derived>::Scalar run(const Derived& m)
++  {
++    return m.coeff(0,0);
++  }
++};
++
++template<typename Derived> struct ei_determinant_impl<Derived, 2>
++{
++  static inline typename ei_traits<Derived>::Scalar run(const Derived& m)
++  {
++    return m.coeff(0,0) * m.coeff(1,1) - m.coeff(1,0) * m.coeff(0,1);
++  }
++};
++
++template<typename Derived> struct ei_determinant_impl<Derived, 3>
++{
++  static typename ei_traits<Derived>::Scalar run(const Derived& m)
++  {
++    return ei_bruteforce_det3_helper(m,0,1,2)
++          - ei_bruteforce_det3_helper(m,1,0,2)
++          + ei_bruteforce_det3_helper(m,2,0,1);
++  }
++};
++
++template<typename Derived> struct ei_determinant_impl<Derived, 4>
++{
++  static typename ei_traits<Derived>::Scalar run(const Derived& m)
++  {
++    // trick by Martin Costabel to compute 4x4 det with only 30 muls
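++    // (This is the generalized Laplace expansion along the first two columns: each helper term
++    //  multiplies a 2x2 minor taken from columns 0,1 by the complementary 2x2 minor from columns 2,3.)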
++    return ei_bruteforce_det4_helper(m,0,1,2,3)
++          - ei_bruteforce_det4_helper(m,0,2,1,3)
++          + ei_bruteforce_det4_helper(m,0,3,1,2)
++          + ei_bruteforce_det4_helper(m,1,2,0,3)
++          - ei_bruteforce_det4_helper(m,1,3,0,2)
++          + ei_bruteforce_det4_helper(m,2,3,0,1);
++  }
++};
++
++/** \lu_module
++  *
++  * \returns the determinant of this matrix
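++  *
++  * For example (illustrative):
++  * \code
++  * Matrix3d m = Matrix3d::Identity();
++  * double d = m.determinant(); // 1.0
++  * \endcode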
++  */
++template<typename Derived>
++inline typename ei_traits<Derived>::Scalar MatrixBase<Derived>::determinant() const
++{
++  assert(rows() == cols());
++  return ei_determinant_impl<Derived>::run(derived());
++}
++
++#endif // EIGEN_DETERMINANT_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/LU/Inverse.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/LU/Inverse.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/LU/Inverse.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/LU/Inverse.h	2008-08-20 18:52:53.000000000 +0200
+@@ -0,0 +1,249 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_INVERSE_H
++#define EIGEN_INVERSE_H
++
++/********************************************************************
++*** Part 1 : optimized implementations for fixed-size 2,3,4 cases ***
++********************************************************************/
++
++template<typename MatrixType>
++void ei_compute_inverse_in_size2_case(const MatrixType& matrix, MatrixType* result)
++{
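++  // Closed-form cofactor inverse of a 2x2 matrix [[a,b],[c,d]]:
++  // inverse = [[d,-b],[-c,a]] / (a*d - b*c); the code below fills the result coefficient-wise.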
++  typedef typename MatrixType::Scalar Scalar;
++  const Scalar invdet = Scalar(1) / matrix.determinant();
++  result->coeffRef(0,0) = matrix.coeff(1,1) * invdet;
++  result->coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
++  result->coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
++  result->coeffRef(1,1) = matrix.coeff(0,0) * invdet;
++}
++
++template<typename XprType, typename MatrixType>
++bool ei_compute_inverse_in_size2_case_with_check(const XprType& matrix, MatrixType* result)
++{
++  typedef typename MatrixType::Scalar Scalar;
++  const Scalar det = matrix.determinant();
++  if(ei_isMuchSmallerThan(det, matrix.cwise().abs().maxCoeff())) return false;
++  const Scalar invdet = Scalar(1) / det;
++  result->coeffRef(0,0) = matrix.coeff(1,1) * invdet;
++  result->coeffRef(1,0) = -matrix.coeff(1,0) * invdet;
++  result->coeffRef(0,1) = -matrix.coeff(0,1) * invdet;
++  result->coeffRef(1,1) = matrix.coeff(0,0) * invdet;
++  return true;
++}
++
++template<typename MatrixType>
++void ei_compute_inverse_in_size3_case(const MatrixType& matrix, MatrixType* result)
++{
++  typedef typename MatrixType::Scalar Scalar;
++  const Scalar det_minor00 = matrix.minor(0,0).determinant();
++  const Scalar det_minor10 = matrix.minor(1,0).determinant();
++  const Scalar det_minor20 = matrix.minor(2,0).determinant();
++  const Scalar invdet = Scalar(1) / ( det_minor00 * matrix.coeff(0,0)
++                                    - det_minor10 * matrix.coeff(1,0)
++                                    + det_minor20 * matrix.coeff(2,0) );
++  result->coeffRef(0, 0) = det_minor00 * invdet;
++  result->coeffRef(0, 1) = -det_minor10 * invdet;
++  result->coeffRef(0, 2) = det_minor20 * invdet;
++  result->coeffRef(1, 0) = -matrix.minor(0,1).determinant() * invdet;
++  result->coeffRef(1, 1) = matrix.minor(1,1).determinant() * invdet;
++  result->coeffRef(1, 2) = -matrix.minor(2,1).determinant() * invdet;
++  result->coeffRef(2, 0) = matrix.minor(0,2).determinant() * invdet;
++  result->coeffRef(2, 1) = -matrix.minor(1,2).determinant() * invdet;
++  result->coeffRef(2, 2) = matrix.minor(2,2).determinant() * invdet;
++}
++
++template<typename MatrixType>
++bool ei_compute_inverse_in_size4_case_helper(const MatrixType& matrix, MatrixType* result)
++{
++  /* Let's split M into four 2x2 blocks:
++    * (P Q)
++    * (R S)
++    * If P is invertible, with inverse denoted by P_inverse, and if
++    * (S - R*P_inverse*Q) is also invertible, then the inverse of M is
++    * (P' Q')
++    * (R' S')
++    * where
++    * S' = (S - R*P_inverse*Q)^(-1)
++    * P' = P_inverse + (P_inverse*Q) * S' * (R*P_inverse)
++    * Q' = -(P_inverse*Q) * S'
++    * R' = -S' * (R*P_inverse)
++    */
++  typedef Block<MatrixType,2,2> XprBlock22;
++  typedef typename XprBlock22::Eval Block22;
++  Block22 P_inverse;
++  if(ei_compute_inverse_in_size2_case_with_check(matrix.template block<2,2>(0,0), &P_inverse))
++  {
++    const Block22 Q = matrix.template block<2,2>(0,2);
++    const Block22 P_inverse_times_Q = P_inverse * Q;
++    const XprBlock22 R = matrix.template block<2,2>(2,0);
++    const Block22 R_times_P_inverse = R * P_inverse;
++    const Block22 R_times_P_inverse_times_Q = R_times_P_inverse * Q;
++    const XprBlock22 S = matrix.template block<2,2>(2,2);
++    const Block22 X = S - R_times_P_inverse_times_Q;
++    Block22 Y;
++    ei_compute_inverse_in_size2_case(X, &Y);
++    result->template block<2,2>(2,2) = Y;
++    result->template block<2,2>(2,0) = - Y * R_times_P_inverse;
++    const Block22 Z = P_inverse_times_Q * Y;
++    result->template block<2,2>(0,2) = - Z;
++    result->template block<2,2>(0,0) = P_inverse + Z * R_times_P_inverse;
++    return true;
++  }
++  else
++  {
++    return false;
++  }
++}
++
++template<typename MatrixType>
++void ei_compute_inverse_in_size4_case(const MatrixType& matrix, MatrixType* result)
++{
++  if(ei_compute_inverse_in_size4_case_helper(matrix, result))
++  {
++    // good! The top-left 2x2 block was invertible, so the 2x2-block approach succeeded.
++    return;
++  }
++  else
++  {
++    // rare case: the topleft 2x2 block is not invertible (but the matrix itself is assumed to be).
++    // since this is a rare case, we don't need to optimize it. We just want to handle it with little
++    // additional code.
++    MatrixType m(matrix);
++    m.row(1).swap(m.row(2));
++    if(ei_compute_inverse_in_size4_case_helper(m, result))
++    {
++      // good, the topleft 2x2 block of m is invertible. Since m is different from matrix in that two
++      // rows were permuted, the actual inverse of matrix is derived from the inverse of m by permuting
++      // the corresponding columns.
++      result->col(1).swap(result->col(2));
++    }
++    else
++    {
++      // last possible case. Since matrix is assumed to be invertible, this last case has to work.
++      m.row(1).swap(m.row(2));
++      m.row(1).swap(m.row(3));
++      ei_compute_inverse_in_size4_case_helper(m, result);
++      result->col(1).swap(result->col(3));
++    }
++  }
++}
++
++/***********************************************
++*** Part 2 : selector and MatrixBase methods ***
++***********************************************/
++
++template<typename MatrixType, int Size = MatrixType::RowsAtCompileTime>
++struct ei_compute_inverse
++{
++  static inline void run(const MatrixType& matrix, MatrixType* result)
++  {
++    LU<MatrixType> lu(matrix);
++    lu.computeInverse(result);
++  }
++};
++
++template<typename MatrixType>
++struct ei_compute_inverse<MatrixType, 1>
++{
++  static inline void run(const MatrixType& matrix, MatrixType* result)
++  {
++    typedef typename MatrixType::Scalar Scalar;
++    result->coeffRef(0,0) = Scalar(1) / matrix.coeff(0,0);
++  }
++};
++
++template<typename MatrixType>
++struct ei_compute_inverse<MatrixType, 2>
++{
++  static inline void run(const MatrixType& matrix, MatrixType* result)
++  {
++    ei_compute_inverse_in_size2_case(matrix, result);
++  }
++};
++
++template<typename MatrixType>
++struct ei_compute_inverse<MatrixType, 3>
++{
++  static inline void run(const MatrixType& matrix, MatrixType* result)
++  {
++    ei_compute_inverse_in_size3_case(matrix, result);
++  }
++};
++
++template<typename MatrixType>
++struct ei_compute_inverse<MatrixType, 4>
++{
++  static inline void run(const MatrixType& matrix, MatrixType* result)
++  {
++    ei_compute_inverse_in_size4_case(matrix, result);
++  }
++};
++
++/** \lu_module
++  *
++  * Computes the matrix inverse of this matrix.
++  *
++  * \note This matrix must be invertible, otherwise the result is undefined.
++  *
++  * \param result Pointer to the matrix in which to store the result.
++  *
++  * Example: \include MatrixBase_computeInverse.cpp
++  * Output: \verbinclude MatrixBase_computeInverse.out
++  *
++  * \sa inverse()
++  */
++template<typename Derived>
++inline void MatrixBase<Derived>::computeInverse(typename MatrixBase<Derived>::EvalType *result) const
++{
++  typedef typename ei_eval<Derived>::type MatrixType;
++  ei_assert(rows() == cols());
++  EIGEN_STATIC_ASSERT(NumTraits<Scalar>::HasFloatingPoint,scalar_type_must_be_floating_point);
++  ei_compute_inverse<MatrixType>::run(eval(), result);
++}
++
++/** \lu_module
++  *
++  * \returns the matrix inverse of this matrix.
++  *
++  * \note This matrix must be invertible, otherwise the result is undefined.
++  *
++  * \note This method returns a matrix by value, which can be inefficient. To avoid that overhead,
++  * use computeInverse() instead.
++  *
++  * Example: \include MatrixBase_inverse.cpp
++  * Output: \verbinclude MatrixBase_inverse.out
++  *
++  * \sa computeInverse()
++  */
++template<typename Derived>
++inline const typename MatrixBase<Derived>::EvalType MatrixBase<Derived>::inverse() const
++{
++  EvalType result(rows(), cols());
++  computeInverse(&result);
++  return result;
++}
++
++#endif // EIGEN_INVERSE_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/LU/LU.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/LU/LU.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/LU/LU.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/LU/LU.h	2008-08-20 18:52:53.000000000 +0200
+@@ -0,0 +1,493 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_LU_H
++#define EIGEN_LU_H
++
++/** \ingroup LU_Module
++  *
++  * \class LU
++  *
++  * \brief LU decomposition of a matrix with complete pivoting, and related features
++  *
++  * \param MatrixType the type of the matrix of which we are computing the LU decomposition
++  *
++  * This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A
++  * is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q
++  * are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues of U are
++  * in non-increasing order.
++  *
++  * This decomposition provides the generic approach to solving systems of linear equations, computing
++  * the rank, invertibility, inverse, kernel, and determinant.
++  *
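++  * For instance, solving a small linear system could look like this (illustrative sketch):
++  * \code
++  * Matrix3d A = Matrix3d::Random();
++  * Vector3d b = Vector3d::Random(), x;
++  * bool solvable = A.lu().solve(b, &x); // on success, x satisfies A*x = b
++  * \endcode
++  *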
++  * The data of the LU decomposition can be directly accessed through the methods matrixLU(),
++  * permutationP(), permutationQ(). Convenience methods matrixL(), matrixU() are also provided.
++  *
++  * As an example, here is how the original matrix can be retrieved in the square case:
++  * \include class_LU_1.cpp
++  * Output: \verbinclude class_LU_1.out
++  *
++  * When the matrix is not square, matrixL() is no longer very useful: if one needs it, one has
++  * to construct the L matrix by hand, as shown in this example:
++  * \include class_LU_2.cpp
++  * Output: \verbinclude class_LU_2.out
++  *
++  * \sa MatrixBase::lu(), MatrixBase::determinant(), MatrixBase::inverse(), MatrixBase::computeInverse()
++  */
++template<typename MatrixType> class LU
++{
++  public:
++
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
++    typedef Matrix<int, 1, MatrixType::ColsAtCompileTime> IntRowVectorType;
++    typedef Matrix<int, MatrixType::RowsAtCompileTime, 1> IntColVectorType;
++    typedef Matrix<Scalar, 1, MatrixType::ColsAtCompileTime> RowVectorType;
++    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVectorType;
++
++    enum { MaxSmallDimAtCompileTime = EIGEN_ENUM_MIN(
++             MatrixType::MaxColsAtCompileTime,
++             MatrixType::MaxRowsAtCompileTime)
++    };
++
++    typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, Dynamic,
++                   MatrixType::MaxColsAtCompileTime, MaxSmallDimAtCompileTime> KernelReturnType;
++
++    /** Constructor.
++      *
++      * \param matrix the matrix of which to compute the LU decomposition.
++      */
++    LU(const MatrixType& matrix);
++
++    /** \returns the LU decomposition matrix: the upper-triangular part is U, the
++      * unit-lower-triangular part is L (at least for square matrices; in the non-square
++      * case, special care is needed, see the documentation of class LU).
++      *
++      * \sa matrixL(), matrixU()
++      */
++    inline const MatrixType& matrixLU() const
++    {
++      return m_lu;
++    }
++
++    /** \returns an expression of the unit-lower-triangular part of the LU matrix. In the square case,
++      *          this is the L matrix. In the non-square case, actually obtaining the L matrix takes some
++      *          more care; see the documentation of class LU.
++      *
++      * \sa matrixLU(), matrixU()
++      */
++    inline const Part<MatrixType, UnitLower> matrixL() const
++    {
++      return m_lu;
++    }
++
++    /** \returns an expression of the U matrix, i.e. the upper-triangular part of the LU matrix.
++      *
++      * \note The eigenvalues of U are sorted in non-increasing order.
++      *
++      * \sa matrixLU(), matrixL()
++      */
++    inline const Part<MatrixType, Upper> matrixU() const
++    {
++      return m_lu;
++    }
++
++    /** \returns a vector of integers, whose size is the number of rows of the matrix being decomposed,
++      * representing the P permutation i.e. the permutation of the rows. For its precise meaning,
++      * see the examples given in the documentation of class LU.
++      *
++      * \sa permutationQ()
++      */
++    inline const IntColVectorType& permutationP() const
++    {
++      return m_p;
++    }
++
++    /** \returns a vector of integers, whose size is the number of columns of the matrix being
++      * decomposed, representing the Q permutation i.e. the permutation of the columns.
++      * For its precise meaning, see the examples given in the documentation of class LU.
++      *
++      * \sa permutationP()
++      */
++    inline const IntRowVectorType& permutationQ() const
++    {
++      return m_q;
++    }
++
++    /** Computes the kernel of the matrix.
++      *
++      * \note: this method is only allowed on non-invertible matrices, as determined by
++      * isInvertible(). Calling it on an invertible matrix will trigger an assertion failure.
++      *
++      * \param result a pointer to the matrix in which to store the kernel. The columns of this
++      * matrix will be set to form a basis of the kernel (it will be resized
++      * if necessary).
++      *
++      * Example: \include LU_computeKernel.cpp
++      * Output: \verbinclude LU_computeKernel.out
++      *
++      * \sa kernel()
++      */
++    void computeKernel(Matrix<typename MatrixType::Scalar,
++                              MatrixType::ColsAtCompileTime, Dynamic,
++                              MatrixType::MaxColsAtCompileTime,
++                              LU<MatrixType>::MaxSmallDimAtCompileTime
++                             > *result) const;
++
++    /** \returns the kernel of the matrix. The columns of the returned matrix
++      * will form a basis of the kernel.
++      *
++      * \note: this method is only allowed on non-invertible matrices, as determined by
++      * isInvertible(). Calling it on an invertible matrix will trigger an assertion failure.
++      *
++      * \note: this method returns a matrix by value, which induces some inefficiency.
++      *        If you prefer to avoid this overhead, use computeKernel() instead.
++      *
++      * Example: \include LU_kernel.cpp
++      * Output: \verbinclude LU_kernel.out
++      *
++      * \sa computeKernel()
++      */
++    const KernelReturnType kernel() const;
++
++    /** This method finds a solution x to the equation Ax=b, where A is the matrix of which
++      * *this is the LU decomposition, if any exists.
++      *
++      * \param b the right-hand-side of the equation to solve. Can be a vector or a matrix,
++      *          the only requirement in order for the equation to make sense is that
++      *          b.rows()==A.rows(), where A is the matrix of which *this is the LU decomposition.
++      * \param result a pointer to the vector or matrix in which to store the solution, if any exists.
++      *          Resized if necessary, so that result->rows()==A.cols() and result->cols()==b.cols().
++      *          If no solution exists, *result is left with undefined coefficients.
++      *
++      * \returns true if any solution exists, false if no solution exists.
++      *
++      * \note If more than one solution exists, this method will arbitrarily choose one.
++      *       If you need a complete analysis of the space of solutions, take the one solution obtained
++      *       by this method and add to it elements of the kernel, as determined by kernel().
++      *
++      * Example: \include LU_solve.cpp
++      * Output: \verbinclude LU_solve.out
++      *
++      * \sa MatrixBase::solveTriangular(), kernel(), computeKernel(), inverse(), computeInverse()
++      */
++    template<typename OtherDerived, typename ResultType>
++    bool solve(
++    const MatrixBase<OtherDerived>& b,
++    ResultType *result
++    ) const;
++
++    /** \returns the determinant of the matrix of which
++      * *this is the LU decomposition. It has only linear complexity
++      * (that is, O(n) where n is the dimension of the square matrix)
++      * as the LU decomposition has already been computed.
++      *
++      * \note This is only for square matrices.
++      *
++      * \note For fixed-size matrices of size up to 4, MatrixBase::determinant() offers
++      *       optimized paths.
++      *
++      * \warning a determinant can be very big or small, so for matrices
++      * of large enough dimension, there is a risk of overflow/underflow.
++      *
++      * \sa MatrixBase::determinant()
++      */
++    typename ei_traits<MatrixType>::Scalar determinant() const;
++
++    /** \returns the rank of the matrix of which *this is the LU decomposition.
++      *
++      * \note This is computed at the time of the construction of the LU decomposition. This
++      *       method does not perform any further computation.
++      */
++    inline int rank() const
++    {
++      return m_rank;
++    }
++
++    /** \returns the dimension of the kernel of the matrix of which *this is the LU decomposition.
++      *
++      * \note Since the rank is computed at the time of the construction of the LU decomposition, this
++      *       method almost does not perform any further computation.
++      */
++    inline int dimensionOfKernel() const
++    {
++      return m_lu.cols() - m_rank;
++    }
++
++    /** \returns true if the matrix of which *this is the LU decomposition represents an injective
++      *          linear map, i.e. has trivial kernel; false otherwise.
++      *
++      * \note Since the rank is computed at the time of the construction of the LU decomposition, this
++      *       method almost does not perform any further computation.
++      */
++    inline bool isInjective() const
++    {
++      return m_rank == m_lu.cols();
++    }
++
++    /** \returns true if the matrix of which *this is the LU decomposition represents a surjective
++      *          linear map; false otherwise.
++      *
++      * \note Since the rank is computed at the time of the construction of the LU decomposition, this
++      *       method almost does not perform any further computation.
++      */
++    inline bool isSurjective() const
++    {
++      return m_rank == m_lu.rows();
++    }
++
++    /** \returns true if the matrix of which *this is the LU decomposition is invertible.
++      *
++      * \note Since the rank is computed at the time of the construction of the LU decomposition, this
++      *       method almost does not perform any further computation.
++      */
++    inline bool isInvertible() const
++    {
++      return isInjective() && isSurjective();
++    }
++
++    /** Computes the inverse of the matrix of which *this is the LU decomposition.
++      *
++      * \param result a pointer to the matrix into which to store the inverse. Resized if needed.
++      *
++      * \note If this matrix is not invertible, *result is left with undefined coefficients.
++      *       Use isInvertible() to first determine whether this matrix is invertible.
++      *
++      * \sa MatrixBase::computeInverse(), inverse()
++      */
++    inline void computeInverse(MatrixType *result) const
++    {
++      solve(MatrixType::Identity(m_lu.rows(), m_lu.cols()), result);
++    }
++
++    /** \returns the inverse of the matrix of which *this is the LU decomposition.
++      *
++      * \note If this matrix is not invertible, the returned matrix has undefined coefficients.
++      *       Use isInvertible() to first determine whether this matrix is invertible.
++      *
++      * \sa computeInverse(), MatrixBase::inverse()
++      */
++    inline MatrixType inverse() const
++    {
++      MatrixType result;
++      computeInverse(&result);
++      return result;
++    }
++
++  protected:
++    MatrixType m_lu;
++    IntColVectorType m_p;
++    IntRowVectorType m_q;
++    int m_det_pq;
++    int m_rank;
++};
++
++template<typename MatrixType>
++LU<MatrixType>::LU(const MatrixType& matrix)
++  : m_lu(matrix),
++    m_p(matrix.rows()),
++    m_q(matrix.cols())
++{
++  const int size = matrix.diagonal().size();
++  const int rows = matrix.rows();
++  const int cols = matrix.cols();
++
++  IntColVectorType rows_transpositions(matrix.rows());
++  IntRowVectorType cols_transpositions(matrix.cols());
++  int number_of_transpositions = 0;
++
++  RealScalar biggest = RealScalar(0);
++  for(int k = 0; k < size; k++)
++  {
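++    // One step of Gaussian elimination with complete pivoting: pick the largest coefficient
++    // of the remaining bottom-right corner as pivot, swap it into position (k,k),
++    // then eliminate the entries below it in column k.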
++    int row_of_biggest_in_corner, col_of_biggest_in_corner;
++    RealScalar biggest_in_corner;
++
++    biggest_in_corner = m_lu.corner(Eigen::BottomRight, rows-k, cols-k)
++                  .cwise().abs()
++                  .maxCoeff(&row_of_biggest_in_corner, &col_of_biggest_in_corner);
++    row_of_biggest_in_corner += k;
++    col_of_biggest_in_corner += k;
++    rows_transpositions.coeffRef(k) = row_of_biggest_in_corner;
++    cols_transpositions.coeffRef(k) = col_of_biggest_in_corner;
++    if(k != row_of_biggest_in_corner) {
++      m_lu.row(k).swap(m_lu.row(row_of_biggest_in_corner));
++      number_of_transpositions++;
++    }
++    if(k != col_of_biggest_in_corner) {
++      m_lu.col(k).swap(m_lu.col(col_of_biggest_in_corner));
++      number_of_transpositions++;
++    }
++
++    if(k==0) biggest = biggest_in_corner;
++    const Scalar lu_k_k = m_lu.coeff(k,k);
++    if(ei_isMuchSmallerThan(lu_k_k, biggest)) continue;
++    if(k<rows-1)
++      m_lu.col(k).end(rows-k-1) /= lu_k_k;
++    if(k<size-1)
++      for( int col = k + 1; col < cols; col++ )
++        m_lu.col(col).end(rows-k-1) -= m_lu.col(k).end(rows-k-1) * m_lu.coeff(k,col);
++  }
++
++  for(int k = 0; k < matrix.rows(); k++) m_p.coeffRef(k) = k;
++  for(int k = size-1; k >= 0; k--)
++    std::swap(m_p.coeffRef(k), m_p.coeffRef(rows_transpositions.coeff(k)));
++
++  for(int k = 0; k < matrix.cols(); k++) m_q.coeffRef(k) = k;
++  for(int k = 0; k < size; k++)
++    std::swap(m_q.coeffRef(k), m_q.coeffRef(cols_transpositions.coeff(k)));
++
++  m_det_pq = (number_of_transpositions%2) ? -1 : 1;
++
++  for(m_rank = 0; m_rank < size; m_rank++)
++    if(ei_isMuchSmallerThan(m_lu.diagonal().coeff(m_rank), m_lu.diagonal().coeff(0)))
++      break;
++}
++
++template<typename MatrixType>
++typename ei_traits<MatrixType>::Scalar LU<MatrixType>::determinant() const
++{
++  return Scalar(m_det_pq) * m_lu.diagonal().redux(ei_scalar_product_op<Scalar>());
++}
++
++template<typename MatrixType>
++void LU<MatrixType>::computeKernel(Matrix<typename MatrixType::Scalar,
++                                          MatrixType::ColsAtCompileTime, Dynamic,
++                                          MatrixType::MaxColsAtCompileTime,
++                                          LU<MatrixType>::MaxSmallDimAtCompileTime
++                                   > *result) const
++{
++  ei_assert(!isInvertible());
++  const int dimker = dimensionOfKernel(), cols = m_lu.cols();
++  result->resize(cols, dimker);
++
++  /* Let us use the following lemma:
++    *
++    * Lemma: If the matrix A has the LU decomposition PAQ = LU,
++    * then Ker A = Q( Ker U ).
++    *
++    * Proof: trivial: just keep in mind that P, Q, L are invertible.
++    */
++
++  /* Thus, all we need to do is to compute Ker U, and then apply Q.
++    *
++    * U is upper triangular, with eigenvalues sorted in decreasing order of
++    * absolute value. Thus, the diagonal of U ends with exactly
++    * m_dimKer zero's. Let us use that to construct m_dimKer linearly
++    * independent vectors in Ker U.
++    */
++
++  Matrix<Scalar, Dynamic, Dynamic, MatrixType::MaxColsAtCompileTime, MaxSmallDimAtCompileTime>
++    y(-m_lu.corner(TopRight, m_rank, dimker));
++
++  m_lu.corner(TopLeft, m_rank, m_rank)
++      .template marked<Upper>()
++      .solveTriangularInPlace(y);
++
++  for(int i = 0; i < m_rank; i++)
++    result->row(m_q.coeff(i)) = y.row(i);
++  for(int i = m_rank; i < cols; i++) result->row(m_q.coeff(i)).setZero();
++  for(int k = 0; k < dimker; k++) result->coeffRef(m_q.coeff(m_rank+k), k) = Scalar(1);
++}
++
++template<typename MatrixType>
++const typename LU<MatrixType>::KernelReturnType
++LU<MatrixType>::kernel() const
++{
++  Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, Dynamic,
++                    MatrixType::MaxColsAtCompileTime,
++                    LU<MatrixType>::MaxSmallDimAtCompileTime> result(m_lu.cols(), dimensionOfKernel());
++  computeKernel(&result);
++  return result;
++}
++
++template<typename MatrixType>
++template<typename OtherDerived, typename ResultType>
++bool LU<MatrixType>::solve(
++  const MatrixBase<OtherDerived>& b,
++  ResultType *result
++) const
++{
++  /* The decomposition PAQ = LU can be rewritten as A = P^{-1} L U Q^{-1}.
++   * So we proceed as follows:
++   * Step 1: compute c = Pb.
++   * Step 2: replace c by the solution x to Lx = c. Exists because L is invertible.
++   * Step 3: compute d such that Ud = c. Check if such d really exists.
++   * Step 4: result = Qd;
++   */
++
++  const int rows = m_lu.rows();
++  ei_assert(b.rows() == rows);
++  const int smalldim = std::min(rows, m_lu.cols());
++
++  typename OtherDerived::Eval c(b.rows(), b.cols());
++
++  // Step 1
++  for(int i = 0; i < rows; i++) c.row(m_p.coeff(i)) = b.row(i);
++
++  // Step 2
++  Matrix<Scalar, MatrixType::RowsAtCompileTime, MatrixType::RowsAtCompileTime,
++         MatrixType::MaxRowsAtCompileTime,
++         MatrixType::MaxRowsAtCompileTime> l(rows, rows);
++  l.setZero();
++  l.corner(Eigen::TopLeft,rows,smalldim)
++    = m_lu.corner(Eigen::TopLeft,rows,smalldim);
++  l.template marked<UnitLower>().solveTriangularInPlace(c);
++
++  // Step 3
++  if(!isSurjective())
++  {
++    // is c in the image of U ?
++    RealScalar biggest_in_c = c.corner(TopLeft, m_rank, c.cols()).cwise().abs().maxCoeff();
++    for(int col = 0; col < c.cols(); col++)
++      for(int row = m_rank; row < c.rows(); row++)
++        if(!ei_isMuchSmallerThan(c.coeff(row,col), biggest_in_c))
++          return false;
++  }
++  Matrix<Scalar, Dynamic, OtherDerived::ColsAtCompileTime,
++         MatrixType::MaxRowsAtCompileTime, OtherDerived::MaxColsAtCompileTime>
++    d(c.corner(TopLeft, m_rank, c.cols()));
++  m_lu.corner(TopLeft, m_rank, m_rank)
++      .template marked<Upper>()
++      .solveTriangularInPlace(d);
++
++  // Step 4
++  result->resize(m_lu.cols(), b.cols());
++  for(int i = 0; i < m_rank; i++) result->row(m_q.coeff(i)) = d.row(i);
++  for(int i = m_rank; i < m_lu.cols(); i++) result->row(m_q.coeff(i)).setZero();
++  return true;
++}
++
++/** \lu_module
++  *
++  * \return the LU decomposition of \c *this.
++  *
++  * \sa class LU
++  */
++template<typename Derived>
++inline const LU<typename MatrixBase<Derived>::EvalType>
++MatrixBase<Derived>::lu() const
++{
++  return eval();
++}
++
++#endif // EIGEN_LU_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/EigenSolver.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/EigenSolver.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/EigenSolver.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/EigenSolver.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,615 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_EIGENSOLVER_H
++#define EIGEN_EIGENSOLVER_H
++
++/** \ingroup QR_Module
++  *
++  * \class EigenSolver
++  *
++  * \brief Eigenvalue/eigenvector solver for non-selfadjoint matrices
++  *
++  * \param MatrixType the type of the matrix of which we are computing the eigen decomposition
++  *
++  * Currently it only supports real matrices.
++  *
++  * \note this code was adapted from JAMA (public domain)
++  *
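++  * A minimal usage sketch (illustrative):
++  * \code
++  * Matrix3d m = Matrix3d::Random();
++  * EigenSolver<Matrix3d> solver(m);
++  * EigenSolver<Matrix3d>::EigenvalueType lambdas = solver.eigenvalues();
++  * Matrix3d vectors = solver.eigenvectors();
++  * \endcode
++  *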
++  * \sa MatrixBase::eigenvalues(), SelfAdjointEigenSolver
++  */
++template<typename _MatrixType> class EigenSolver
++{
++  public:
++
++    typedef _MatrixType MatrixType;
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename NumTraits<Scalar>::Real RealScalar;
++    typedef std::complex<RealScalar> Complex;
++    typedef Matrix<Complex, MatrixType::ColsAtCompileTime, 1> EigenvalueType;
++    typedef Matrix<RealScalar, MatrixType::ColsAtCompileTime, 1> RealVectorType;
++    typedef Matrix<RealScalar, Dynamic, 1> RealVectorTypeX;
++
++    EigenSolver(const MatrixType& matrix)
++      : m_eivec(matrix.rows(), matrix.cols()),
++        m_eivalues(matrix.cols())
++    {
++      compute(matrix);
++    }
++
++    MatrixType eigenvectors(void) const { return m_eivec; }
++
++    EigenvalueType eigenvalues(void) const { return m_eivalues; }
++
++    void compute(const MatrixType& matrix);
++
++  private:
++
++    void orthes(MatrixType& matH, RealVectorType& ort);
++    void hqr2(MatrixType& matH);
++
++  protected:
++    MatrixType m_eivec;
++    EigenvalueType m_eivalues;
++};
++
++template<typename MatrixType>
++void EigenSolver<MatrixType>::compute(const MatrixType& matrix)
++{
++  assert(matrix.cols() == matrix.rows());
++  int n = matrix.cols();
++  m_eivalues.resize(n,1);
++
++  MatrixType matH = matrix;
++  RealVectorType ort(n);
++
++  // Reduce to Hessenberg form.
++  orthes(matH, ort);
++
++  // Reduce Hessenberg to real Schur form.
++  hqr2(matH);
++}
++
++// Nonsymmetric reduction to Hessenberg form.
++template<typename MatrixType>
++void EigenSolver<MatrixType>::orthes(MatrixType& matH, RealVectorType& ort)
++{
++  //  This is derived from the Algol procedures orthes and ortran,
++  //  by Martin and Wilkinson, Handbook for Auto. Comp.,
++  //  Vol.ii-Linear Algebra, and the corresponding
++  //  Fortran subroutines in EISPACK.
++
++  int n = m_eivec.cols();
++  int low = 0;
++  int high = n-1;
++
++  for (int m = low+1; m <= high-1; m++)
++  {
++    // Scale column.
++    Scalar scale = matH.block(m, m-1, high-m+1, 1).cwise().abs().sum();
++    if (scale != 0.0)
++    {
++      // Compute Householder transformation.
++      Scalar h = 0.0;
++      // FIXME could be rewritten, but this one looks better wrt cache
++      for (int i = high; i >= m; i--)
++      {
++        ort.coeffRef(i) = matH.coeff(i,m-1)/scale;
++        h += ort.coeff(i) * ort.coeff(i);
++      }
++      Scalar g = ei_sqrt(h);
++      if (ort.coeff(m) > 0)
++        g = -g;
++      h = h - ort.coeff(m) * g;
++      ort.coeffRef(m) = ort.coeff(m) - g;
++
++      // Apply Householder similarity transformation
++      // H = (I-u*u'/h)*H*(I-u*u'/h)
++      int bSize = high-m+1;
++      matH.block(m, m, bSize, n-m) -= ((ort.block(m, bSize)/h)
++        * (ort.block(m, bSize).transpose() *  matH.block(m, m, bSize, n-m)).lazy()).lazy();
++
++      matH.block(0, m, high+1, bSize) -= ((matH.block(0, m, high+1, bSize) * ort.block(m, bSize)).lazy()
++        * (ort.block(m, bSize)/h).transpose()).lazy();
++
++      ort.coeffRef(m) = scale*ort.coeff(m);
++      matH.coeffRef(m,m-1) = scale*g;
++    }
++  }
++
++  // Accumulate transformations (Algol's ortran).
++  m_eivec.setIdentity();
++
++  for (int m = high-1; m >= low+1; m--)
++  {
++    if (matH.coeff(m,m-1) != 0.0)
++    {
++      ort.block(m+1, high-m) = matH.col(m-1).block(m+1, high-m);
++
++      int bSize = high-m+1;
++      m_eivec.block(m, m, bSize, bSize) += ( (ort.block(m, bSize) /  (matH.coeff(m,m-1) * ort.coeff(m) ) )
++        * (ort.block(m, bSize).transpose() * m_eivec.block(m, m, bSize, bSize)).lazy());
++    }
++  }
++}
++
++
++// Complex scalar division.
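++// Scales by whichever component of y has the larger magnitude (the classic "scale by the
++// larger part" trick) so intermediate products do not overflow when |yr| and |yi| differ widely.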
++template<typename Scalar>
++std::complex<Scalar> cdiv(Scalar xr, Scalar xi, Scalar yr, Scalar yi)
++{
++  Scalar r,d;
++  if (ei_abs(yr) > ei_abs(yi))
++  {
++      r = yi/yr;
++      d = yr + r*yi;
++      return std::complex<Scalar>((xr + r*xi)/d, (xi - r*xr)/d);
++  }
++  else
++  {
++      r = yr/yi;
++      d = yi + r*yr;
++      return std::complex<Scalar>((r*xr + xi)/d, (r*xi - xr)/d);
++  }
++}
++
++
++// Nonsymmetric reduction from Hessenberg to real Schur form.
++template<typename MatrixType>
++void EigenSolver<MatrixType>::hqr2(MatrixType& matH)
++{
++  //  This is derived from the Algol procedure hqr2,
++  //  by Martin and Wilkinson, Handbook for Auto. Comp.,
++  //  Vol.ii-Linear Algebra, and the corresponding
++  //  Fortran subroutine in EISPACK.
++
++  // Initialize
++  int nn = m_eivec.cols();
++  int n = nn-1;
++  int low = 0;
++  int high = nn-1;
++  Scalar eps = pow(2.0,-52.0);
++  Scalar exshift = 0.0;
++  Scalar p=0,q=0,r=0,s=0,z=0,t,w,x,y;
++
++  // Store roots isolated by balanc and compute matrix norm
++  // FIXME to be efficient the following would require a triangular reduction code
++  // Scalar norm = matH.upper().cwise().abs().sum() + matH.corner(BottomLeft,n,n).diagonal().cwise().abs().sum();
++  Scalar norm = 0.0;
++  for (int j = 0; j < nn; j++)
++  {
++    // FIXME what's the purpose of the following since the condition is always false
++    if ((j < low) || (j > high))
++    {
++      m_eivalues.coeffRef(j).real() = matH.coeff(j,j);
++      m_eivalues.coeffRef(j).imag() = 0.0;
++    }
++    norm += matH.col(j).start(std::min(j+1,nn)).cwise().abs().sum();
++  }
++
++  // Outer loop over eigenvalue index
++  int iter = 0;
++  while (n >= low)
++  {
++    // Look for single small sub-diagonal element
++    int l = n;
++    while (l > low)
++    {
++      s = ei_abs(matH.coeff(l-1,l-1)) + ei_abs(matH.coeff(l,l));
++      if (s == 0.0)
++          s = norm;
++      if (ei_abs(matH.coeff(l,l-1)) < eps * s)
++        break;
++      l--;
++    }
++
++    // Check for convergence
++    // One root found
++    if (l == n)
++    {
++      matH.coeffRef(n,n) = matH.coeff(n,n) + exshift;
++      m_eivalues.coeffRef(n).real() = matH.coeff(n,n);
++      m_eivalues.coeffRef(n).imag() = 0.0;
++      n--;
++      iter = 0;
++    }
++    else if (l == n-1) // Two roots found
++    {
++      w = matH.coeff(n,n-1) * matH.coeff(n-1,n);
++      p = (matH.coeff(n-1,n-1) - matH.coeff(n,n)) / 2.0;
++      q = p * p + w;
++      z = ei_sqrt(ei_abs(q));
++      matH.coeffRef(n,n) = matH.coeff(n,n) + exshift;
++      matH.coeffRef(n-1,n-1) = matH.coeff(n-1,n-1) + exshift;
++      x = matH.coeff(n,n);
++
++      // Scalar pair
++      if (q >= 0)
++      {
++        if (p >= 0)
++          z = p + z;
++        else
++          z = p - z;
++
++        m_eivalues.coeffRef(n-1).real() = x + z;
++        m_eivalues.coeffRef(n).real() = m_eivalues.coeff(n-1).real();
++        if (z != 0.0)
++          m_eivalues.coeffRef(n).real() = x - w / z;
++
++        m_eivalues.coeffRef(n-1).imag() = 0.0;
++        m_eivalues.coeffRef(n).imag() = 0.0;
++        x = matH.coeff(n,n-1);
++        s = ei_abs(x) + ei_abs(z);
++        p = x / s;
++        q = z / s;
++        r = ei_sqrt(p * p+q * q);
++        p = p / r;
++        q = q / r;
++
++        // Row modification
++        for (int j = n-1; j < nn; j++)
++        {
++          z = matH.coeff(n-1,j);
++          matH.coeffRef(n-1,j) = q * z + p * matH.coeff(n,j);
++          matH.coeffRef(n,j) = q * matH.coeff(n,j) - p * z;
++        }
++
++        // Column modification
++        for (int i = 0; i <= n; i++)
++        {
++          z = matH.coeff(i,n-1);
++          matH.coeffRef(i,n-1) = q * z + p * matH.coeff(i,n);
++          matH.coeffRef(i,n) = q * matH.coeff(i,n) - p * z;
++        }
++
++        // Accumulate transformations
++        for (int i = low; i <= high; i++)
++        {
++          z = m_eivec.coeff(i,n-1);
++          m_eivec.coeffRef(i,n-1) = q * z + p * m_eivec.coeff(i,n);
++          m_eivec.coeffRef(i,n) = q * m_eivec.coeff(i,n) - p * z;
++        }
++      }
++      else // Complex pair
++      {
++        m_eivalues.coeffRef(n-1).real() = x + p;
++        m_eivalues.coeffRef(n).real() = x + p;
++        m_eivalues.coeffRef(n-1).imag() = z;
++        m_eivalues.coeffRef(n).imag() = -z;
++      }
++      n = n - 2;
++      iter = 0;
++    }
++    else // No convergence yet
++    {
++      // Form shift
++      x = matH.coeff(n,n);
++      y = 0.0;
++      w = 0.0;
++      if (l < n)
++      {
++          y = matH.coeff(n-1,n-1);
++          w = matH.coeff(n,n-1) * matH.coeff(n-1,n);
++      }
++
++      // Wilkinson's original ad hoc shift
++      if (iter == 10)
++      {
++        exshift += x;
++        for (int i = low; i <= n; i++)
++          matH.coeffRef(i,i) -= x;
++        s = ei_abs(matH.coeff(n,n-1)) + ei_abs(matH.coeff(n-1,n-2));
++        x = y = 0.75 * s;
++        w = -0.4375 * s * s;
++      }
++
++      // MATLAB's new ad hoc shift
++      if (iter == 30)
++      {
++        s = (y - x) / 2.0;
++        s = s * s + w;
++        if (s > 0)
++        {
++          s = ei_sqrt(s);
++          if (y < x)
++            s = -s;
++          s = x - w / ((y - x) / 2.0 + s);
++          for (int i = low; i <= n; i++)
++            matH.coeffRef(i,i) -= s;
++          exshift += s;
++          x = y = w = 0.964;
++        }
++      }
++
++      iter = iter + 1;   // (Could check iteration count here.)
++
++      // Look for two consecutive small sub-diagonal elements
++      int m = n-2;
++      while (m >= l)
++      {
++        z = matH.coeff(m,m);
++        r = x - z;
++        s = y - z;
++        p = (r * s - w) / matH.coeff(m+1,m) + matH.coeff(m,m+1);
++        q = matH.coeff(m+1,m+1) - z - r - s;
++        r = matH.coeff(m+2,m+1);
++        s = ei_abs(p) + ei_abs(q) + ei_abs(r);
++        p = p / s;
++        q = q / s;
++        r = r / s;
++        if (m == l) {
++          break;
++        }
++        if (ei_abs(matH.coeff(m,m-1)) * (ei_abs(q) + ei_abs(r)) <
++          eps * (ei_abs(p) * (ei_abs(matH.coeff(m-1,m-1)) + ei_abs(z) +
++          ei_abs(matH.coeff(m+1,m+1)))))
++        {
++          break;
++        }
++        m--;
++      }
++
++      for (int i = m+2; i <= n; i++)
++      {
++        matH.coeffRef(i,i-2) = 0.0;
++        if (i > m+2)
++          matH.coeffRef(i,i-3) = 0.0;
++      }
++
++      // Double QR step involving rows l:n and columns m:n
++      for (int k = m; k <= n-1; k++)
++      {
++        int notlast = (k != n-1);
++        if (k != m) {
++          p = matH.coeff(k,k-1);
++          q = matH.coeff(k+1,k-1);
++          r = (notlast ? matH.coeff(k+2,k-1) : 0.0);
++          x = ei_abs(p) + ei_abs(q) + ei_abs(r);
++          if (x != 0.0)
++          {
++            p = p / x;
++            q = q / x;
++            r = r / x;
++          }
++        }
++
++        if (x == 0.0)
++          break;
++
++        s = ei_sqrt(p * p + q * q + r * r);
++
++        if (p < 0)
++          s = -s;
++
++        if (s != 0)
++        {
++          if (k != m)
++            matH.coeffRef(k,k-1) = -s * x;
++          else if (l != m)
++            matH.coeffRef(k,k-1) = -matH.coeff(k,k-1);
++
++          p = p + s;
++          x = p / s;
++          y = q / s;
++          z = r / s;
++          q = q / p;
++          r = r / p;
++
++          // Row modification
++          for (int j = k; j < nn; j++)
++          {
++            p = matH.coeff(k,j) + q * matH.coeff(k+1,j);
++            if (notlast)
++            {
++              p = p + r * matH.coeff(k+2,j);
++              matH.coeffRef(k+2,j) = matH.coeff(k+2,j) - p * z;
++            }
++            matH.coeffRef(k,j) = matH.coeff(k,j) - p * x;
++            matH.coeffRef(k+1,j) = matH.coeff(k+1,j) - p * y;
++          }
++
++          // Column modification
++          for (int i = 0; i <= std::min(n,k+3); i++)
++          {
++            p = x * matH.coeff(i,k) + y * matH.coeff(i,k+1);
++            if (notlast)
++            {
++              p = p + z * matH.coeff(i,k+2);
++              matH.coeffRef(i,k+2) = matH.coeff(i,k+2) - p * r;
++            }
++            matH.coeffRef(i,k) = matH.coeff(i,k) - p;
++            matH.coeffRef(i,k+1) = matH.coeff(i,k+1) - p * q;
++          }
++
++          // Accumulate transformations
++          for (int i = low; i <= high; i++)
++          {
++            p = x * m_eivec.coeff(i,k) + y * m_eivec.coeff(i,k+1);
++            if (notlast)
++            {
++              p = p + z * m_eivec.coeff(i,k+2);
++              m_eivec.coeffRef(i,k+2) = m_eivec.coeff(i,k+2) - p * r;
++            }
++            m_eivec.coeffRef(i,k) = m_eivec.coeff(i,k) - p;
++            m_eivec.coeffRef(i,k+1) = m_eivec.coeff(i,k+1) - p * q;
++          }
++        }  // (s != 0)
++      }  // k loop
++    }  // check convergence
++  }  // while (n >= low)
++
++  // Backsubstitute to find vectors of upper triangular form
++  if (norm == 0.0)
++  {
++      return;
++  }
++
++  for (n = nn-1; n >= 0; n--)
++  {
++    p = m_eivalues.coeff(n).real();
++    q = m_eivalues.coeff(n).imag();
++
++    // Scalar vector
++    if (q == 0)
++    {
++      int l = n;
++      matH.coeffRef(n,n) = 1.0;
++      for (int i = n-1; i >= 0; i--)
++      {
++        w = matH.coeff(i,i) - p;
++        r = (matH.row(i).end(nn-l) * matH.col(n).end(nn-l))(0,0);
++
++        if (m_eivalues.coeff(i).imag() < 0.0)
++        {
++          z = w;
++          s = r;
++        }
++        else
++        {
++          l = i;
++          if (m_eivalues.coeff(i).imag() == 0.0)
++          {
++            if (w != 0.0)
++              matH.coeffRef(i,n) = -r / w;
++            else
++              matH.coeffRef(i,n) = -r / (eps * norm);
++          }
++          else // Solve real equations
++          {
++            x = matH.coeff(i,i+1);
++            y = matH.coeff(i+1,i);
++            q = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag();
++            t = (x * s - z * r) / q;
++            matH.coeffRef(i,n) = t;
++            if (ei_abs(x) > ei_abs(z))
++              matH.coeffRef(i+1,n) = (-r - w * t) / x;
++            else
++              matH.coeffRef(i+1,n) = (-s - y * t) / z;
++          }
++
++          // Overflow control
++          t = ei_abs(matH.coeff(i,n));
++          if ((eps * t) * t > 1)
++            matH.col(n).end(nn-i) /= t;
++        }
++      }
++    }
++    else if (q < 0) // Complex vector
++    {
++      std::complex<Scalar> cc;
++      int l = n-1;
++
++      // Last vector component imaginary so matrix is triangular
++      if (ei_abs(matH.coeff(n,n-1)) > ei_abs(matH.coeff(n-1,n)))
++      {
++        matH.coeffRef(n-1,n-1) = q / matH.coeff(n,n-1);
++        matH.coeffRef(n-1,n) = -(matH.coeff(n,n) - p) / matH.coeff(n,n-1);
++      }
++      else
++      {
++        cc = cdiv<Scalar>(0.0,-matH.coeff(n-1,n),matH.coeff(n-1,n-1)-p,q);
++        matH.coeffRef(n-1,n-1) = ei_real(cc);
++        matH.coeffRef(n-1,n) = ei_imag(cc);
++      }
++      matH.coeffRef(n,n-1) = 0.0;
++      matH.coeffRef(n,n) = 1.0;
++      for (int i = n-2; i >= 0; i--)
++      {
++        Scalar ra,sa,vr,vi;
++        ra = (matH.row(i).end(nn-l) * matH.col(n-1).end(nn-l)).lazy()(0,0);
++        sa = (matH.row(i).end(nn-l) * matH.col(n).end(nn-l)).lazy()(0,0);
++        w = matH.coeff(i,i) - p;
++
++        if (m_eivalues.coeff(i).imag() < 0.0)
++        {
++          z = w;
++          r = ra;
++          s = sa;
++        }
++        else
++        {
++          l = i;
++          if (m_eivalues.coeff(i).imag() == 0)
++          {
++            cc = cdiv(-ra,-sa,w,q);
++            matH.coeffRef(i,n-1) = ei_real(cc);
++            matH.coeffRef(i,n) = ei_imag(cc);
++          }
++          else
++          {
++            // Solve complex equations
++            x = matH.coeff(i,i+1);
++            y = matH.coeff(i+1,i);
++            vr = (m_eivalues.coeff(i).real() - p) * (m_eivalues.coeff(i).real() - p) + m_eivalues.coeff(i).imag() * m_eivalues.coeff(i).imag() - q * q;
++            vi = (m_eivalues.coeff(i).real() - p) * 2.0 * q;
++            if ((vr == 0.0) && (vi == 0.0))
++              vr = eps * norm * (ei_abs(w) + ei_abs(q) + ei_abs(x) + ei_abs(y) + ei_abs(z));
++
++            cc= cdiv(x*r-z*ra+q*sa,x*s-z*sa-q*ra,vr,vi);
++            matH.coeffRef(i,n-1) = ei_real(cc);
++            matH.coeffRef(i,n) = ei_imag(cc);
++            if (ei_abs(x) > (ei_abs(z) + ei_abs(q)))
++            {
++              matH.coeffRef(i+1,n-1) = (-ra - w * matH.coeff(i,n-1) + q * matH.coeff(i,n)) / x;
++              matH.coeffRef(i+1,n) = (-sa - w * matH.coeff(i,n) - q * matH.coeff(i,n-1)) / x;
++            }
++            else
++            {
++              cc = cdiv(-r-y*matH.coeff(i,n-1),-s-y*matH.coeff(i,n),z,q);
++              matH.coeffRef(i+1,n-1) = ei_real(cc);
++              matH.coeffRef(i+1,n) = ei_imag(cc);
++            }
++          }
++
++          // Overflow control
++          t = std::max(ei_abs(matH.coeff(i,n-1)),ei_abs(matH.coeff(i,n)));
++          if ((eps * t) * t > 1)
++            matH.block(i, n-1, nn-i, 2) /= t;
++
++        }
++      }
++    }
++  }
++
++  // Vectors of isolated roots
++  for (int i = 0; i < nn; i++)
++  {
++    // FIXME again what's the purpose of this test ?
++    // in this algo low==0 and high==nn-1 !!
++    if (i < low || i > high)
++    {
++      m_eivec.row(i).end(nn-i) = matH.row(i).end(nn-i);
++    }
++  }
++
++  // Back transformation to get eigenvectors of original matrix
++  int bRows = high-low+1;
++  for (int j = nn-1; j >= low; j--)
++  {
++    int bSize = std::min(j,high)-low+1;
++    m_eivec.col(j).block(low, bRows) = (m_eivec.block(low, low, bRows, bSize) * matH.col(j).block(low, bSize));
++  }
++}
++
++#endif // EIGEN_EIGENSOLVER_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/HessenbergDecomposition.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/HessenbergDecomposition.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/HessenbergDecomposition.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/HessenbergDecomposition.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,249 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_HESSENBERGDECOMPOSITION_H
++#define EIGEN_HESSENBERGDECOMPOSITION_H
++
++/** \ingroup QR_Module
++  *
++  * \class HessenbergDecomposition
++  *
++  * \brief Reduces a square matrix to Hessenberg form
++  *
++  * \param MatrixType the type of the matrix of which we are computing the Hessenberg decomposition
++  *
++  * This class performs a Hessenberg decomposition of a matrix \f$ A \f$ such that:
++  * \f$ A = Q H Q^* \f$ where \f$ Q \f$ is unitary and \f$ H \f$ is a Hessenberg matrix.
++  *
++  * \sa class Tridiagonalization, class QR
++  */
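++/* Editorial usage sketch (not part of upstream Eigen r850507; assumes the
++ * Eigen 2 API defined elsewhere in this patch):
++ * \code
++ *   MatrixXd A = MatrixXd::Random(5,5);
++ *   HessenbergDecomposition<MatrixXd> hess(A);
++ *   MatrixXd H = hess.matrixH();  // Hessenberg form: zero below the first sub-diagonal
++ *   MatrixXd Q = hess.matrixQ();  // unitary factor, so that A = Q * H * Q.adjoint()
++ * \endcode
++ */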
++template<typename _MatrixType> class HessenbergDecomposition
++{
++  public:
++
++    typedef _MatrixType MatrixType;
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename NumTraits<Scalar>::Real RealScalar;
++
++    enum {
++      Size = MatrixType::RowsAtCompileTime,
++      SizeMinusOne = MatrixType::RowsAtCompileTime==Dynamic
++                   ? Dynamic
++                   : MatrixType::RowsAtCompileTime-1
++    };
++
++    typedef Matrix<Scalar, SizeMinusOne, 1> CoeffVectorType;
++    typedef Matrix<RealScalar, Size, 1> DiagonalType;
++    typedef Matrix<RealScalar, SizeMinusOne, 1> SubDiagonalType;
++
++    typedef typename NestByValue<DiagonalCoeffs<MatrixType> >::RealReturnType DiagonalReturnType;
++
++    typedef typename NestByValue<DiagonalCoeffs<
++        NestByValue<Block<MatrixType,SizeMinusOne,SizeMinusOne> > > >::RealReturnType SubDiagonalReturnType;
++
++    /** This constructor initializes a HessenbergDecomposition object for
++      * further use with HessenbergDecomposition::compute()
++      */
++    HessenbergDecomposition(int size = Size==Dynamic ? 2 : Size)
++      : m_matrix(size,size), m_hCoeffs(size-1)
++    {}
++
++    HessenbergDecomposition(const MatrixType& matrix)
++      : m_matrix(matrix),
++        m_hCoeffs(matrix.cols()-1)
++    {
++      _compute(m_matrix, m_hCoeffs);
++    }
++
++    /** Computes or re-computes the Hessenberg decomposition for the matrix \a matrix.
++      *
++      * This method allows the already allocated data to be re-used.
++      */
++    void compute(const MatrixType& matrix)
++    {
++      m_matrix = matrix;
++      m_hCoeffs.resize(matrix.rows()-1,1);
++      _compute(m_matrix, m_hCoeffs);
++    }
++
++    /** \returns the Householder coefficients that allow the matrix Q to be
++      * reconstructed from the packed data.
++      *
++      * \sa packedMatrix()
++      */
++    CoeffVectorType householderCoefficients(void) const { return m_hCoeffs; }
++
++    /** \returns the internal result of the decomposition.
++      *
++      * The returned matrix contains the following information:
++      *  - the upper part and lower sub-diagonal represent the Hessenberg matrix H
++      *  - the rest of the lower part contains the Householder vectors that, combined with
++      *    Householder coefficients returned by householderCoefficients(),
++      *    allow the matrix Q to be reconstructed as follows:
++      *       Q = H_{N-1} ... H_1 H_0
++      *    where the matrices H_i are the Householder transformations:
++      *       H_i = (I - h_i * v_i * v_i')
++      *    where h_i == householderCoefficients()[i] and v_i is a Householder vector:
++      *       v_i = [ 0, ..., 0, 1, M(i+2,i), ..., M(N-1,i) ]
++      *
++      * See LAPACK for further details on this packed storage.
++      */
++    const MatrixType& packedMatrix(void) const { return m_matrix; }
++
++    MatrixType matrixQ(void) const;
++    MatrixType matrixH(void) const;
++
++  private:
++
++    static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs);
++
++  protected:
++    MatrixType m_matrix;
++    CoeffVectorType m_hCoeffs;
++};
++
++#ifndef EIGEN_HIDE_HEAVY_CODE
++
++/** \internal
++  * Performs a Hessenberg decomposition of \a matA in place.
++  *
++  * \param matA the input square matrix
++  * \param hCoeffs returned Householder coefficients
++  *
++  * The result is written into \a matA: the upper part (including the first sub-diagonal) holds H, and the Householder vectors are stored below it (see packedMatrix()).
++  *
++  * Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
++  *
++  * \sa packedMatrix()
++  */
++template<typename MatrixType>
++void HessenbergDecomposition<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs)
++{
++  assert(matA.rows()==matA.cols());
++  int n = matA.rows();
++  for (int i = 0; i<n-2; ++i)
++  {
++    // let's consider the vector v = i-th column starting at position i+1
++
++    // start of the householder transformation
++    // squared norm of the vector v skipping the first element
++    RealScalar v1norm2 = matA.col(i).end(n-(i+2)).norm2();
++
++    if (ei_isMuchSmallerThan(v1norm2,static_cast<Scalar>(1)))
++    {
++      hCoeffs.coeffRef(i) = 0.;
++    }
++    else
++    {
++      Scalar v0 = matA.col(i).coeff(i+1);
++      RealScalar beta = ei_sqrt(ei_abs2(v0)+v1norm2);
++      if (ei_real(v0)>=0.)
++        beta = -beta;
++      matA.col(i).end(n-(i+2)) *= (Scalar(1)/(v0-beta));
++      matA.col(i).coeffRef(i+1) = beta;
++      Scalar h = (beta - v0) / beta;
++      // end of the householder transformation
++
++      // Apply similarity transformation to remaining columns,
++      // i.e., A = H' A H where H = I - h v v' and v = matA.col(i).end(n-i-1)
++      matA.col(i).coeffRef(i+1) = 1;
++
++      // first let's do A = H A
++      matA.corner(BottomRight,n-i-1,n-i-1) -= ((ei_conj(h) * matA.col(i).end(n-i-1)) *
++        (matA.col(i).end(n-i-1).adjoint() * matA.corner(BottomRight,n-i-1,n-i-1))).lazy();
++
++      // now let's do A = A H
++      matA.corner(BottomRight,n,n-i-1) -= ((matA.corner(BottomRight,n,n-i-1) * matA.col(i).end(n-i-1))
++                                        * (h * matA.col(i).end(n-i-1).adjoint())).lazy();
++
++      matA.col(i).coeffRef(i+1) = beta;
++      hCoeffs.coeffRef(i) = h;
++    }
++  }
++  if (NumTraits<Scalar>::IsComplex)
++  {
++    // Householder transformation on the remaining single scalar
++    int i = n-2;
++    Scalar v0 = matA.coeff(i+1,i);
++
++    RealScalar beta = ei_sqrt(ei_abs2(v0));
++    if (ei_real(v0)>=0.)
++      beta = -beta;
++    Scalar h = (beta - v0) / beta;
++    hCoeffs.coeffRef(i) = h;
++
++    // A = H* A
++    matA.corner(BottomRight,n-i-1,n-i) -= ei_conj(h) * matA.corner(BottomRight,n-i-1,n-i);
++
++    // A = A H
++    matA.col(n-1) -= h * matA.col(n-1);
++  }
++  else
++  {
++    hCoeffs.coeffRef(n-2) = 0;
++  }
++}
++
++/** reconstructs and returns the matrix Q */
++template<typename MatrixType>
++typename HessenbergDecomposition<MatrixType>::MatrixType
++HessenbergDecomposition<MatrixType>::matrixQ(void) const
++{
++  int n = m_matrix.rows();
++  MatrixType matQ = MatrixType::Identity(n,n);
++  for (int i = n-2; i>=0; i--)
++  {
++    Scalar tmp = m_matrix.coeff(i+1,i);
++    m_matrix.const_cast_derived().coeffRef(i+1,i) = 1;
++
++    matQ.corner(BottomRight,n-i-1,n-i-1) -=
++      ((m_hCoeffs.coeff(i) * m_matrix.col(i).end(n-i-1)) *
++      (m_matrix.col(i).end(n-i-1).adjoint() * matQ.corner(BottomRight,n-i-1,n-i-1)).lazy()).lazy();
++
++    m_matrix.const_cast_derived().coeffRef(i+1,i) = tmp;
++  }
++  return matQ;
++}
++
++#endif // EIGEN_HIDE_HEAVY_CODE
++
++/** constructs and returns the matrix H.
++  * Note that the matrix H is equivalent to the upper part of the packed matrix
++  * (including the lower sub-diagonal). Therefore, it is often sufficient
++  * to use the packed matrix directly instead of creating a new one.
++  */
++template<typename MatrixType>
++typename HessenbergDecomposition<MatrixType>::MatrixType
++HessenbergDecomposition<MatrixType>::matrixH(void) const
++{
++  // FIXME should this function (and other similar ones) rather take a matrix as argument
++  // and fill it? (to avoid temporaries)
++  int n = m_matrix.rows();
++  MatrixType matH = m_matrix;
++  if (n>2)
++    matH.corner(BottomLeft,n-2, n-2).template part<Lower>().setZero();
++  return matH;
++}
++
++#endif // EIGEN_HESSENBERGDECOMPOSITION_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/QR.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/QR.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/QR.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/QR.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,178 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_QR_H
++#define EIGEN_QR_H
++
++/** \ingroup QR_Module
++  *
++  * \class QR
++  *
++  * \brief QR decomposition of a matrix
++  *
++  * \param MatrixType the type of the matrix of which we are computing the QR decomposition
++  *
++  * This class performs a QR decomposition using Householder transformations. The result is
++  * stored in a compact way compatible with LAPACK.
++  *
++  * \sa MatrixBase::qr()
++  */
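++/* Editorial usage sketch (not part of upstream Eigen r850507; assumes the
++ * Eigen 2 API defined elsewhere in this patch):
++ * \code
++ *   MatrixXd A = MatrixXd::Random(4,3);
++ *   QR<MatrixXd> qr(A);
++ *   MatrixXd Q = qr.matrixQ();       // thin Q factor (orthonormal columns)
++ *   MatrixXd R = qr.matrixR();       // upper triangular factor, so that A = Q * R
++ *   bool fullRank = qr.isFullRank();
++ * \endcode
++ */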
++template<typename MatrixType> class QR
++{
++  public:
++
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename MatrixType::RealScalar RealScalar;
++    typedef Block<MatrixType, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixRBlockType;
++    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixTypeR;
++    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> VectorType;
++
++    QR(const MatrixType& matrix)
++      : m_qr(matrix.rows(), matrix.cols()),
++        m_hCoeffs(matrix.cols())
++    {
++      _compute(matrix);
++    }
++
++    /** \returns whether or not the matrix is of full rank */
++    bool isFullRank() const { return ei_isMuchSmallerThan(m_hCoeffs.cwise().abs().minCoeff(), Scalar(1)); }
++
++    /** \returns a read-only expression of the matrix R of the QR decomposition */
++    const Part<NestByValue<MatrixRBlockType>, Upper>
++    matrixR(void) const
++    {
++      int cols = m_qr.cols();
++      return MatrixRBlockType(m_qr, 0, 0, cols, cols).nestByValue().template part<Upper>();
++    }
++
++    MatrixType matrixQ(void) const;
++
++  private:
++
++    void _compute(const MatrixType& matrix);
++
++  protected:
++    MatrixType m_qr;
++    VectorType m_hCoeffs;
++};
++
++#ifndef EIGEN_HIDE_HEAVY_CODE
++
++template<typename MatrixType>
++void QR<MatrixType>::_compute(const MatrixType& matrix)
++{
++  m_qr = matrix;
++  int rows = matrix.rows();
++  int cols = matrix.cols();
++
++  for (int k = 0; k < cols; k++)
++  {
++    int remainingSize = rows-k;
++
++    RealScalar beta;
++    Scalar v0 = m_qr.col(k).coeff(k);
++
++    if (remainingSize==1)
++    {
++      if (NumTraits<Scalar>::IsComplex)
++      {
++        // Householder transformation on the remaining single scalar
++        beta = ei_abs(v0);
++        if (ei_real(v0)>0)
++          beta = -beta;
++        m_qr.coeffRef(k,k) = beta;
++        m_hCoeffs.coeffRef(k) = (beta - v0) / beta;
++      }
++      else
++      {
++        m_hCoeffs.coeffRef(k) = 0;
++      }
++    }
++    else if ( (!ei_isMuchSmallerThan(beta=m_qr.col(k).end(remainingSize-1).norm2(),static_cast<Scalar>(1))) || ei_imag(v0)==0 )
++    {
++      // form k-th Householder vector
++      beta = ei_sqrt(ei_abs2(v0)+beta);
++      if (ei_real(v0)>=0.)
++        beta = -beta;
++      m_qr.col(k).end(remainingSize-1) /= v0-beta;
++      m_qr.coeffRef(k,k) = beta;
++      Scalar h = m_hCoeffs.coeffRef(k) = (beta - v0) / beta;
++
++      // apply the Householder transformation (I - h v v') to remaining columns, i.e.,
++      // R <- (I - h v v') * R   where v = [1,m_qr(k+1,k), m_qr(k+2,k), ...]
++      int remainingCols = cols - k -1;
++      if (remainingCols>0)
++      {
++        m_qr.coeffRef(k,k) = Scalar(1);
++        m_qr.corner(BottomRight, remainingSize, remainingCols) -= ei_conj(h) * m_qr.col(k).end(remainingSize)
++            * (m_qr.col(k).end(remainingSize).adjoint() * m_qr.corner(BottomRight, remainingSize, remainingCols));
++        m_qr.coeffRef(k,k) = beta;
++      }
++    }
++    else
++    {
++      m_hCoeffs.coeffRef(k) = 0;
++    }
++  }
++}
++
++/** \returns the matrix Q */
++template<typename MatrixType>
++MatrixType QR<MatrixType>::matrixQ(void) const
++{
++  // compute the product Q_0 Q_1 ... Q_n-1,
++  // where Q_k is the k-th Householder transformation I - h_k v_k v_k'
++  // and v_k is the k-th Householder vector [1,m_qr(k+1,k), m_qr(k+2,k), ...]
++  int rows = m_qr.rows();
++  int cols = m_qr.cols();
++  MatrixType res = MatrixType::Identity(rows, cols);
++  for (int k = cols-1; k >= 0; k--)
++  {
++    // to make the computation of the transformation easier, let's temporarily
++    // overwrite m_qr(k,k) such that the end of m_qr.col(k) is exactly our Householder vector.
++    Scalar beta = m_qr.coeff(k,k);
++    m_qr.const_cast_derived().coeffRef(k,k) = 1;
++    int endLength = rows-k;
++    res.corner(BottomRight,endLength, cols-k) -= ((m_hCoeffs.coeff(k) * m_qr.col(k).end(endLength))
++      * (m_qr.col(k).end(endLength).adjoint() * res.corner(BottomRight,endLength, cols-k)).lazy()).lazy();
++    m_qr.const_cast_derived().coeffRef(k,k) = beta;
++  }
++  return res;
++}
++
++#endif // EIGEN_HIDE_HEAVY_CODE
++
++/** \return the QR decomposition of \c *this.
++  *
++  * \sa class QR
++  */
++template<typename Derived>
++const QR<typename MatrixBase<Derived>::EvalType>
++MatrixBase<Derived>::qr() const
++{
++  return eval();
++}
++
++
++#endif // EIGEN_QR_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/SelfAdjointEigenSolver.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/SelfAdjointEigenSolver.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/SelfAdjointEigenSolver.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/SelfAdjointEigenSolver.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,361 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
++#define EIGEN_SELFADJOINTEIGENSOLVER_H
++
++/** \qr_module \ingroup QR_Module
++  *
++  * \class SelfAdjointEigenSolver
++  *
++  * \brief Eigenvalue/eigenvector solver for selfadjoint matrices
++  *
++  * \param MatrixType the type of the matrix of which we are computing the eigen decomposition
++  *
++  * \note MatrixType must be an actual Matrix type, it can't be an expression type.
++  *
++  * \sa MatrixBase::eigenvalues(), class EigenSolver
++  */
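++/* Editorial usage sketch (not part of upstream Eigen r850507; assumes the
++ * Eigen 2 API defined elsewhere in this patch):
++ * \code
++ *   MatrixXd M = MatrixXd::Random(4,4);
++ *   MatrixXd A = M + M.transpose();         // make a selfadjoint matrix
++ *   SelfAdjointEigenSolver<MatrixXd> es(A);
++ *   VectorXd lambda = es.eigenvalues();     // sorted in increasing order (see compute())
++ *   MatrixXd V = es.eigenvectors();         // A * V.col(i) = lambda(i) * V.col(i)
++ * \endcode
++ */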
++template<typename _MatrixType> class SelfAdjointEigenSolver
++{
++  public:
++
++    enum {Size = _MatrixType::RowsAtCompileTime };
++    typedef _MatrixType MatrixType;
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename NumTraits<Scalar>::Real RealScalar;
++    typedef std::complex<RealScalar> Complex;
++    typedef Matrix<RealScalar, MatrixType::ColsAtCompileTime, 1> RealVectorType;
++    typedef Matrix<RealScalar, Dynamic, 1> RealVectorTypeX;
++    typedef Tridiagonalization<MatrixType> TridiagonalizationType;
++
++    SelfAdjointEigenSolver()
++        : m_eivec(Size, Size),
++          m_eivalues(Size)
++    {
++      ei_assert(Size!=Dynamic);
++    }
++
++    SelfAdjointEigenSolver(int size)
++        : m_eivec(size, size),
++          m_eivalues(size)
++    {}
++
++    /** Constructor computing the eigenvalues of the selfadjoint matrix \a matrix,
++      * as well as the eigenvectors if \a computeEigenvectors is true.
++      *
++      * \sa compute(MatrixType,bool), SelfAdjointEigenSolver(MatrixType,MatrixType,bool)
++      */
++    SelfAdjointEigenSolver(const MatrixType& matrix, bool computeEigenvectors = true)
++      : m_eivec(matrix.rows(), matrix.cols()),
++        m_eivalues(matrix.cols())
++    {
++      compute(matrix, computeEigenvectors);
++    }
++
++    /** Constructor computing the eigenvalues of the generalized eigen problem
++      * \f$ Ax = \lambda B x \f$ with \a matA the selfadjoint matrix \f$ A \f$
++      * and \a matB the positive definite matrix \f$ B \f$ . The eigenvectors
++      * are computed if \a computeEigenvectors is true.
++      *
++      * \sa compute(MatrixType,MatrixType,bool), SelfAdjointEigenSolver(MatrixType,bool)
++      */
++    SelfAdjointEigenSolver(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true)
++      : m_eivec(matA.rows(), matA.cols()),
++        m_eivalues(matA.cols())
++    {
++      compute(matA, matB, computeEigenvectors);
++    }
++
++    void compute(const MatrixType& matrix, bool computeEigenvectors = true);
++
++    void compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors = true);
++
++    /** \returns the computed eigenvectors as a matrix of column vectors */
++    MatrixType eigenvectors(void) const
++    {
++      #ifndef NDEBUG
++      ei_assert(m_eigenvectorsOk);
++      #endif
++      return m_eivec;
++    }
++
++    /** \returns the computed eigenvalues */
++    RealVectorType eigenvalues(void) const { return m_eivalues; }
++
++  protected:
++    MatrixType m_eivec;
++    RealVectorType m_eivalues;
++    #ifndef NDEBUG
++    bool m_eigenvectorsOk;
++    #endif
++};
++
++#ifndef EIGEN_HIDE_HEAVY_CODE
++
++// from Golub's "Matrix Computations", algorithm 5.1.3
++template<typename Scalar>
++static void ei_givens_rotation(Scalar a, Scalar b, Scalar& c, Scalar& s)
++{
++  if (b==0)
++  {
++    c = 1; s = 0;
++  }
++  else if (ei_abs(b)>ei_abs(a))
++  {
++    Scalar t = -a/b;
++    s = Scalar(1)/ei_sqrt(1+t*t);
++    c = s * t;
++  }
++  else
++  {
++    Scalar t = -b/a;
++    c = Scalar(1)/ei_sqrt(1+t*t);
++    s = c * t;
++  }
++}
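++// Editorial note (not part of upstream Eigen r850507): the returned pair (c,s)
++// satisfies s*a + c*b == 0, i.e. the rotation G = [c s; -s c] applied as G^T * [a;b]
++// zeroes the second entry. This is the form used by ei_tridiagonal_qr_step below.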
++
++/** \internal
++  *
++  * \qr_module
++  *
++  * Performs a QR step on a tridiagonal symmetric matrix represented as a
++  * pair of vectors \a diag and \a subdiag.
++  *
++  * \param diag the diagonal part of the tridiagonal matrix (updated in place)
++  * \param subdiag the sub-diagonal part of the tridiagonal matrix (updated in place)
++  *
++  * For compilation efficiency reasons, this procedure does not use Eigen expressions
++  * for its arguments.
++  *
++  * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
++  * "implicit symmetric QR step with Wilkinson shift"
++  */
++template<typename RealScalar, typename Scalar>
++static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n);
++
++/** Computes the eigenvalues of the selfadjoint matrix \a matrix,
++  * as well as the eigenvectors if \a computeEigenvectors is true.
++  *
++  * \sa SelfAdjointEigenSolver(MatrixType,bool), compute(MatrixType,MatrixType,bool)
++  */
++template<typename MatrixType>
++void SelfAdjointEigenSolver<MatrixType>::compute(const MatrixType& matrix, bool computeEigenvectors)
++{
++  #ifndef NDEBUG
++  m_eigenvectorsOk = computeEigenvectors;
++  #endif
++  assert(matrix.cols() == matrix.rows());
++  int n = matrix.cols();
++  m_eivalues.resize(n,1);
++  m_eivec = matrix;
++
++  // FIXME, should tridiag be a local variable of this function or an attribute of SelfAdjointEigenSolver ?
++  // the latter avoids multiple memory allocation when the same SelfAdjointEigenSolver is used multiple times...
++  // (same for diag and subdiag)
++  RealVectorType& diag = m_eivalues;
++  typename TridiagonalizationType::SubDiagonalType subdiag(n-1);
++  TridiagonalizationType::decomposeInPlace(m_eivec, diag, subdiag, computeEigenvectors);
++
++  int end = n-1;
++  int start = 0;
++  while (end>0)
++  {
++    for (int i = start; i<end; ++i)
++      if (ei_isMuchSmallerThan(ei_abs(subdiag[i]),(ei_abs(diag[i])+ei_abs(diag[i+1]))))
++        subdiag[i] = 0;
++
++    // find the largest unreduced block
++    while (end>0 && subdiag[end-1]==0)
++      end--;
++    if (end<=0)
++      break;
++    start = end - 1;
++    while (start>0 && subdiag[start-1]!=0)
++      start--;
++
++    ei_tridiagonal_qr_step(diag.data(), subdiag.data(), start, end, computeEigenvectors ? m_eivec.data() : (Scalar*)0, n);
++  }
++
++  // Sort eigenvalues and corresponding vectors.
++  // TODO make the sort optional ?
++  // TODO use a better sort algorithm !!
++  for (int i = 0; i < n-1; i++)
++  {
++    int k;
++    m_eivalues.block(i,n-i).minCoeff(&k);
++    if (k > 0)
++    {
++      std::swap(m_eivalues[i], m_eivalues[k+i]);
++      m_eivec.col(i).swap(m_eivec.col(k+i));
++    }
++  }
++}
++
++/** Computes the eigenvalues of the generalized eigen problem
++  * \f$ Ax = \lambda B x \f$ with \a matA the selfadjoint matrix \f$ A \f$
++  * and \a matB the positive definite matrix \f$ B \f$ . The eigenvectors
++  * are computed if \a computeEigenvectors is true.
++  *
++  * \sa SelfAdjointEigenSolver(MatrixType,MatrixType,bool), compute(MatrixType,bool)
++  */
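++// Editorial usage sketch for the generalized problem (not part of upstream Eigen
++// r850507), given A selfadjoint and B selfadjoint positive definite (both MatrixXd):
++//   SelfAdjointEigenSolver<MatrixXd> es(A, B);   // solves A x = lambda B x
++//   VectorXd lambda = es.eigenvalues();
++//   MatrixXd V = es.eigenvectors();              // A * V.col(i) = lambda(i) * B * V.col(i)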
++template<typename MatrixType>
++void SelfAdjointEigenSolver<MatrixType>::
++compute(const MatrixType& matA, const MatrixType& matB, bool computeEigenvectors)
++{
++  ei_assert(matA.cols()==matA.rows() && matB.rows()==matA.rows() && matB.cols()==matB.rows());
++
++  // Compute the cholesky decomposition of matB = U'U
++  Cholesky<MatrixType> cholB(matB);
++
++  // compute C = inv(U') A inv(U)
++  MatrixType matC = cholB.matrixL().solveTriangular(matA);
++  // FIXME since we currently do not support A * inv(U),
++  // let's do (inv(U') A')' :
++  matC = (cholB.matrixL().solveTriangular(matC.adjoint())).adjoint();
++
++  compute(matC, computeEigenvectors);
++
++  if (computeEigenvectors)
++  {
++    // transform back the eigen vectors: evecs = inv(U) * evecs
++    m_eivec = cholB.matrixL().adjoint().template marked<Upper>().solveTriangular(m_eivec);
++  }
++}
++
++#endif // EIGEN_HIDE_HEAVY_CODE
++
++/** \qr_module
++  *
++  * \returns a vector listing the eigenvalues of this matrix.
++  */
++template<typename Derived>
++inline Matrix<typename NumTraits<typename ei_traits<Derived>::Scalar>::Real, ei_traits<Derived>::ColsAtCompileTime, 1>
++MatrixBase<Derived>::eigenvalues() const
++{
++  ei_assert(Flags&SelfAdjointBit);
++  return SelfAdjointEigenSolver<typename Derived::Eval>(eval(),false).eigenvalues();
++}
++
++template<typename Derived, bool IsSelfAdjoint>
++struct ei_operatorNorm_selector
++{
++  static inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
++  operatorNorm(const MatrixBase<Derived>& m)
++  {
++    // FIXME if it is really guaranteed that the eigenvalues are already sorted,
++    // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.
++    return m.eigenvalues().cwise().abs().maxCoeff();
++  }
++};
++
++template<typename Derived> struct ei_operatorNorm_selector<Derived, false>
++{
++  static inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
++  operatorNorm(const MatrixBase<Derived>& m)
++  {
++    typename Derived::Eval m_eval(m);
++    // FIXME if it is really guaranteed that the eigenvalues are already sorted,
++    // then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.
++    return ei_sqrt(
++             (m_eval*m_eval.adjoint())
++             .template marked<SelfAdjoint>()
++             .eigenvalues()
++             .maxCoeff()
++           );
++  }
++};
++
++/** \qr_module
++  *
++  * \returns the matrix norm of this matrix.
++  */
++template<typename Derived>
++inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
++MatrixBase<Derived>::operatorNorm() const
++{
++  return ei_operatorNorm_selector<Derived, Flags&SelfAdjointBit>
++       ::operatorNorm(derived());
++}
++
++#ifndef EIGEN_EXTERN_INSTANTIATIONS
++template<typename RealScalar, typename Scalar>
++static void ei_tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, int start, int end, Scalar* matrixQ, int n)
++{
++  RealScalar td = (diag[end-1] - diag[end])*0.5;
++  RealScalar e2 = ei_abs2(subdiag[end-1]);
++  RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * ei_sqrt(td*td + e2));
++  RealScalar x = diag[start] - mu;
++  RealScalar z = subdiag[start];
++
++  for (int k = start; k < end; ++k)
++  {
++    RealScalar c, s;
++    ei_givens_rotation(x, z, c, s);
++
++    // do T = G' T G
++    RealScalar sdk = s * diag[k] + c * subdiag[k];
++    RealScalar dkp1 = s * subdiag[k] + c * diag[k+1];
++
++    diag[k] = c * (c * diag[k] - s * subdiag[k]) - s * (c * subdiag[k] - s * diag[k+1]);
++    diag[k+1] = s * sdk + c * dkp1;
++    subdiag[k] = c * sdk - s * dkp1;
++
++    if (k > start)
++      subdiag[k - 1] = c * subdiag[k-1] - s * z;
++
++    x = subdiag[k];
++    z = -s * subdiag[k+1];
++
++    if (k < end - 1)
++      subdiag[k + 1] = c * subdiag[k+1];
++
++    // apply the givens rotation to the unit matrix Q = Q * G
++    // G only modifies the two columns k and k+1
++    if (matrixQ)
++    {
++      #ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
++      #else
++      int kn = k*n;
++      int kn1 = (k+1)*n;
++      #endif
++      // let's do the product manually to avoid the need of temporaries...
++      for (int i=0; i<n; ++i)
++      {
++        #ifdef EIGEN_DEFAULT_TO_ROW_MAJOR
++        Scalar matrixQ_i_k = matrixQ[i*n+k];
++        matrixQ[i*n+k]   = c * matrixQ_i_k - s * matrixQ[i*n+k+1];
++        matrixQ[i*n+k+1] = s * matrixQ_i_k + c * matrixQ[i*n+k+1];
++        #else
++        Scalar matrixQ_i_k = matrixQ[i+kn];
++        matrixQ[i+kn]  = c * matrixQ_i_k - s * matrixQ[i+kn1];
++        matrixQ[i+kn1] = s * matrixQ_i_k + c * matrixQ[i+kn1];
++        #endif
++      }
++    }
++  }
++}
++#endif
++
++#endif // EIGEN_SELFADJOINTEIGENSOLVER_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/Tridiagonalization.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/Tridiagonalization.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/QR/Tridiagonalization.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/QR/Tridiagonalization.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,428 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_TRIDIAGONALIZATION_H
++#define EIGEN_TRIDIAGONALIZATION_H
++
++/** \ingroup QR_Module
++  *
++  * \class Tridiagonalization
++  *
++  * \brief Tridiagonal decomposition of a selfadjoint matrix
++  *
++  * \param MatrixType the type of the matrix of which we are performing the tridiagonalization
++  *
++  * This class performs a tridiagonal decomposition of a selfadjoint matrix \f$ A \f$ such that:
++  * \f$ A = Q T Q^* \f$ where \f$ Q \f$ is unitary and \f$ T \f$ a real symmetric tridiagonal matrix.
++  *
++  * \sa MatrixBase::tridiagonalize()
++  */
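++/* Editorial usage sketch (not part of upstream Eigen r850507; assumes the
++ * Eigen 2 API defined elsewhere in this patch):
++ * \code
++ *   MatrixXd M = MatrixXd::Random(5,5);
++ *   MatrixXd A = M + M.transpose();       // selfadjoint input
++ *   Tridiagonalization<MatrixXd> tri(A);
++ *   MatrixXd T = tri.matrixT();           // real symmetric tridiagonal matrix
++ *   MatrixXd Q = tri.matrixQ();           // unitary factor, A = Q * T * Q.adjoint()
++ * \endcode
++ */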
++template<typename _MatrixType> class Tridiagonalization
++{
++  public:
++
++    typedef _MatrixType MatrixType;
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename NumTraits<Scalar>::Real RealScalar;
++    typedef typename ei_packet_traits<Scalar>::type Packet;
++
++    enum {
++      Size = MatrixType::RowsAtCompileTime,
++      SizeMinusOne = MatrixType::RowsAtCompileTime==Dynamic
++                   ? Dynamic
++                   : MatrixType::RowsAtCompileTime-1,
++      PacketSize = ei_packet_traits<Scalar>::size
++    };
++
++    typedef Matrix<Scalar, SizeMinusOne, 1> CoeffVectorType;
++    typedef Matrix<RealScalar, Size, 1> DiagonalType;
++    typedef Matrix<RealScalar, SizeMinusOne, 1> SubDiagonalType;
++
++    typedef typename NestByValue<DiagonalCoeffs<MatrixType> >::RealReturnType DiagonalReturnType;
++
++    typedef typename NestByValue<DiagonalCoeffs<
++        NestByValue<Block<MatrixType,SizeMinusOne,SizeMinusOne> > > >::RealReturnType SubDiagonalReturnType;
++
++    /** This constructor initializes a Tridiagonalization object for
++      * further use with Tridiagonalization::compute()
++      */
++    Tridiagonalization(int size = Size==Dynamic ? 2 : Size)
++      : m_matrix(size,size), m_hCoeffs(size-1)
++    {}
++
++    Tridiagonalization(const MatrixType& matrix)
++      : m_matrix(matrix),
++        m_hCoeffs(matrix.cols()-1)
++    {
++      _compute(m_matrix, m_hCoeffs);
++    }
++
++    /** Computes or re-computes the tridiagonalization for the matrix \a matrix.
++      *
++      * This method allows the already allocated data to be re-used.
++      */
++    void compute(const MatrixType& matrix)
++    {
++      m_matrix = matrix;
++      m_hCoeffs.resize(matrix.rows()-1, 1);
++      _compute(m_matrix, m_hCoeffs);
++    }
++
++    /** \returns the Householder coefficients that allow the matrix Q to be
++      * reconstructed from the packed data.
++      *
++      * \sa packedMatrix()
++      */
++    inline CoeffVectorType householderCoefficients(void) const { return m_hCoeffs; }
++
++    /** \returns the internal result of the decomposition.
++      *
++      * The returned matrix contains the following information:
++      *  - the strict upper part is equal to the input matrix A
++      *  - the diagonal and lower sub-diagonal represent the tridiagonal symmetric matrix (real).
++      *  - the rest of the lower part contains the Householder vectors that, combined with
++      *    Householder coefficients returned by householderCoefficients(),
++      *    allow the matrix Q to be reconstructed as follows:
++      *       Q = H_{N-1} ... H_1 H_0
++      *    where the matrices H_i are the Householder transformations:
++      *       H_i = (I - h_i * v_i * v_i')
++      *    where h_i == householderCoefficients()[i] and v_i is a Householder vector:
++      *       v_i = [ 0, ..., 0, 1, M(i+2,i), ..., M(N-1,i) ]
++      *
++      * See LAPACK for further details on this packed storage.
++      */
++    inline const MatrixType& packedMatrix(void) const { return m_matrix; }
++
++    MatrixType matrixQ(void) const;
++    MatrixType matrixT(void) const;
++    const DiagonalReturnType diagonal(void) const;
++    const SubDiagonalReturnType subDiagonal(void) const;
++
++    static void decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ = true);
++
++  private:
++
++    static void _compute(MatrixType& matA, CoeffVectorType& hCoeffs);
++
++    static void _decomposeInPlace3x3(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ = true);
++
++  protected:
++    MatrixType m_matrix;
++    CoeffVectorType m_hCoeffs;
++};
++
++/** \returns an expression of the diagonal vector */
++template<typename MatrixType>
++const typename Tridiagonalization<MatrixType>::DiagonalReturnType
++Tridiagonalization<MatrixType>::diagonal(void) const
++{
++  return m_matrix.diagonal().nestByValue().real();
++}
++
++/** \returns an expression of the sub-diagonal vector */
++template<typename MatrixType>
++const typename Tridiagonalization<MatrixType>::SubDiagonalReturnType
++Tridiagonalization<MatrixType>::subDiagonal(void) const
++{
++  int n = m_matrix.rows();
++  return Block<MatrixType,SizeMinusOne,SizeMinusOne>(m_matrix, 1, 0, n-1,n-1)
++    .nestByValue().diagonal().nestByValue().real();
++}
++
++/** constructs and returns the tridiagonal matrix T.
++  * Note that the matrix T is equivalent to the diagonal and sub-diagonal of the packed matrix.
++  * Therefore, it is often sufficient to use the packed matrix directly, or the vector
++  * expressions returned by diagonal() and subDiagonal(), instead of creating a new matrix.
++  */
++template<typename MatrixType>
++typename Tridiagonalization<MatrixType>::MatrixType
++Tridiagonalization<MatrixType>::matrixT(void) const
++{
++  // FIXME should this function (and other similar ones) rather take a matrix as argument
++  // and fill it ? (to avoid temporaries)
++  int n = m_matrix.rows();
++  MatrixType matT = m_matrix;
++  matT.corner(TopRight,n-1, n-1).diagonal() = subDiagonal().conjugate();
++  if (n>2)
++  {
++    matT.corner(TopRight,n-2, n-2).template part<Upper>().setZero();
++    matT.corner(BottomLeft,n-2, n-2).template part<Lower>().setZero();
++  }
++  return matT;
++}
++
++#ifndef EIGEN_HIDE_HEAVY_CODE
++
++/** \internal
++  * Performs a tridiagonal decomposition of \a matA in place.
++  *
++  * \param matA the input selfadjoint matrix
++  * \param hCoeffs returned Householder coefficients
++  *
++  * The result is written in the lower triangular part of \a matA.
++  *
++  * Implemented from Golub's "Matrix Computations", algorithm 8.3.1.
++  *
++  * \sa packedMatrix()
++  */
++template<typename MatrixType>
++void Tridiagonalization<MatrixType>::_compute(MatrixType& matA, CoeffVectorType& hCoeffs)
++{
++  assert(matA.rows()==matA.cols());
++  int n = matA.rows();
++//   std::cerr << matA << "\n\n";
++  for (int i = 0; i<n-2; ++i)
++  {
++    // let's consider the vector v = i-th column starting at position i+1
++
++    // start of the householder transformation
++    // squared norm of the vector v skipping the first element
++    RealScalar v1norm2 = matA.col(i).end(n-(i+2)).norm2();
++
++    if (ei_isMuchSmallerThan(v1norm2,static_cast<Scalar>(1)))
++    {
++      hCoeffs.coeffRef(i) = 0.;
++    }
++    else
++    {
++      Scalar v0 = matA.col(i).coeff(i+1);
++      RealScalar beta = ei_sqrt(ei_abs2(v0)+v1norm2);
++      if (ei_real(v0)>=0.)
++        beta = -beta;
++      matA.col(i).end(n-(i+2)) *= (Scalar(1)/(v0-beta));
++      matA.col(i).coeffRef(i+1) = beta;
++      Scalar h = (beta - v0) / beta;
++      // end of the householder transformation
++
++      // Apply similarity transformation to remaining columns,
++      // i.e., A = H' A H where H = I - h v v' and v = matA.col(i).end(n-i-1)
++
++      matA.col(i).coeffRef(i+1) = 1;
++      
++      /* This is the initial algorithm, which minimizes the operation count and maximizes
++       * the use of Eigen's expressions. Unfortunately, the first matrix-vector product
++       * using Part<Lower|SelfAdjoint> is very slow. */
++      #ifdef EIGEN_NEVER_DEFINED
++      // matrix - vector product
++      hCoeffs.end(n-i-1) = (matA.corner(BottomRight,n-i-1,n-i-1).template part<Lower|SelfAdjoint>()
++                                * (h * matA.col(i).end(n-i-1))).lazy();
++      // simple axpy
++      hCoeffs.end(n-i-1) += (h * Scalar(-0.5) * matA.col(i).end(n-i-1).dot(hCoeffs.end(n-i-1)))
++                            * matA.col(i).end(n-i-1);
++      // rank-2 update
++      //Block<MatrixType,Dynamic,1> B(matA,i+1,i,n-i-1,1);
++      matA.corner(BottomRight,n-i-1,n-i-1).template part<Lower>() -=
++            (matA.col(i).end(n-i-1) * hCoeffs.end(n-i-1).adjoint()).lazy()
++          + (hCoeffs.end(n-i-1) * matA.col(i).end(n-i-1).adjoint()).lazy();
++      #endif
++      /* end initial algorithm */
++
++      /* If we still want to minimize the operation count (i.e., perform operations on the lower part only),
++       * then we could provide the following algorithm for the selfadjoint - vector product. However, a full
++       * matrix-vector product is still faster (at least for dynamic and not-too-small sizes; small
++       * matrices were not checked). The algo performs block matrix-vector and transposed matrix-vector products. */
++      #ifdef EIGEN_NEVER_DEFINED
++      int n4 = (std::max(0,n-4)/4)*4;
++      hCoeffs.end(n-i-1).setZero();
++      for (int b=i+1; b<n4; b+=4)
++      {
++        // the ?x4 part:
++        hCoeffs.end(b-4) +=
++            Block<MatrixType,Dynamic,4>(matA,b+4,b,n-b-4,4) * matA.template block<4,1>(b,i);
++        // the respective transposed part:
++        Block<CoeffVectorType,4,1>(hCoeffs, b, 0, 4,1) +=
++            Block<MatrixType,Dynamic,4>(matA,b+4,b,n-b-4,4).adjoint() * Block<MatrixType,Dynamic,1>(matA,b+4,i,n-b-4,1);
++        // the 4x4 block diagonal:
++        Block<CoeffVectorType,4,1>(hCoeffs, b, 0, 4,1) +=
++            (Block<MatrixType,4,4>(matA,b,b,4,4).template part<Lower|SelfAdjoint>()
++             * (h * Block<MatrixType,4,1>(matA,b,i,4,1))).lazy();
++      }
++      #endif
++      // todo: handle the remaining part
++      /* end optimized selfadjoint - vector product */
++
++      /* Another interesting note: the above rank-2 update is much slower than the following hand-written loop.
++       * After an analysis of the generated assembly, it seems GCC (4.2) generates poor code because of the Block. Moreover,
++       * if we remove the specialization of Block for Matrix then it is even worse, much worse! */
++      #ifdef EIGEN_NEVER_DEFINED
++      for (int j1=i+1; j1<n; ++j1)
++      for (int i1=j1;  i1<n; i1++)
++        matA.coeffRef(i1,j1) -= matA.coeff(i1,i)*ei_conj(hCoeffs.coeff(j1-1))
++                              + hCoeffs.coeff(i1-1)*ei_conj(matA.coeff(j1,i));
++      #endif
++      /* end hand-written partial rank-2 update */
++
++      /* The current fastest implementation: the full matrix is used, with no "optimization" to use/compute
++       * only half of the matrix. Custom vectorization of the inner col -= alpha X + beta Y such that accesses
++       * to col are always aligned. Once we support that in Assign, the algorithm could be rewritten as
++       * a single compact expression. This code is therefore a good benchmark for when we do that. */
++
++      // let's use the end of hCoeffs to store temporary values:
++      hCoeffs.end(n-i-1) = (matA.corner(BottomRight,n-i-1,n-i-1) * (h * matA.col(i).end(n-i-1))).lazy();
++      // FIXME in the above expr a temporary is created because of the scalar multiple by h
++
++      hCoeffs.end(n-i-1) += (h * Scalar(-0.5) * matA.col(i).end(n-i-1).dot(hCoeffs.end(n-i-1)))
++                            * matA.col(i).end(n-i-1);
++      
++      const Scalar* EIGEN_RESTRICT pb = &matA.coeffRef(0,i);
++      const Scalar* EIGEN_RESTRICT pa = (&hCoeffs.coeffRef(0)) - 1;
++      for (int j1=i+1; j1<n; ++j1)
++      {
++        int starti = i+1;
++        int alignedEnd = starti;
++        if (PacketSize>1)
++        {
++          int alignedStart = (starti) + ei_alignmentOffset(&matA.coeffRef(starti,j1), n-starti);
++          alignedEnd = alignedStart + ((n-alignedStart)/PacketSize)*PacketSize;
++          
++          for (int i1=starti; i1<alignedStart; ++i1)
++            matA.coeffRef(i1,j1) -= matA.coeff(i1,i)*ei_conj(hCoeffs.coeff(j1-1))
++                                  + hCoeffs.coeff(i1-1)*ei_conj(matA.coeff(j1,i));
++          
++          Packet tmp0 = ei_pset1(hCoeffs.coeff(j1-1));
++          Packet tmp1 = ei_pset1(matA.coeff(j1,i));
++          Scalar* pc = &matA.coeffRef(0,j1);
++          for (int i1=alignedStart ; i1<alignedEnd; i1+=PacketSize)
++            ei_pstore(pc+i1,ei_psub(ei_pload(pc+i1),
++              ei_padd(ei_pmul(tmp0, ei_ploadu(pb+i1)),
++                      ei_pmul(tmp1, ei_ploadu(pa+i1)))));
++        }
++        for (int i1=alignedEnd; i1<n; ++i1)
++          matA.coeffRef(i1,j1) -= matA.coeff(i1,i)*ei_conj(hCoeffs.coeff(j1-1))
++                                + hCoeffs.coeff(i1-1)*ei_conj(matA.coeff(j1,i));
++      }
++      /* end optimized implementation */
++
++      // note: at that point matA(i+1,i+1) is the (i+1)-th element of the final diagonal
++      // note: the sequence of the beta values leads to the subdiagonal entries
++      matA.col(i).coeffRef(i+1) = beta;
++
++      hCoeffs.coeffRef(i) = h;
++    }
++  }
++  if (NumTraits<Scalar>::IsComplex)
++  {
++    // Householder transformation on the remaining single scalar
++    int i = n-2;
++    Scalar v0 = matA.col(i).coeff(i+1);
++    RealScalar beta = ei_abs(v0);
++    if (ei_real(v0)>=0.)
++      beta = -beta;
++    matA.col(i).coeffRef(i+1) = beta;
++    hCoeffs.coeffRef(i) = (beta - v0) / beta;
++  }
++  else
++  {
++    hCoeffs.coeffRef(n-2) = 0;
++  }
++}
++
++/** reconstructs and returns the matrix Q */
++template<typename MatrixType>
++typename Tridiagonalization<MatrixType>::MatrixType
++Tridiagonalization<MatrixType>::matrixQ(void) const
++{
++  int n = m_matrix.rows();
++  MatrixType matQ = MatrixType::Identity(n,n);
++  for (int i = n-2; i>=0; i--)
++  {
++    Scalar tmp = m_matrix.coeff(i+1,i);
++    m_matrix.const_cast_derived().coeffRef(i+1,i) = 1;
++
++    matQ.corner(BottomRight,n-i-1,n-i-1) -=
++      ((m_hCoeffs.coeff(i) * m_matrix.col(i).end(n-i-1)) *
++      (m_matrix.col(i).end(n-i-1).adjoint() * matQ.corner(BottomRight,n-i-1,n-i-1)).lazy()).lazy();
++
++    m_matrix.const_cast_derived().coeffRef(i+1,i) = tmp;
++  }
++  return matQ;
++}
++
++/** Performs a full decomposition in place */
++template<typename MatrixType>
++void Tridiagonalization<MatrixType>::decomposeInPlace(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
++{
++  int n = mat.rows();
++  ei_assert(mat.cols()==n && diag.size()==n && subdiag.size()==n-1);
++  if (n==3 && (!NumTraits<Scalar>::IsComplex) )
++  {
++    _decomposeInPlace3x3(mat, diag, subdiag, extractQ);
++  }
++  else
++  {
++    Tridiagonalization tridiag(mat);
++    diag = tridiag.diagonal();
++    subdiag = tridiag.subDiagonal();
++    if (extractQ)
++      mat = tridiag.matrixQ();
++  }
++}
++
++/** \internal
++  * Optimized path for 3x3 matrices.
++  * Especially useful for plane fitting.
++  */
++template<typename MatrixType>
++void Tridiagonalization<MatrixType>::_decomposeInPlace3x3(MatrixType& mat, DiagonalType& diag, SubDiagonalType& subdiag, bool extractQ)
++{
++  diag[0] = ei_real(mat(0,0));
++  RealScalar v1norm2 = ei_abs2(mat(0,2));
++  if (ei_isMuchSmallerThan(v1norm2, RealScalar(1)))
++  {
++    diag[1] = ei_real(mat(1,1));
++    diag[2] = ei_real(mat(2,2));
++    subdiag[0] = ei_real(mat(0,1));
++    subdiag[1] = ei_real(mat(1,2));
++    if (extractQ)
++      mat.setIdentity();
++  }
++  else
++  {
++    RealScalar beta = ei_sqrt(ei_abs2(mat(0,1))+v1norm2);
++    RealScalar invBeta = RealScalar(1)/beta;
++    Scalar m01 = mat(0,1) * invBeta;
++    Scalar m02 = mat(0,2) * invBeta;
++    Scalar q = RealScalar(2)*m01*mat(1,2) + m02*(mat(2,2) - mat(1,1));
++    diag[1] = ei_real(mat(1,1) + m02*q);
++    diag[2] = ei_real(mat(2,2) - m02*q);
++    subdiag[0] = beta;
++    subdiag[1] = ei_real(mat(1,2) - m01 * q);
++    if (extractQ)
++    {
++      mat(0,0) = 1;
++      mat(0,1) = 0;
++      mat(0,2) = 0;
++      mat(1,0) = 0;
++      mat(1,1) = m01;
++      mat(1,2) = m02;
++      mat(2,0) = 0;
++      mat(2,1) = m02;
++      mat(2,2) = -m01;
++    }
++  }
++}
++
++#endif // EIGEN_HIDE_HEAVY_CODE
++
++#endif // EIGEN_TRIDIAGONALIZATION_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Regression/Regression.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Regression/Regression.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Regression/Regression.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Regression/Regression.h	2008-08-20 18:52:54.000000000 +0200
+@@ -0,0 +1,199 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2006-2008 Benoit Jacob <jacob at math.jussieu.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_REGRESSION_H
++#define EIGEN_REGRESSION_H
++
++/** \ingroup Regression_Module
++  *
++  * \regression_module
++  *
++  * For a set of points, this function tries to express
++  * one of the coords as a linear (affine) function of the other coords.
++  *
++  * This is best explained by an example. This function works in full
++  * generality, for points in a space of arbitrary dimension, and also over
++  * the complex numbers, but for this example we will work in dimension 3
++  * over the real numbers (doubles).
++  *
++  * So let us work with the following set of 5 points given by their
++  * \f$(x,y,z)\f$ coordinates:
++  * @code
++    Vector3d points[5];
++    points[0] = Vector3d( 3.02, 6.89, -4.32 );
++    points[1] = Vector3d( 2.01, 5.39, -3.79 );
++    points[2] = Vector3d( 2.41, 6.01, -4.01 );
++    points[3] = Vector3d( 2.09, 5.55, -3.86 );
++    points[4] = Vector3d( 2.58, 6.32, -4.10 );
++  * @endcode
++  * Suppose that we want to express the second coordinate (\f$y\f$) as a linear
++  * expression in \f$x\f$ and \f$z\f$, that is,
++  * \f[ y=ax+bz+c \f]
++  * for some constants \f$a,b,c\f$. Thus, we want to find the best possible
++  * constants \f$a,b,c\f$ so that the plane of equation \f$y=ax+bz+c\f$ best
++  * fits the five points above. To do that, call this function as follows:
++  * @code
++    Vector3d coeffs; // will store the coefficients a, b, c
++    Vector3d* ptrs[5] = { &points[0], &points[1], &points[2], &points[3], &points[4] };
++    linearRegression(
++      5,
++      ptrs, // array of pointers to the points declared above
++      &coeffs,
++      1 // the coord to express as a function of
++        // the other ones. 0 means x, 1 means y, 2 means z.
++    );
++  * @endcode
++  * Now the vector \a coeffs is approximately
++  * \f$( 0.495 ,  -1.927 ,  -2.906 )\f$.
++  * Thus, we get \f$a=0.495, b = -1.927, c = -2.906\f$. Let us check for
++  * instance how near points[0] is from the plane of equation \f$y=ax+bz+c\f$.
++  * Looking at the coords of points[0], we see that:
++  * \f[ax+bz+c = 0.495 * 3.02 + (-1.927) * (-4.32) + (-2.906) = 6.91.\f]
++  * On the other hand, we have \f$y=6.89\f$. We see that the values
++  * \f$6.91\f$ and \f$6.89\f$
++  * are near, so points[0] is very near the plane of equation \f$y=ax+bz+c\f$.
++  *
++  * Let's now describe precisely the parameters:
++  * @param numPoints the number of points
++  * @param points the array of pointers to the points on which to perform the linear regression
++  * @param retCoefficients pointer to the vector in which to store the result.
++                           This vector must be of the same type and size as the
++                           data points. The meaning of its coords is as follows.
++                           For brevity, let \f$n=Size\f$,
++                           \f$r_i=retCoefficients[i]\f$,
++                           and \f$f=funcOfOthers\f$. Denote by
++                           \f$x_0,\ldots,x_{n-1}\f$
++                           the n coordinates in the n-dimensional space.
++                           Then the result equation is:
++                           \f[ x_f = r_0 x_0 + \cdots + r_{f-1}x_{f-1}
++                            + r_{f+1}x_{f+1} + \cdots + r_{n-1}x_{n-1} + r_n. \f]
++  * @param funcOfOthers Determines which coord to express as a function of the
++                        others. Coords are numbered starting from 0, so that a
++                        value of 0 means \f$x\f$, 1 means \f$y\f$,
++                        2 means \f$z\f$, ...
++  *
++  * \sa fitHyperplane()
++  */
++template<typename VectorType>
++void linearRegression(int numPoints,
++                      VectorType **points,
++                      VectorType *result,
++                      int funcOfOthers )
++{
++  typedef typename VectorType::Scalar Scalar;
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType)
++  ei_assert(numPoints >= 1);
++  int size = points[0]->size();
++  ei_assert(funcOfOthers >= 0 && funcOfOthers < size);
++  result->resize(size);
++
++  Matrix<Scalar, Dynamic, VectorType::SizeAtCompileTime,
++         Dynamic, VectorType::MaxSizeAtCompileTime, RowMajorBit>
++    m(numPoints, size);
++  if(funcOfOthers>0)
++    for(int i = 0; i < numPoints; i++)
++      m.row(i).start(funcOfOthers) = points[i]->start(funcOfOthers);
++  if(funcOfOthers<size-1)
++    for(int i = 0; i < numPoints; i++)
++      m.row(i).block(funcOfOthers, size-funcOfOthers-1)
++        = points[i]->end(size-funcOfOthers-1);
++  for(int i = 0; i < numPoints; i++)
++    m.row(i).coeffRef(size-1) = Scalar(1);
++
++  VectorType v(size);
++  v.setZero();
++  for(int i = 0; i < numPoints; i++)
++    v += m.row(i).adjoint() * points[i]->coeff(funcOfOthers);
++
++  ei_assert((m.adjoint()*m).lu().solve(v, result));
++}
++
++/** \ingroup Regression_Module
++  *
++  * \regression_module
++  *
++  * This function is quite similar to linearRegression(), so we refer to the
++  * documentation of this function and only list here the differences.
++  *
++  * The main difference from linearRegression() is that this function doesn't
++  * take a \a funcOfOthers argument. Instead, it finds a general equation
++  * of the form
++  * \f[ r_0 x_0 + \cdots + r_{n-1}x_{n-1} + r_n = 0, \f]
++  * where \f$n=Size\f$, \f$r_i=retCoefficients[i]\f$, and we denote by
++  * \f$x_0,\ldots,x_{n-1}\f$ the n coordinates in the n-dimensional space.
++  *
++  * Thus, the vector \a retCoefficients has size \f$n+1\f$, which is another
++  * difference from linearRegression().
++  *
++  * This function proceeds by first determining which coord has the smallest variance,
++  * and then calls linearRegression() to express that coord as a function of the other ones.
++  *
++  * \sa linearRegression()
++  */
++template<typename VectorType, typename BigVectorType>
++void fitHyperplane(int numPoints,
++                   VectorType **points,
++                   BigVectorType *result)
++{
++  typedef typename VectorType::Scalar Scalar;
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(VectorType)
++  EIGEN_STATIC_ASSERT_VECTOR_ONLY(BigVectorType)
++  ei_assert(numPoints >= 1);
++  int size = points[0]->size();
++  ei_assert(size+1 == result->size());
++
++  // now let's find out which coord varies the least. This is only
++  // approximate; all that matters is that we don't pick a coordinate
++  // that varies orders of magnitude more than another one.
++  VectorType mean(size);
++  Matrix<typename NumTraits<Scalar>::Real,
++         VectorType::RowsAtCompileTime, VectorType::ColsAtCompileTime,
++         VectorType::MaxRowsAtCompileTime, VectorType::MaxColsAtCompileTime
++        > variance(size);
++  mean.setZero();
++  variance.setZero();
++  for(int i = 0; i < numPoints; i++)
++    mean += *(points[i]);
++  mean /= numPoints;
++  for(int j = 0; j < size; j++)
++  {
++    for(int i = 0; i < numPoints; i++)
++      variance.coeffRef(j) += ei_abs2(points[i]->coeff(j) - mean.coeff(j));
++  }
++
++  int coord_min_variance;
++  variance.minCoeff(&coord_min_variance);
++
++  // let's now perform a linear regression with respect to that
++  // not-too-much-varying coord
++  VectorType affine(size);
++  linearRegression(numPoints, points, &affine, coord_min_variance);
++
++  if(coord_min_variance>0)
++    result->start(coord_min_variance) = affine.start(coord_min_variance);
++  result->coeffRef(coord_min_variance) = static_cast<Scalar>(-1);
++  result->end(size-coord_min_variance) = affine.end(size-coord_min_variance);
++}
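++
++/* A companion usage sketch (illustrative; it reuses the `points` array from the
++ * linearRegression() example above, and Vector4d is the usual fixed-size typedef):
++ * @code
++   Vector3d* ptrs[5] = { &points[0], &points[1], &points[2], &points[3], &points[4] };
++   Vector4d plane;   // will receive r_0, r_1, r_2, r_3 of r_0 x + r_1 y + r_2 z + r_3 = 0
++   fitHyperplane(5, ptrs, &plane);
++   // up to scale, this is the same plane as the y = ax + bz + c fit found above.
++ * @endcode
++ */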
++
++
++#endif // EIGEN_REGRESSION_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/CoreIterators.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/CoreIterators.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/CoreIterators.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/CoreIterators.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,169 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_COREITERATORS_H
++#define EIGEN_COREITERATORS_H
++
++/* This file contains the InnerIterator definitions for the expression types defined in Eigen/Core
++ */
++
++template<typename Derived>
++class MatrixBase<Derived>::InnerIterator
++{
++    typedef typename Derived::Scalar Scalar;
++  public:
++    InnerIterator(const Derived& mat, int outer)
++      : m_matrix(mat), m_inner(0), m_outer(outer), m_end(mat.rows())
++    {}
++
++    Scalar value()
++    {
++      return (Derived::Flags&RowMajorBit) ? m_matrix.coeff(m_outer, m_inner)
++                                          : m_matrix.coeff(m_inner, m_outer);
++    }
++
++    InnerIterator& operator++() { m_inner++; return *this; }
++
++    int index() const { return m_inner; }
++
++    operator bool() const { return m_inner < m_end && m_inner>=0; }
++
++  protected:
++    const Derived& m_matrix;
++    int m_inner;
++    const int m_outer;
++    const int m_end;
++};
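++
++/* A minimal sketch of how this iterator interface is used (illustrative; MatrixXd is the
++ * default column-major dense matrix, for which every inner coefficient is visited):
++ * @code
++   MatrixXd m = MatrixXd::Random(4,4);
++   for (int j = 0; j < m.cols(); ++j)                  // outer dimension (columns here)
++     for (MatrixXd::InnerIterator it(m, j); it; ++it)  // inner dimension (rows here)
++       std::cout << it.index() << " -> " << it.value() << "\n";
++ * @endcode
++ * Sparse expressions provide the same interface but skip their zero entries, which is
++ * what lets the generic sparse assignment and product code reuse such loops unchanged.
++ */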
++
++template<typename MatrixType>
++class Transpose<MatrixType>::InnerIterator : public MatrixType::InnerIterator
++{
++  public:
++
++    InnerIterator(const Transpose& trans, int outer)
++      : MatrixType::InnerIterator(trans.m_matrix, outer)
++    {}
++};
++
++template<typename UnaryOp, typename MatrixType>
++class CwiseUnaryOp<UnaryOp,MatrixType>::InnerIterator
++{
++    typedef typename CwiseUnaryOp::Scalar Scalar;
++    typedef typename ei_traits<CwiseUnaryOp>::_MatrixTypeNested _MatrixTypeNested;
++    typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
++  public:
++
++    InnerIterator(const CwiseUnaryOp& unaryOp, int outer)
++      : m_iter(unaryOp.m_matrix,outer), m_functor(unaryOp.m_functor), m_id(-1)
++    {
++      this->operator++();
++    }
++
++    InnerIterator& operator++()
++    {
++      if (m_iter)
++      {
++        m_id = m_iter.index();
++        m_value = m_functor(m_iter.value());
++        ++m_iter;
++      }
++      else
++      {
++        m_id = -1;
++      }
++      return *this;
++    }
++
++    Scalar value() const { return m_value; }
++
++    int index() const { return m_id; }
++
++    operator bool() const { return m_id>=0; }
++
++  protected:
++    MatrixTypeIterator m_iter;
++    const UnaryOp& m_functor;
++    Scalar m_value;
++    int m_id;
++};
++
++template<typename BinaryOp, typename Lhs, typename Rhs>
++class CwiseBinaryOp<BinaryOp,Lhs,Rhs>::InnerIterator
++{
++    typedef typename CwiseBinaryOp::Scalar Scalar;
++    typedef typename ei_traits<CwiseBinaryOp>::_LhsNested _LhsNested;
++    typedef typename _LhsNested::InnerIterator LhsIterator;
++    typedef typename ei_traits<CwiseBinaryOp>::_RhsNested _RhsNested;
++    typedef typename _RhsNested::InnerIterator RhsIterator;
++  public:
++
++    InnerIterator(const CwiseBinaryOp& binOp, int outer)
++      : m_lhsIter(binOp.m_lhs,outer), m_rhsIter(binOp.m_rhs,outer), m_functor(binOp.m_functor), m_id(-1)
++    {
++      this->operator++();
++    }
++
++    InnerIterator& operator++()
++    {
++      if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
++      {
++        m_id = m_lhsIter.index();
++        m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
++        ++m_lhsIter;
++        ++m_rhsIter;
++      }
++      else if (m_lhsIter && ((!m_rhsIter) || m_lhsIter.index() < m_rhsIter.index()))
++      {
++        m_id = m_lhsIter.index();
++        m_value = m_functor(m_lhsIter.value(), Scalar(0));
++        ++m_lhsIter;
++      }
++      else if (m_rhsIter && ((!m_lhsIter) || m_lhsIter.index() > m_rhsIter.index()))
++      {
++        m_id = m_rhsIter.index();
++        m_value = m_functor(Scalar(0), m_rhsIter.value());
++        ++m_rhsIter;
++      }
++      else
++      {
++        m_id = -1;
++      }
++      return *this;
++    }
++
++    Scalar value() const { return m_value; }
++
++    int index() const { return m_id; }
++
++    operator bool() const { return m_id>=0; }
++
++  protected:
++    LhsIterator m_lhsIter;
++    RhsIterator m_rhsIter;
++    const BinaryOp& m_functor;
++    Scalar m_value;
++    int m_id;
++};
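++
++/* How the merge above behaves, on a small worked example (illustrative only): for a
++ * coefficient-wise binary op where the lhs column holds entries at indices {1,4,7} and
++ * the rhs column at {4,9}, the iterator visits indices 1, 4, 7, 9 and yields
++ * m_functor(lhs(1),0), m_functor(lhs(4),rhs(4)), m_functor(lhs(7),0), m_functor(0,rhs(9)),
++ * i.e. a single pass over both sorted index lists, exactly like a sorted-list merge.
++ */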
++
++#endif // EIGEN_COREITERATORS_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/HashMatrix.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/HashMatrix.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/HashMatrix.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/HashMatrix.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,165 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_HASHMATRIX_H
++#define EIGEN_HASHMATRIX_H
++
++template<typename _Scalar, int _Flags>
++struct ei_traits<HashMatrix<_Scalar, _Flags> >
++{
++  typedef _Scalar Scalar;
++  enum {
++    RowsAtCompileTime = Dynamic,
++    ColsAtCompileTime = Dynamic,
++    MaxRowsAtCompileTime = Dynamic,
++    MaxColsAtCompileTime = Dynamic,
++    Flags = SparseBit | _Flags,
++    CoeffReadCost = NumTraits<Scalar>::ReadCost,
++    SupportedAccessPatterns = RandomAccessPattern
++  };
++};
++
++// TODO reimplement this class using custom linked lists
++template<typename _Scalar, int _Flags>
++class HashMatrix : public SparseMatrixBase<HashMatrix<_Scalar, _Flags> >
++{
++  public:
++    EIGEN_GENERIC_PUBLIC_INTERFACE(HashMatrix)
++    class InnerIterator;
++  protected:
++
++    typedef typename std::map<int, Scalar>::iterator MapIterator;
++    typedef typename std::map<int, Scalar>::const_iterator ConstMapIterator;
++
++  public:
++    inline int rows() const { return m_innerSize; }
++    inline int cols() const { return m_data.size(); }
++
++    inline const Scalar& coeff(int row, int col) const
++    {
++      const ConstMapIterator it = m_data[col].find(row);
++      if (it==m_data[col].end())
++        return Scalar(0);
++      return it->second;
++    }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      return m_data[col][row];
++    }
++
++  public:
++
++    inline void startFill(int reserveSize = 1000) {}
++
++    inline Scalar& fill(int row, int col) { return coeffRef(row, col); }
++
++    inline void endFill() {}
++
++    ~HashMatrix()
++    {}
++
++    inline void shallowCopy(const HashMatrix& other)
++    {
++      EIGEN_DBG_SPARSE(std::cout << "HashMatrix:: shallowCopy\n");
++      // FIXME implement a true shallow copy !!
++      resize(other.rows(), other.cols());
++      for (int j=0; j<this->outerSize(); ++j)
++        m_data[j] = other.m_data[j];
++    }
++
++    void resize(int _rows, int _cols)
++    {
++      if (cols() != _cols)
++      {
++        m_data.resize(_cols);
++      }
++      m_innerSize = _rows;
++    }
++
++    inline HashMatrix(int rows, int cols)
++      : m_innerSize(0)
++    {
++      resize(rows, cols);
++    }
++
++    template<typename OtherDerived>
++    inline HashMatrix(const MatrixBase<OtherDerived>& other)
++      : m_innerSize(0)
++    {
++      *this = other.derived();
++    }
++
++    inline HashMatrix& operator=(const HashMatrix& other)
++    {
++      if (other.isRValue())
++      {
++        shallowCopy(other);
++      }
++      else
++      {
++        resize(other.rows(), other.cols());
++        for (int col=0; col<cols(); ++col)
++          m_data[col] = other.m_data[col];
++      }
++      return *this;
++    }
++
++    template<typename OtherDerived>
++    inline HashMatrix& operator=(const MatrixBase<OtherDerived>& other)
++    {
++      return SparseMatrixBase<HashMatrix>::operator=(other);
++    }
++
++  protected:
++
++    std::vector<std::map<int, Scalar> > m_data;
++    int m_innerSize;
++
++};
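++
++/* A quick usage sketch (illustrative; as SupportedAccessPatterns above indicates, the
++ * point of this class is cheap random write access, so no particular fill order is needed):
++ * @code
++   HashMatrix<double,0> h(4,4);          // 0: no extra flag bits
++   h.coeffRef(3,1) = 2;                  // entries live in one std::map per column
++   h.coeffRef(0,0) = 5;
++   h.coeffRef(3,1) += 1;                 // updating an existing entry is equally cheap
++ * @endcode
++ */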
++
++template<typename Scalar, int _Flags>
++class HashMatrix<Scalar,_Flags>::InnerIterator
++{
++  public:
++
++    InnerIterator(const HashMatrix& mat, int col)
++      : m_matrix(mat), m_it(mat.m_data[col].begin()), m_end(mat.m_data[col].end())
++    {}
++
++    InnerIterator& operator++() { m_it++; return *this; }
++
++    Scalar value() { return m_it->second; }
++
++    int index() const { return m_it->first; }
++
++    operator bool() const { return m_it!=m_end; }
++
++  protected:
++    const HashMatrix& m_matrix;
++    typename HashMatrix::ConstMapIterator m_it;
++    typename HashMatrix::ConstMapIterator m_end;
++};
++
++#endif // EIGEN_HASHMATRIX_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/LinkedVectorMatrix.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/LinkedVectorMatrix.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/LinkedVectorMatrix.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/LinkedVectorMatrix.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,283 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_LINKEDVECTORMATRIX_H
++#define EIGEN_LINKEDVECTORMATRIX_H
++
++template<typename _Scalar, int _Flags>
++struct ei_traits<LinkedVectorMatrix<_Scalar,_Flags> >
++{
++  typedef _Scalar Scalar;
++  enum {
++    RowsAtCompileTime = Dynamic,
++    ColsAtCompileTime = Dynamic,
++    MaxRowsAtCompileTime = Dynamic,
++    MaxColsAtCompileTime = Dynamic,
++    Flags = SparseBit | _Flags,
++    CoeffReadCost = NumTraits<Scalar>::ReadCost,
++    SupportedAccessPatterns = InnerCoherentAccessPattern
++  };
++};
++
++template<typename Element, int ChunkSize = 8>
++struct LinkedVectorChunk
++{
++  LinkedVectorChunk() : size(0), next(0), prev(0) {}
++  Element data[ChunkSize];
++  LinkedVectorChunk* next;
++  LinkedVectorChunk* prev;
++  int size;
++  bool isFull() const { return size==ChunkSize; }
++};
++
++template<typename _Scalar, int _Flags>
++class LinkedVectorMatrix : public SparseMatrixBase<LinkedVectorMatrix<_Scalar,_Flags> >
++{
++  public:
++    EIGEN_GENERIC_PUBLIC_INTERFACE(LinkedVectorMatrix)
++    class InnerIterator;
++  protected:
++
++    enum {
++      RowMajor = Flags&RowMajorBit ? 1 : 0
++    };
++
++    struct ValueIndex
++    {
++      ValueIndex() : value(0), index(0) {}
++      ValueIndex(Scalar v, int i) : value(v), index(i) {}
++      Scalar value;
++      int index;
++    };
++    typedef LinkedVectorChunk<ValueIndex,8> VectorChunk;
++
++    inline int find(VectorChunk** _el, int id)
++    {
++      VectorChunk* el = *_el;
++      while (el && el->data[el->size-1].index<id)
++        el = el->next;
++      *_el = el;
++      if (el)
++      {
++        // binary search
++        int maxI = el->size-1;
++        int minI = 0;
++        int i = el->size/2;
++        const ValueIndex* data = el->data;
++        while (data[i].index!=id)
++        {
++          if (data[i].index<id)
++          {
++            minI = i+1;
++            i = (maxI + minI)/2;
++          }
++          else
++          {
++            maxI = i-1;
++            i = (maxI + minI)/2;
++          }
++          if (minI>maxI)
++            return -1;
++        }
++        if (data[i].index==id)
++          return i;
++      }
++      return -1;
++    }
++
++  public:
++    inline int rows() const { return RowMajor ? m_data.size() : m_innerSize; }
++    inline int cols() const { return RowMajor ? m_innerSize : m_data.size(); }
++
++    inline const Scalar& coeff(int row, int col) const
++    {
++      const int outer = RowMajor ? row : col;
++      const int inner = RowMajor ? col : row;
++
++      VectorChunk* el = m_data[outer];
++      int id = find(&el, inner);
++      if (id<0)
++        return Scalar(0);
++      return el->data[id].value;
++    }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      const int outer = RowMajor ? row : col;
++      const int inner = RowMajor ? col : row;
++
++      VectorChunk* el = m_data[outer];
++      int id = find(&el, inner);
++      ei_assert(id>=0);
++//       if (id<0)
++//         return Scalar(0);
++      return el->data[id].value;
++    }
++
++  public:
++
++    inline void startFill(int reserveSize = 1000)
++    {
++      clear();
++      for (int i=0; i<m_data.size(); ++i)
++        m_ends[i] = m_data[i] = 0;
++    }
++
++    inline Scalar& fill(int row, int col)
++    {
++      const int outer = RowMajor ? row : col;
++      const int inner = RowMajor ? col : row;
++      if (m_ends[outer]==0)
++      {
++        m_data[outer] = m_ends[outer] = new VectorChunk();
++      }
++      else
++      {
++        ei_assert(m_ends[outer]->data[m_ends[outer]->size-1].index < inner);
++        if (m_ends[outer]->isFull())
++        {
++
++          VectorChunk* el = new VectorChunk();
++          m_ends[outer]->next = el;
++          el->prev = m_ends[outer];
++          m_ends[outer] = el;
++        }
++      }
++      m_ends[outer]->data[m_ends[outer]->size].index = inner;
++      return m_ends[outer]->data[m_ends[outer]->size++].value;
++    }
++
++    inline void endFill() { }
++
++    ~LinkedVectorMatrix()
++    {
++      clear();
++    }
++
++    void clear()
++    {
++      for (int i=0; i<m_data.size(); ++i)
++      {
++        VectorChunk* el = m_data[i];
++        while (el)
++        {
++          VectorChunk* tmp = el;
++          el = el->next;
++          delete tmp;
++        }
++      }
++    }
++
++    void resize(int rows, int cols)
++    {
++      const int outers = RowMajor ? rows : cols;
++      const int inners = RowMajor ? cols : rows;
++
++      if (this->outerSize() != outers)
++      {
++        clear();
++        m_data.resize(outers);
++        m_ends.resize(outers);
++        for (int i=0; i<m_data.size(); ++i)
++          m_ends[i] = m_data[i] = 0;
++      }
++      m_innerSize = inners;
++    }
++
++    inline LinkedVectorMatrix(int rows, int cols)
++      : m_innerSize(0)
++    {
++      resize(rows, cols);
++    }
++
++    template<typename OtherDerived>
++    inline LinkedVectorMatrix(const MatrixBase<OtherDerived>& other)
++      : m_innerSize(0)
++    {
++      *this = other.derived();
++    }
++
++    inline void swap(LinkedVectorMatrix& other)
++    {
++      EIGEN_DBG_SPARSE(std::cout << "LinkedVectorMatrix:: swap\n");
++      resize(other.rows(), other.cols());
++      m_data.swap(other.m_data);
++      m_ends.swap(other.m_ends);
++    }
++
++    inline LinkedVectorMatrix& operator=(const LinkedVectorMatrix& other)
++    {
++      if (other.isRValue())
++      {
++        swap(other.const_cast_derived());
++      }
++      else
++      {
++        // TODO implement a specialized deep copy here
++        return operator=<LinkedVectorMatrix>(other);
++      }
++      return *this;
++    }
++
++    template<typename OtherDerived>
++    inline LinkedVectorMatrix& operator=(const MatrixBase<OtherDerived>& other)
++    {
++      return SparseMatrixBase<LinkedVectorMatrix>::operator=(other.derived());
++    }
++
++  protected:
++
++    // outer vector of inner linked vector chunks
++    std::vector<VectorChunk*> m_data;
++    // stores a reference to the last vector chunk for efficient filling
++    std::vector<VectorChunk*> m_ends;
++    int m_innerSize;
++
++};
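++
++/* A filling sketch (illustrative; as the assertion in fill() above shows, the inner
++ * indices of each outer vector must be passed in strictly increasing order, while
++ * different outer vectors can be filled independently):
++ * @code
++   LinkedVectorMatrix<double,0> m(4,4);  // 0: no extra flag bits (column-major storage)
++   m.startFill();
++   m.fill(1,0) = 2;                      // column 0, inner index 1 ...
++   m.fill(3,0) = -1;                     // ... then inner index 3
++   m.fill(0,2) = 7;                      // column 2; column 1 simply stays empty
++   m.endFill();
++ * @endcode
++ */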
++
++
++template<typename Scalar, int _Flags>
++class LinkedVectorMatrix<Scalar,_Flags>::InnerIterator
++{
++  public:
++
++    InnerIterator(const LinkedVectorMatrix& mat, int col)
++      : m_matrix(mat), m_el(mat.m_data[col]), m_it(0)
++    {}
++
++    InnerIterator& operator++() { m_it++; if (m_it>=m_el->size) { m_el = m_el->next; m_it = 0; } return *this; }
++
++    Scalar value() { return m_el->data[m_it].value; }
++
++    int index() const { return m_el->data[m_it].index; }
++
++    operator bool() const { return m_el && (m_el->next || m_it<m_el->size); }
++
++  protected:
++    const LinkedVectorMatrix& m_matrix;
++    VectorChunk* m_el;
++    int m_it;
++};
++
++#endif // EIGEN_LINKEDVECTORMATRIX_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseArray.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseArray.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseArray.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseArray.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,133 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SPARSE_ARRAY_H
++#define EIGEN_SPARSE_ARRAY_H
++
++/** Stores a sparse set of values as a list of values and a list of indices.
++  *
++  */
++template<typename Scalar> class SparseArray
++{
++  public:
++    SparseArray()
++      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
++    {}
++
++    SparseArray(int size)
++      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
++    {
++      resize(size);
++    }
++
++    SparseArray(const SparseArray& other)
++      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
++    {
++      *this = other;
++    }
++
++    SparseArray& operator=(const SparseArray& other)
++    {
++      resize(other.size());
++      memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
++      memcpy(m_indices, other.m_indices, m_size * sizeof(int));
++      return *this;
++    }
++
++    void swap(SparseArray& other)
++    {
++      std::swap(m_values, other.m_values);
++      std::swap(m_indices, other.m_indices);
++      std::swap(m_size, other.m_size);
++      std::swap(m_allocatedSize, other.m_allocatedSize);
++    }
++
++    ~SparseArray()
++    {
++      delete[] m_values;
++      delete[] m_indices;
++    }
++
++    void reserve(int size)
++    {
++      int newAllocatedSize = m_size + size;
++      if (newAllocatedSize > m_allocatedSize)
++        reallocate(newAllocatedSize);
++    }
++
++    void squeeze()
++    {
++      if (m_allocatedSize>m_size)
++        reallocate(m_size);
++    }
++
++    void resize(int size, int reserveSizeFactor = 0)
++    {
++      if (m_allocatedSize<size)
++        reallocate(size + reserveSizeFactor*size);
++      m_size = size;
++    }
++
++    void append(const Scalar& v, int i)
++    {
++      int id = m_size;
++      resize(m_size+1, 1);
++      m_values[id] = v;
++      m_indices[id] = i;
++    }
++
++    int size() const { return m_size; }
++    void clear() { m_size = 0; }
++
++    Scalar& value(int i) { return m_values[i]; }
++    const Scalar& value(int i) const { return m_values[i]; }
++
++    int& index(int i) { return m_indices[i]; }
++    const int& index(int i) const { return m_indices[i]; }
++
++  protected:
++
++    void reallocate(int size)
++    {
++      Scalar* newValues  = new Scalar[size];
++      int* newIndices = new int[size];
++      int copySize = std::min(size, m_size);
++      // copy
++      memcpy(newValues,  m_values,  copySize * sizeof(Scalar));
++      memcpy(newIndices, m_indices, copySize * sizeof(int));
++      // delete old stuff
++      delete[] m_values;
++      delete[] m_indices;
++      m_values = newValues;
++      m_indices = newIndices;
++      m_allocatedSize = size;
++    }
++
++  protected:
++    Scalar* m_values;
++    int* m_indices;
++    int m_size;
++    int m_allocatedSize;
++
++};
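++
++/* A tiny usage sketch of this helper container (illustrative):
++ * @code
++   SparseArray<double> a;
++   a.append(0.5, 3);        // value 0.5 at index 3
++   a.append(2.0, 7);        // value 2.0 at index 7
++   double v = a.value(1);   // 2.0
++   int    i = a.index(1);   // 7
++   a.squeeze();             // shrink the allocated storage down to size()
++ * @endcode
++ */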
++
++#endif // EIGEN_SPARSE_ARRAY_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseMatrixBase.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseMatrixBase.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseMatrixBase.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseMatrixBase.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,152 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SPARSEMATRIXBASE_H
++#define EIGEN_SPARSEMATRIXBASE_H
++
++template<typename Derived>
++class SparseMatrixBase : public MatrixBase<Derived>
++{
++  public:
++
++    typedef MatrixBase<Derived> Base;
++    typedef typename Base::Scalar Scalar;
++    enum {
++      Flags = Base::Flags,
++      RowMajor = ei_traits<Derived>::Flags&RowMajorBit ? 1 : 0
++    };
++
++    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
++    inline Derived& derived() { return *static_cast<Derived*>(this); }
++    inline Derived& const_cast_derived() const
++    { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
++
++    SparseMatrixBase()
++      : m_isRValue(false)
++    {}
++
++    bool isRValue() const { return m_isRValue; }
++    Derived& markAsRValue() { m_isRValue = true; return derived(); }
++
++    inline Derived& operator=(const Derived& other)
++    {
++      if (other.isRValue())
++        derived().swap(other.const_cast_derived());
++      else
++        this->operator=<Derived>(other);
++      return derived();
++    }
++
++    template<typename OtherDerived>
++    inline Derived& operator=(const MatrixBase<OtherDerived>& other)
++    {
++      const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
++      const int outerSize = other.outerSize();
++      typedef typename ei_meta_if<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::ret TempType;
++      TempType temp(other.rows(), other.cols());
++
++      temp.startFill(std::max(this->rows(),this->cols())*2);
++      for (int j=0; j<outerSize; ++j)
++      {
++        for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
++        {
++          Scalar v = it.value();
++          if (v!=Scalar(0))
++            if (OtherDerived::Flags & RowMajorBit) temp.fill(j,it.index()) = v;
++            else temp.fill(it.index(),j) = v;
++        }
++      }
++      temp.endFill();
++
++      derived() = temp.markAsRValue();
++      return derived();
++    }
++
++    template<typename OtherDerived>
++    inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
++    {
++      const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
++//       std::cout << "eval transpose = " << transpose << "\n";
++      const int outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
++      if ((!transpose) && other.isRValue())
++      {
++        // eval without temporary
++        derived().resize(other.rows(), other.cols());
++        derived().startFill(std::max(this->rows(),this->cols())*2);
++        for (int j=0; j<outerSize; ++j)
++        {
++          for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
++          {
++            Scalar v = it.value();
++            if (v!=Scalar(0))
++              if (RowMajor) derived().fill(j,it.index()) = v;
++              else derived().fill(it.index(),j) = v;
++          }
++        }
++        derived().endFill();
++      }
++      else
++        this->operator=<OtherDerived>(static_cast<const MatrixBase<OtherDerived>&>(other));
++      return derived();
++    }
++
++    template<typename Lhs, typename Rhs>
++    inline Derived& operator=(const Product<Lhs,Rhs,SparseProduct>& product);
++
++    friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
++    {
++      if (Flags&RowMajorBit)
++      {
++        for (int row=0; row<m.outerSize(); ++row)
++        {
++          int col = 0;
++          for (typename Derived::InnerIterator it(m.derived(), row); it; ++it)
++          {
++            for ( ; col<it.index(); ++col)
++              s << "0 ";
++            std::cout << it.value() << " ";
++            ++col;
++          }
++          for ( ; col<m.cols(); ++col)
++            s << "0 ";
++          s << std::endl;
++        }
++      }
++      else
++      {
++        LinkedVectorMatrix<Scalar, RowMajorBit> trans = m.derived();
++        s << trans;
++      }
++      return s;
++    }
++
++    template<typename OtherDerived>
++    OtherDerived solveTriangular(const MatrixBase<OtherDerived>& other) const;
++
++  protected:
++
++    bool m_isRValue;
++};
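++
++/* A sketch of what the generic assignment above is designed to enable (illustrative;
++ * it assumes the dense expression provides the outerSize()/InnerIterator interface that
++ * operator= uses, and SparseMatrix is the compressed format defined in SparseMatrix.h):
++ * @code
++   MatrixXd d = MatrixXd::Zero(5,5);
++   d(0,0) = 3; d(2,1) = -1; d(4,3) = 7;
++   SparseMatrix<double> s(d);            // iterates over d and stores only the
++                                         // three non-zero coefficients
++ * @endcode
++ */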
++
++#endif // EIGEN_SPARSEMATRIXBASE_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseMatrix.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseMatrix.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseMatrix.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseMatrix.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,269 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SPARSEMATRIX_H
++#define EIGEN_SPARSEMATRIX_H
++
++/** \class SparseMatrix
++  *
++  * \brief Sparse matrix
++  *
++  * \param _Scalar the scalar type, i.e. the type of the coefficients
++  *
++  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
++  *
++  */
++template<typename _Scalar, int _Flags>
++struct ei_traits<SparseMatrix<_Scalar, _Flags> >
++{
++  typedef _Scalar Scalar;
++  enum {
++    RowsAtCompileTime = Dynamic,
++    ColsAtCompileTime = Dynamic,
++    MaxRowsAtCompileTime = Dynamic,
++    MaxColsAtCompileTime = Dynamic,
++    Flags = SparseBit | _Flags,
++    CoeffReadCost = NumTraits<Scalar>::ReadCost,
++    SupportedAccessPatterns = FullyCoherentAccessPattern
++  };
++};
++
++
++
++template<typename _Scalar, int _Flags>
++class SparseMatrix : public SparseMatrixBase<SparseMatrix<_Scalar, _Flags> >
++{
++  public:
++    EIGEN_GENERIC_PUBLIC_INTERFACE(SparseMatrix)
++
++  protected:
++  public:
++
++    typedef SparseMatrixBase<SparseMatrix> SparseBase;
++    enum {
++      RowMajor = SparseBase::RowMajor
++    };
++
++    int m_outerSize;
++    int m_innerSize;
++    int* m_outerIndex;
++    SparseArray<Scalar> m_data;
++
++
++  public:
++
++    inline int rows() const { return RowMajor ? m_outerSize : m_innerSize; }
++    inline int cols() const { return RowMajor ? m_innerSize : m_outerSize; }
++    inline int innerSize() const { return m_innerSize; }
++    inline int outerSize() const { return m_outerSize; }
++    inline int innerNonZeros(int j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
++
++    inline Scalar coeff(int row, int col) const
++    {
++      const int outer = RowMajor ? row : col;
++      const int inner = RowMajor ? col : row;
++
++      int id = m_outerIndex[outer];
++      int end = m_outerIndex[outer+1]-1;
++      if (m_data.index(end)==inner)
++        return m_data.value(end);
++      const int* r = std::lower_bound(&m_data.index(id),&m_data.index(end),inner);
++      return (*r==inner) ? m_data.value(r-&m_data.index(0)) : Scalar(0);
++    }
++
++    inline Scalar& coeffRef(int row, int col)
++    {
++      const int outer = RowMajor ? row : col;
++      const int inner = RowMajor ? col : row;
++
++      int id = m_outerIndex[outer];
++      int end = m_outerIndex[outer+1];
++      int* r = std::lower_bound(&m_data.index(id),&m_data.index(end),inner);
++      ei_assert(r!=&m_data.index(end) && *r==inner);
++      return m_data.value(r-&m_data.index(0));
++    }
++
++  public:
++
++    class InnerIterator;
++
++    /** \returns the number of non zero coefficients */
++    inline int nonZeros() const  { return m_data.size(); }
++
++    inline void startFill(int reserveSize = 1000)
++    {
++      m_data.clear();
++      m_data.reserve(reserveSize);
++      for (int i=0; i<=m_outerSize; ++i)
++        m_outerIndex[i] = 0;
++    }
++
++    inline Scalar& fill(int row, int col)
++    {
++      const int outer = RowMajor ? row : col;
++      const int inner = RowMajor ? col : row;
++
++      if (m_outerIndex[outer+1]==0)
++      {
++        int i=outer;
++        while (i>=0 && m_outerIndex[i]==0)
++        {
++          m_outerIndex[i] = m_data.size();
++          --i;
++        }
++        m_outerIndex[outer+1] = m_outerIndex[outer];
++      }
++      assert(m_outerIndex[outer+1] == m_data.size());
++      int id = m_outerIndex[outer+1];
++      m_outerIndex[outer+1]++;
++
++      m_data.append(0, inner);
++      return m_data.value(id);
++    }
++
++    inline void endFill()
++    {
++      int size = m_data.size();
++      int i = m_outerSize;
++      // find the last filled column
++      while (i>=0 && m_outerIndex[i]==0)
++        --i;
++      i++;
++      while (i<=m_outerSize)
++      {
++        m_outerIndex[i] = size;
++        ++i;
++      }
++    }
++
++    void resize(int rows, int cols)
++    {
++      const int outerSize = RowMajor ? rows : cols;
++      m_innerSize = RowMajor ? cols : rows;
++      m_data.clear();
++      if (m_outerSize != outerSize)
++      {
++        delete[] m_outerIndex;
++        m_outerIndex = new int [outerSize+1];
++        m_outerSize = outerSize;
++      }
++    }
++
++    inline SparseMatrix(int rows, int cols)
++      : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
++    {
++      resize(rows, cols);
++    }
++
++    template<typename OtherDerived>
++    inline SparseMatrix(const MatrixBase<OtherDerived>& other)
++      : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
++    {
++      *this = other.derived();
++    }
++
++    inline void swap(SparseMatrix& other)
++    {
++      EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
++      std::swap(m_outerIndex, other.m_outerIndex);
++      std::swap(m_innerSize, other.m_innerSize);
++      std::swap(m_outerSize, other.m_outerSize);
++      m_data.swap(other.m_data);
++    }
++
++    inline SparseMatrix& operator=(const SparseMatrix& other)
++    {
++      if (other.isRValue())
++      {
++        swap(other.const_cast_derived());
++      }
++      else
++      {
++        resize(other.rows(), other.cols());
++        for (int j=0; j<=m_outerSize; ++j)
++          m_outerIndex[j] = other.m_outerIndex[j];
++        m_data = other.m_data;
++      }
++      return *this;
++    }
++
++    template<typename OtherDerived>
++    inline SparseMatrix& operator=(const MatrixBase<OtherDerived>& other)
++    {
++      return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
++    }
++
++    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
++    {
++      EIGEN_DBG_SPARSE(
++        s << "Nonzero entries:\n";
++        for (int i=0; i<m.nonZeros(); ++i)
++        {
++          s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
++        }
++        s << std::endl;
++        s << std::endl;
++        s << "Column pointers:\n";
++        for (int i=0; i<m.cols(); ++i)
++        {
++          s << m.m_outerIndex[i] << " ";
++        }
++        s << std::endl;
++        s << std::endl;
++      );
++      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
++      return s;
++    }
++
++    /** Destructor */
++    inline ~SparseMatrix()
++    {
++      delete[] m_outerIndex;
++    }
++};
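++
++/* A filling sketch mirroring the startFill()/fill()/endFill() protocol above (illustrative;
++ * it assumes the default flags select column-major storage and, as the assertion in fill()
++ * requires, coefficients are passed one outer vector at a time, in increasing order):
++ * @code
++   SparseMatrix<double> m(4,4);
++   m.startFill(4);
++   m.fill(0,0) = 3;                      // column 0
++   m.fill(2,0) = -1;
++   m.fill(1,2) = 7;                      // column 2 (column 1 stays empty)
++   m.fill(3,3) = 2;                      // column 3
++   m.endFill();
++   // m_outerIndex now reads {0, 2, 2, 3, 4}: for each column it gives the position of its
++   // first entry in the value/index arrays, i.e. the usual compressed storage scheme.
++ * @endcode
++ */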
++
++template<typename Scalar, int _Flags>
++class SparseMatrix<Scalar,_Flags>::InnerIterator
++{
++  public:
++    InnerIterator(const SparseMatrix& mat, int outer)
++      : m_matrix(mat), m_id(mat.m_outerIndex[outer]), m_start(m_id), m_end(mat.m_outerIndex[outer+1])
++    {}
++
++    InnerIterator& operator++() { m_id++; return *this; }
++
++    Scalar value() { return m_matrix.m_data.value(m_id); }
++
++    int index() const { return m_matrix.m_data.index(m_id); }
++
++    operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
++
++  protected:
++    const SparseMatrix& m_matrix;
++    int m_id;
++    const int m_start;
++    const int m_end;
++};
++
++#endif // EIGEN_SPARSEMATRIX_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseProduct.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseProduct.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseProduct.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseProduct.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,329 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SPARSEPRODUCT_H
++#define EIGEN_SPARSEPRODUCT_H
++
++// sparse product return type specialization
++template<typename Lhs, typename Rhs>
++struct ProductReturnType<Lhs,Rhs,SparseProduct>
++{
++  typedef typename ei_traits<Lhs>::Scalar Scalar;
++  enum {
++    LhsRowMajor = ei_traits<Lhs>::Flags & RowMajorBit,
++    RhsRowMajor = ei_traits<Rhs>::Flags & RowMajorBit,
++    TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
++    TransposeLhs = LhsRowMajor && (!RhsRowMajor)
++  };
++
++  // FIXME if we transpose let's evaluate to a LinkedVectorMatrix since it is the
++  // type of the temporary to perform the transpose op
++  typedef typename ei_meta_if<TransposeLhs,
++    SparseMatrix<Scalar,0>,
++    typename ei_nested<Lhs,Rhs::RowsAtCompileTime>::type>::ret LhsNested;
++
++  typedef typename ei_meta_if<TransposeRhs,
++    SparseMatrix<Scalar,0>,
++    typename ei_nested<Rhs,Lhs::RowsAtCompileTime>::type>::ret RhsNested;
++
++  typedef Product<typename ei_unconst<LhsNested>::type,
++                  typename ei_unconst<RhsNested>::type, SparseProduct> Type;
++};
++
++template<typename LhsNested, typename RhsNested>
++struct ei_traits<Product<LhsNested, RhsNested, SparseProduct> >
++{
++  // clean the nested types:
++  typedef typename ei_unconst<typename ei_unref<LhsNested>::type>::type _LhsNested;
++  typedef typename ei_unconst<typename ei_unref<RhsNested>::type>::type _RhsNested;
++  typedef typename _LhsNested::Scalar Scalar;
++
++  enum {
++    LhsCoeffReadCost = _LhsNested::CoeffReadCost,
++    RhsCoeffReadCost = _RhsNested::CoeffReadCost,
++    LhsFlags = _LhsNested::Flags,
++    RhsFlags = _RhsNested::Flags,
++
++    RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
++    ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
++    InnerSize = EIGEN_ENUM_MIN(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
++
++    MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
++    MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
++
++    LhsRowMajor = LhsFlags & RowMajorBit,
++    RhsRowMajor = RhsFlags & RowMajorBit,
++
++    EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
++
++    RemovedBits = ~((EvalToRowMajor ? 0 : RowMajorBit)
++                | ((RowsAtCompileTime == Dynamic || ColsAtCompileTime == Dynamic) ? 0 : LargeBit)),
++
++    Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
++          | EvalBeforeAssigningBit
++          | EvalBeforeNestingBit,
++
++    CoeffReadCost = Dynamic
++  };
++};
++
++template<typename LhsNested, typename RhsNested> class Product<LhsNested,RhsNested,SparseProduct> : ei_no_assignment_operator,
++  public MatrixBase<Product<LhsNested, RhsNested, SparseProduct> >
++{
++  public:
++
++    EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
++
++  private:
++
++    typedef typename ei_traits<Product>::_LhsNested _LhsNested;
++    typedef typename ei_traits<Product>::_RhsNested _RhsNested;
++
++  public:
++
++    template<typename Lhs, typename Rhs>
++    inline Product(const Lhs& lhs, const Rhs& rhs)
++      : m_lhs(lhs), m_rhs(rhs)
++    {
++      ei_assert(lhs.cols() == rhs.rows());
++    }
++
++    Scalar coeff(int, int) const { ei_assert(false && "eigen internal error"); }
++    Scalar& coeffRef(int, int) { ei_assert(false && "eigen internal error"); }
++
++    inline int rows() const { return m_lhs.rows(); }
++    inline int cols() const { return m_rhs.cols(); }
++
++    const _LhsNested& lhs() const { return m_lhs; }
++    const _RhsNested& rhs() const { return m_rhs; }
++
++  protected:
++    const LhsNested m_lhs;
++    const RhsNested m_rhs;
++};
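++
++/* A usage sketch (illustrative; it assumes that, as the ProductReturnType specialization
++ * above is designed for, operator* selects this SparseProduct path when both operands
++ * are sparse):
++ * @code
++   SparseMatrix<double> A(100,100), B(100,100);
++   // ... fill A and B ...
++   SparseMatrix<double> C = A * B;       // evaluated by ei_sparse_product_selector below
++ * @endcode
++ */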
++
++template<typename Lhs, typename Rhs, typename ResultType,
++  int LhsStorageOrder = ei_traits<Lhs>::Flags&RowMajorBit,
++  int RhsStorageOrder = ei_traits<Rhs>::Flags&RowMajorBit,
++  int ResStorageOrder = ei_traits<ResultType>::Flags&RowMajorBit>
++struct ei_sparse_product_selector;
++
++template<typename Lhs, typename Rhs, typename ResultType>
++struct ei_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
++{
++  typedef typename ei_traits<typename ei_cleantype<Lhs>::type>::Scalar Scalar;
++
++  struct ListEl
++  {
++    int next;
++    int index;
++    Scalar value;
++  };
++
++  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
++  {
++    // make sure to call innerSize/outerSize since we fake the storage order.
++    int rows = lhs.innerSize();
++    int cols = rhs.outerSize();
++    int size = lhs.outerSize();
++    ei_assert(size == rhs.rows());
++
++    // allocate a temporary buffer
++    Scalar* buffer = new Scalar[rows];
++
++    // estimate the number of non zero entries
++    float ratioLhs = float(lhs.nonZeros())/float(lhs.rows()*lhs.cols());
++    float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
++    float ratioRes = std::min(ratioLhs * avgNnzPerRhsColumn, 1.f);
++
++    res.resize(rows, cols);
++    res.startFill(ratioRes*rows*cols);
++    for (int j=0; j<cols; ++j)
++    {
++      // let's do a more accurate determination of the nnz ratio for the current column j of res
++      //float ratioColRes = std::min(ratioLhs * rhs.innerNonZeros(j), 1.f);
++      // FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
++      float ratioColRes = ratioRes;
++      if (ratioColRes>0.1)
++      {
++        // dense path, the scalar * columns products are accumulated into a dense column
++        Scalar* __restrict__ tmp = buffer;
++        // set to zero
++        for (int k=0; k<rows; ++k)
++          tmp[k] = 0;
++        for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
++        {
++          // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
++          Scalar x = rhsIt.value();
++          for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
++          {
++            tmp[lhsIt.index()] += lhsIt.value() * x;
++          }
++        }
++        // copy the temporary to the respective res.col()
++        for (int k=0; k<rows; ++k)
++          if (tmp[k]!=0)
++            res.fill(k, j) = tmp[k];
++      }
++      else
++      {
++        ListEl* __restrict__ tmp = reinterpret_cast<ListEl*>(buffer);
++        // sparse path, the scalar * columns products are accumulated into a linked list
++        int tmp_size = 0;
++        int tmp_start = -1;
++        for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
++        {
++          int tmp_el = tmp_start;
++          for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
++          {
++            Scalar v = lhsIt.value() * rhsIt.value();
++            int id = lhsIt.index();
++            if (tmp_size==0)
++            {
++              tmp_start = 0;
++              tmp_el = 0;
++              tmp_size++;
++              tmp[0].value = v;
++              tmp[0].index = id;
++              tmp[0].next = -1;
++            }
++            else if (id<tmp[tmp_start].index)
++            {
++              tmp[tmp_size].value = v;
++              tmp[tmp_size].index = id;
++              tmp[tmp_size].next = tmp_start;
++              tmp_start = tmp_size;
++              tmp_size++;
++            }
++            else
++            {
++              int nextel = tmp[tmp_el].next;
++              while (nextel >= 0 && tmp[nextel].index<=id)
++              {
++                tmp_el = nextel;
++                nextel = tmp[nextel].next;
++              }
++
++              if (tmp[tmp_el].index==id)
++              {
++                tmp[tmp_el].value += v;
++              }
++              else
++              {
++                tmp[tmp_size].value = v;
++                tmp[tmp_size].index = id;
++                tmp[tmp_size].next = tmp[tmp_el].next;
++                tmp[tmp_el].next = tmp_size;
++                tmp_size++;
++              }
++            }
++          }
++        }
++        int k = tmp_start;
++        while (k>=0)
++        {
++          if (tmp[k].value!=0)
++            res.fill(tmp[k].index, j) = tmp[k].value;
++          k = tmp[k].next;
++        }
++      }
++    }
++    res.endFill();
++  }
++};
++
++template<typename Lhs, typename Rhs, typename ResultType>
++struct ei_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
++{
++  typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
++  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
++  {
++    SparseTemporaryType _res(res.rows(), res.cols());
++    ei_sparse_product_selector<Lhs,Rhs,SparseTemporaryType,ColMajor,ColMajor,ColMajor>::run(lhs, rhs, _res);
++    res = _res;
++  }
++};
++
++template<typename Lhs, typename Rhs, typename ResultType>
++struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
++{
++  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
++  {
++    // transpose the product and pretend the matrices are column-major
++    ei_sparse_product_selector<Rhs,Lhs,ResultType,ColMajor,ColMajor,ColMajor>::run(rhs, lhs, res);
++  }
++};
++
++template<typename Lhs, typename Rhs, typename ResultType>
++struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
++{
++  typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
++  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
++  {
++    // transpose the product and pretend the matrices are column-major
++    ei_sparse_product_selector<Rhs,Lhs,ResultType,ColMajor,ColMajor,RowMajor>::run(rhs, lhs, res);
++  }
++};
++
++// NOTE we should eventually transpose one argument even in this case, since the trivial
++// dot-product approach might be expensive if the result is not dense.
++// template<typename Lhs, typename Rhs, typename ResultType, int ResStorageOrder>
++// struct ei_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ResStorageOrder>
++// {
++//   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
++//   {
++//     // trivial product as lhs.row/rhs.col dot products
++//     // loop over the preferred order of the result
++//   }
++// };
++
++// NOTE the 2 other cases (col row *) must never occur since they are caught
++// by ProductReturnType, which transforms them to (col col *) by evaluating the rhs.
++
++
++template<typename Derived>
++template<typename Lhs, typename Rhs>
++inline Derived& MatrixBase<Derived>::lazyAssign(const Product<Lhs,Rhs,SparseProduct>& product)
++{
++//   std::cout << "sparse product to dense\n";
++  ei_sparse_product_selector<
++    typename ei_cleantype<Lhs>::type,
++    typename ei_cleantype<Rhs>::type,
++    typename ei_cleantype<Derived>::type>::run(product.lhs(),product.rhs(),derived());
++  return derived();
++}
++
++template<typename Derived>
++template<typename Lhs, typename Rhs>
++inline Derived& SparseMatrixBase<Derived>::operator=(const Product<Lhs,Rhs,SparseProduct>& product)
++{
++//   std::cout << "sparse product to sparse\n";
++  ei_sparse_product_selector<
++    typename ei_cleantype<Lhs>::type,
++    typename ei_cleantype<Rhs>::type,
++    Derived>::run(product.lhs(),product.rhs(),derived());
++  return derived();
++}
++
++#endif // EIGEN_SPARSEPRODUCT_H
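+
+The dense-path strategy above (accumulate the scalar-times-column products into a dense
+buffer, then compress the non-zeros back out) can be illustrated with a small standalone
+sketch. It uses plain standard-library containers instead of Eigen's sparse types, and
+the SparseCol/buildColumn names are purely illustrative:
+
+  #include <vector>
+  #include <utility>
+  #include <cstddef>
+
+  // a sparse column stored as sorted (row index, value) pairs
+  typedef std::vector<std::pair<int,double> > SparseCol;
+
+  // computes column j of res = lhs * rhs: lhsCols holds the columns of lhs
+  // (column-major), rhsCol is column j of rhs, rows is lhs.rows()
+  SparseCol buildColumn(const std::vector<SparseCol>& lhsCols,
+                        const SparseCol& rhsCol, int rows)
+  {
+    std::vector<double> tmp(rows, 0.0);                 // dense accumulator
+    for (std::size_t r = 0; r < rhsCol.size(); ++r)
+    {
+      const double x = rhsCol[r].second;                // rhs(k, j)
+      const SparseCol& lhsCol = lhsCols[rhsCol[r].first];
+      for (std::size_t l = 0; l < lhsCol.size(); ++l)
+        tmp[lhsCol[l].first] += lhsCol[l].second * x;   // tmp += x * lhs.col(k)
+    }
+    SparseCol out;
+    for (int k = 0; k < rows; ++k)                      // compress back to sparse storage
+      if (tmp[k] != 0.0)
+        out.push_back(std::make_pair(k, tmp[k]));
+    return out;
+  }
+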
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseSetter.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseSetter.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseSetter.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseSetter.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,102 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SPARSESETTER_H
++#define EIGEN_SPARSESETTER_H
++
++template<typename MatrixType, int AccessPattern,
++  int IsSupported = ei_support_access_pattern<MatrixType,AccessPattern>::ret>
++struct ei_sparse_setter_selector;
++
++template<typename MatrixType,
++         int AccessPattern,
++         typename WrapperType = typename ei_sparse_setter_selector<MatrixType,AccessPattern>::type>
++class SparseSetter
++{
++    typedef typename ei_unref<WrapperType>::type _WrapperType;
++  public:
++
++    inline SparseSetter(MatrixType& matrix) : m_wrapper(matrix), mp_matrix(&matrix) {}
++
++    ~SparseSetter()
++    { *mp_matrix = m_wrapper; }
++
++    inline _WrapperType* operator->() { return &m_wrapper; }
++
++    inline _WrapperType& operator*() { return m_wrapper; }
++
++  protected:
++
++    WrapperType m_wrapper;
++    MatrixType* mp_matrix;
++};
++
++template<typename MatrixType, int AccessPattern>
++struct ei_sparse_setter_selector<MatrixType, AccessPattern, AccessPatternSupported>
++{
++  typedef MatrixType& type;
++};
++
++// forward each class derived from SparseMatrixBase to the generic SparseMatrixBase specializations
++template<typename Scalar, int Flags, int AccessPattern>
++struct ei_sparse_setter_selector<SparseMatrix<Scalar,Flags>, AccessPattern, AccessPatternNotSupported>
++: public ei_sparse_setter_selector<SparseMatrixBase<SparseMatrix<Scalar,Flags> >,AccessPattern, AccessPatternNotSupported>
++{};
++
++template<typename Scalar, int Flags, int AccessPattern>
++struct ei_sparse_setter_selector<LinkedVectorMatrix<Scalar,Flags>, AccessPattern, AccessPatternNotSupported>
++: public ei_sparse_setter_selector<SparseMatrixBase<LinkedVectorMatrix<Scalar,Flags> >,AccessPattern, AccessPatternNotSupported>
++{};
++
++template<typename Scalar, int Flags, int AccessPattern>
++struct ei_sparse_setter_selector<HashMatrix<Scalar,Flags>, AccessPattern, AccessPatternNotSupported>
++: public ei_sparse_setter_selector<SparseMatrixBase<HashMatrix<Scalar,Flags> >,AccessPattern, AccessPatternNotSupported>
++{};
++
++// generic SparseMatrixBase specializations
++template<typename Derived>
++struct ei_sparse_setter_selector<SparseMatrixBase<Derived>, RandomAccessPattern, AccessPatternNotSupported>
++{
++  typedef HashMatrix<typename Derived::Scalar, Derived::Flags> type;
++};
++
++template<typename Derived>
++struct ei_sparse_setter_selector<SparseMatrixBase<Derived>, OuterCoherentAccessPattern, AccessPatternNotSupported>
++{
++  typedef HashMatrix<typename Derived::Scalar, Derived::Flags> type;
++};
++
++template<typename Derived>
++struct ei_sparse_setter_selector<SparseMatrixBase<Derived>, InnerCoherentAccessPattern, AccessPatternNotSupported>
++{
++  typedef LinkedVectorMatrix<typename Derived::Scalar, Derived::Flags> type;
++};
++
++template<typename Derived>
++struct ei_sparse_setter_selector<SparseMatrixBase<Derived>, FullyCoherentAccessPattern, AccessPatternNotSupported>
++{
++  typedef SparseMatrix<typename Derived::Scalar, Derived::Flags> type;
++};
++
++#endif // EIGEN_SPARSESETTER_H
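+
+The setter above relies on a simple write-back idiom: writes go into a temporary wrapper
+type that tolerates the requested access pattern, and the wrapper's destructor copies the
+result back into the real matrix. A minimal standalone sketch of that idiom, using a
+std::map as the random-access temporary and a sorted (index, value) vector as the coherent
+target (all names below are illustrative, not Eigen API):
+
+  #include <map>
+  #include <vector>
+  #include <utility>
+  #include <cstddef>
+
+  struct ColumnSetter
+  {
+    // coherent storage: sorted (index, value) pairs
+    typedef std::vector<std::pair<int,double> > Column;
+
+    explicit ColumnSetter(Column& col) : m_target(&col)
+    {
+      for (std::size_t i = 0; i < col.size(); ++i)
+        m_temp[col[i].first] = col[i].second;            // load the existing entries
+    }
+
+    double& operator()(int index) { return m_temp[index]; } // random-order writes
+
+    ~ColumnSetter()                                      // write back, sorted by index
+    { m_target->assign(m_temp.begin(), m_temp.end()); }
+
+    std::map<int,double> m_temp;
+    Column* m_target;
+  };
+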
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseUtil.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseUtil.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/SparseUtil.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/SparseUtil.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,62 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SPARSEUTIL_H
++#define EIGEN_SPARSEUTIL_H
++
++#ifdef NDEBUG
++#define EIGEN_DBG_SPARSE(X)
++#else
++#define EIGEN_DBG_SPARSE(X) X
++#endif
++
++template<typename Derived> class SparseMatrixBase;
++template<typename _Scalar, int _Flags = 0> class SparseMatrix;
++template<typename _Scalar, int _Flags = 0> class HashMatrix;
++template<typename _Scalar, int _Flags = 0> class LinkedVectorMatrix;
++
++const int AccessPatternNotSupported = 0x0;
++const int AccessPatternSupported    = 0x1;
++
++
++template<typename MatrixType, int AccessPattern> struct ei_support_access_pattern
++{
++  enum { ret = (int(ei_traits<MatrixType>::SupportedAccessPatterns) & AccessPattern) == AccessPattern
++             ? AccessPatternSupported
++             : AccessPatternNotSupported
++  };
++};
++
++template<typename T> class ei_eval<T,Sparse>
++{
++    typedef typename ei_traits<T>::Scalar _Scalar;
++    enum {
++          _Flags = ei_traits<T>::Flags
++    };
++
++  public:
++    typedef SparseMatrix<_Scalar, _Flags> type;
++};
++
++#endif // EIGEN_SPARSEUTIL_H
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/TriangularSolver.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/TriangularSolver.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/Sparse/TriangularSolver.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/Sparse/TriangularSolver.h	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,169 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
++#define EIGEN_SPARSETRIANGULARSOLVER_H
++
++template<typename Lhs, typename Rhs,
++  int TriangularPart = (int(Lhs::Flags) & LowerTriangularBit)
++                     ? Lower
++                     : (int(Lhs::Flags) & UpperTriangularBit)
++                     ? Upper
++                     : -1,
++  int StorageOrder = int(Lhs::Flags) & RowMajorBit ? RowMajor : ColMajor
++  >
++struct ei_sparse_trisolve_selector;
++
++// forward substitution, row-major
++template<typename Lhs, typename Rhs>
++struct ei_sparse_trisolve_selector<Lhs,Rhs,Lower,RowMajor>
++{
++  typedef typename Rhs::Scalar Scalar;
++  static void run(const Lhs& lhs, const Rhs& rhs, Rhs& res)
++  {
++    for(int col=0 ; col<rhs.cols() ; ++col)
++    {
++      for(int i=0; i<lhs.rows(); ++i)
++      {
++        Scalar tmp = rhs.coeff(i,col);
++        Scalar lastVal = 0;
++        int lastIndex = 0;
++        for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
++        {
++          lastVal = it.value();
++          lastIndex = it.index();
++          if (lastIndex == i)
++            break; // the diagonal coefficient must not be folded into the sum
++          tmp -= lastVal * res.coeff(lastIndex,col);
++        }
++        if (Lhs::Flags & UnitDiagBit)
++          res.coeffRef(i,col) = tmp;
++        else
++        {
++          ei_assert(lastIndex==i);
++          res.coeffRef(i,col) = tmp/lastVal;
++        }
++      }
++    }
++  }
++};
++
++// backward substitution, row-major
++template<typename Lhs, typename Rhs>
++struct ei_sparse_trisolve_selector<Lhs,Rhs,Upper,RowMajor>
++{
++  typedef typename Rhs::Scalar Scalar;
++  static void run(const Lhs& lhs, const Rhs& rhs, Rhs& res)
++  {
++    for(int col=0 ; col<rhs.cols() ; ++col)
++    {
++      for(int i=lhs.rows()-1 ; i>=0 ; --i)
++      {
++        Scalar tmp = rhs.coeff(i,col);
++        typename Lhs::InnerIterator it(lhs, i);
++        for(++it; it; ++it)
++        {
++          tmp -= it.value() * res.coeff(it.index(),col);
++        }
++
++        if (Lhs::Flags & UnitDiagBit)
++          res.coeffRef(i,col) = tmp;
++        else
++        {
++          typename Lhs::InnerIterator it(lhs, i);
++          ei_assert(it.index() == i);
++          res.coeffRef(i,col) = tmp/it.value();
++        }
++      }
++    }
++  }
++};
++
++// forward substitution, col-major
++template<typename Lhs, typename Rhs>
++struct ei_sparse_trisolve_selector<Lhs,Rhs,Lower,ColMajor>
++{
++  typedef typename Rhs::Scalar Scalar;
++  static void run(const Lhs& lhs, const Rhs& rhs, Rhs& res)
++  {
++    // NOTE we could avoid this copy using an in-place API
++    res = rhs;
++    for(int col=0 ; col<rhs.cols() ; ++col)
++    {
++      for(int i=0; i<lhs.cols(); ++i)
++      {
++        typename Lhs::InnerIterator it(lhs, i);
++        if(!(Lhs::Flags & UnitDiagBit))
++        {
++          ei_assert(it.index()==i);
++          res.coeffRef(i,col) /= it.value();
++        }
++        Scalar tmp = res.coeffRef(i,col);
++        for(++it; it; ++it)
++          res.coeffRef(it.index(), col) -= tmp * it.value();
++      }
++    }
++  }
++};
++
++// backward substitution, col-major
++template<typename Lhs, typename Rhs>
++struct ei_sparse_trisolve_selector<Lhs,Rhs,Upper,ColMajor>
++{
++  typedef typename Rhs::Scalar Scalar;
++  static void run(const Lhs& lhs, const Rhs& rhs, Rhs& res)
++  {
++    // NOTE we could avoid this copy using an in-place API
++    res = rhs;
++    for(int col=0 ; col<rhs.cols() ; ++col)
++    {
++      for(int i=lhs.cols()-1; i>=0; --i)
++      {
++        if(!(Lhs::Flags & UnitDiagBit))
++        {
++          // FIXME lhs.coeff(i,i) might not always be efficient, even though it should
++          // simply be the last element of the column!
++          res.coeffRef(i,col) /= lhs.coeff(i,i);
++        }
++        Scalar tmp = res.coeffRef(i,col);
++        typename Lhs::InnerIterator it(lhs, i);
++        for(; it && it.index()<i; ++it)
++          res.coeffRef(it.index(), col) -= tmp * it.value();
++      }
++    }
++  }
++};
++
++template<typename Derived>
++template<typename OtherDerived>
++OtherDerived SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
++{
++  ei_assert(derived().cols() == other.rows());
++  ei_assert(!(Flags & ZeroDiagBit));
++  ei_assert(Flags & (UpperTriangularBit|LowerTriangularBit));
++
++  OtherDerived res(other.rows(), other.cols());
++  ei_sparse_trisolve_selector<Derived, OtherDerived>::run(derived(), other.derived(), res);
++  return res;
++}
++
++#endif // EIGEN_SPARSETRIANGULARSOLVER_H
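+
+The row-major Lower case above is ordinary forward substitution, restricted to the stored
+non-zeros of each row. The same recurrence on a dense, row-major lower triangular matrix
+looks like this (plain arrays instead of Eigen types; the solveLowerDense name is
+illustrative):
+
+  #include <vector>
+  #include <cassert>
+
+  // L is n x n lower triangular, stored row-major in a flat vector; b has n entries.
+  std::vector<double> solveLowerDense(const std::vector<double>& L,
+                                      const std::vector<double>& b,
+                                      int n, bool unitDiag = false)
+  {
+    std::vector<double> x(n);
+    for (int i = 0; i < n; ++i)
+    {
+      double tmp = b[i];
+      for (int j = 0; j < i; ++j)
+        tmp -= L[i*n + j] * x[j];                 // subtract the known contributions
+      if (unitDiag)
+        x[i] = tmp;                               // implicit unit diagonal
+      else
+      {
+        assert(L[i*n + i] != 0.0);                // no zero pivots allowed
+        x[i] = tmp / L[i*n + i];
+      }
+    }
+    return x;
+  }
+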
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/SVD/SVD.h koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/SVD/SVD.h
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/src/SVD/SVD.h	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/src/SVD/SVD.h	2008-08-20 18:52:55.000000000 +0200
+@@ -0,0 +1,506 @@
++// This file is part of Eigen, a lightweight C++ template library
++// for linear algebra. Eigen itself is part of the KDE project.
++//
++// Copyright (C) 2008 Gael Guennebaud <g.gael at free.fr>
++//
++// Eigen is free software; you can redistribute it and/or
++// modify it under the terms of the GNU Lesser General Public
++// License as published by the Free Software Foundation; either
++// version 3 of the License, or (at your option) any later version.
++//
++// Alternatively, you can redistribute it and/or
++// modify it under the terms of the GNU General Public License as
++// published by the Free Software Foundation; either version 2 of
++// the License, or (at your option) any later version.
++//
++// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
++// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
++// GNU General Public License for more details.
++//
++// You should have received a copy of the GNU Lesser General Public
++// License and a copy of the GNU General Public License along with
++// Eigen. If not, see <http://www.gnu.org/licenses/>.
++
++#ifndef EIGEN_SVD_H
++#define EIGEN_SVD_H
++
++/** \ingroup SVD_Module
++  *
++  * \class SVD
++  *
++  * \brief Standard SVD decomposition of a matrix and associated features
++  *
++  * \param MatrixType the type of the matrix of which we are computing the SVD decomposition
++  *
++  * This class performs a standard SVD decomposition of a real matrix A of size \c M x \c N
++  * with \c M \>= \c N.
++  *
++  * \sa MatrixBase::svd()
++  */
++template<typename MatrixType> class SVD
++{
++  private:
++    typedef typename MatrixType::Scalar Scalar;
++    typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
++
++    enum {
++      PacketSize = ei_packet_traits<Scalar>::size,
++      AlignmentMask = int(PacketSize)-1,
++      MinSize = EIGEN_ENUM_MIN(MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime)
++    };
++    
++    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, 1> ColVector;
++    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, 1> RowVector;
++    
++    typedef Matrix<Scalar, MatrixType::RowsAtCompileTime, MinSize> MatrixUType;
++    typedef Matrix<Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime> MatrixVType;
++    typedef Matrix<Scalar, MinSize, 1> SingularValuesType;
++
++  public:
++  
++    SVD(const MatrixType& matrix)
++      : m_matU(matrix.rows(), std::min(matrix.rows(), matrix.cols())),
++        m_matV(matrix.cols(),matrix.cols()),
++        m_sigma(std::min(matrix.rows(),matrix.cols()))
++    {
++      compute(matrix);
++    }
++
++    template<typename OtherDerived, typename ResultType>
++    void solve(const MatrixBase<OtherDerived> &b, ResultType* result) const;
++
++    const MatrixUType& matrixU() const { return m_matU; }
++    const SingularValuesType& singularValues() const { return m_sigma; }
++    const MatrixVType& matrixV() const { return m_matV; }
++
++    void compute(const MatrixType& matrix);
++
++  protected:
++    /** \internal */
++    MatrixUType m_matU;
++    /** \internal */
++    MatrixVType m_matV;
++    /** \internal */
++    SingularValuesType m_sigma;
++};
++
++/** Computes / recomputes the SVD decomposition A = U S V^* of \a matrix
++  *
++  * \note this code has been adapted from JAMA (public domain)
++  */
++template<typename MatrixType>
++void SVD<MatrixType>::compute(const MatrixType& matrix)
++{
++  const int m = matrix.rows();
++  const int n = matrix.cols();
++  const int nu = std::min(m,n);
++  
++  m_matU.resize(m, nu);
++  m_matU.setZero();
++  m_sigma.resize(std::min(m,n));
++  m_matV.resize(n,n);
++
++  RowVector e(n);
++  ColVector work(m);
++  MatrixType matA(matrix);
++  const bool wantu = true;
++  const bool wantv = true;
++  int i=0, j=0, k=0;
++
++  // Reduce A to bidiagonal form, storing the diagonal elements
++  // in s and the super-diagonal elements in e.
++  int nct = std::min(m-1,n);
++  int nrt = std::max(0,std::min(n-2,m));
++  for (k = 0; k < std::max(nct,nrt); k++)
++  {
++    if (k < nct)
++    {
++      // Compute the transformation for the k-th column and
++      // place the k-th diagonal in m_sigma[k].
++      m_sigma[k] = matA.col(k).end(m-k).norm();
++      if (m_sigma[k] != 0.0) // FIXME
++      {
++        if (matA(k,k) < 0.0)
++          m_sigma[k] = -m_sigma[k];
++        matA.col(k).end(m-k) /= m_sigma[k];
++        matA(k,k) += 1.0;
++      }
++      m_sigma[k] = -m_sigma[k];
++    }
++    
++    for (j = k+1; j < n; j++)
++    {
++      if ((k < nct) && (m_sigma[k] != 0.0))
++      {
++        // Apply the transformation.
++        Scalar t = matA.col(k).end(m-k).dot(matA.col(j).end(m-k)); // FIXME dot product or cwise prod + .sum() ??
++        t = -t/matA(k,k);
++        matA.col(j).end(m-k) += t * matA.col(k).end(m-k);
++      }
++
++      // Place the k-th row of A into e for the
++      // subsequent calculation of the row transformation.
++      e[j] = matA(k,j);
++    }
++
++    // Place the transformation in U for subsequent back multiplication.
++    if (wantu & (k < nct))
++      m_matU.col(k).end(m-k) = matA.col(k).end(m-k);
++
++    if (k < nrt)
++    {
++      // Compute the k-th row transformation and place the
++      // k-th super-diagonal in e[k].
++      e[k] = e.end(n-k-1).norm();
++      if (e[k] != 0.0)
++      {
++          if (e[k+1] < 0.0)
++            e[k] = -e[k];
++          e.end(n-k-1) /= e[k];
++          e[k+1] += 1.0;
++      }
++      e[k] = -e[k];
++      if ((k+1 < m) & (e[k] != 0.0))
++      {
++        // Apply the transformation.
++        work.end(m-k-1) = matA.corner(BottomRight,m-k-1,n-k-1) * e.end(n-k-1);
++        for (j = k+1; j < n; j++)
++          matA.col(j).end(m-k-1) += (-e[j]/e[k+1]) * work.end(m-k-1);
++      }
++
++      // Place the transformation in V for subsequent back multiplication.
++      if (wantv)
++        m_matV.col(k).end(n-k-1) = e.end(n-k-1);
++    }
++  }
++
++
++  // Set up the final bidiagonal matrix of order p.
++  int p = std::min(n,m+1);
++  if (nct < n)
++    m_sigma[nct] = matA(nct,nct);
++  if (m < p)
++    m_sigma[p-1] = 0.0;
++  if (nrt+1 < p)
++    e[nrt] = matA(nrt,p-1);
++  e[p-1] = 0.0;
++
++  // If required, generate U.
++  if (wantu)
++  {
++    for (j = nct; j < nu; j++)
++    {
++      m_matU.col(j).setZero();
++      m_matU(j,j) = 1.0;
++    }
++    for (k = nct-1; k >= 0; k--)
++    {
++      if (m_sigma[k] != 0.0)
++      {
++        for (j = k+1; j < nu; j++)
++        {
++          Scalar t = m_matU.col(k).end(m-k).dot(m_matU.col(j).end(m-k)); // FIXME is it really a dot product we want ?
++          t = -t/m_matU(k,k);
++          m_matU.col(j).end(m-k) += t * m_matU.col(k).end(m-k);
++        }
++        m_matU.col(k).end(m-k) = - m_matU.col(k).end(m-k);
++        m_matU(k,k) = 1.0 + m_matU(k,k);
++        if (k-1>0)
++          m_matU.col(k).start(k-1).setZero();
++      }
++      else
++      {
++        m_matU.col(k).setZero();
++        m_matU(k,k) = 1.0;
++      }
++    }
++  }
++
++  // If required, generate V.
++  if (wantv)
++  {
++    for (k = n-1; k >= 0; k--)
++    {
++      if ((k < nrt) & (e[k] != 0.0))
++      {
++        for (j = k+1; j < nu; j++)
++        {
++          Scalar t = m_matV.col(k).end(n-k-1).dot(m_matV.col(j).end(n-k-1)); // FIXME is it really a dot product we want ?
++          t = -t/m_matV(k+1,k);
++          m_matV.col(j).end(n-k-1) += t * m_matV.col(k).end(n-k-1);
++        }
++      }
++      m_matV.col(k).setZero();
++      m_matV(k,k) = 1.0;
++    }
++  }
++
++  // Main iteration loop for the singular values.
++  int pp = p-1;
++  int iter = 0;
++  Scalar eps(pow(2.0,-52.0));
++  while (p > 0)
++  {
++    int k=0;
++    int kase=0;
++
++    // Here is where a test for too many iterations would go.
++
++    // This section of the program inspects for
++    // negligible elements in the s and e arrays.  On
++    // completion the variables kase and k are set as follows.
++
++    // kase = 1     if s(p) and e[k-1] are negligible and k<p
++    // kase = 2     if s(k) is negligible and k<p
++    // kase = 3     if e[k-1] is negligible, k<p, and
++    //              s(k), ..., s(p) are not negligible (qr step).
++    // kase = 4     if e(p-1) is negligible (convergence).
++
++    for (k = p-2; k >= -1; k--)
++    {
++      if (k == -1)
++          break;
++      if (ei_abs(e[k]) <= eps*(ei_abs(m_sigma[k]) + ei_abs(m_sigma[k+1])))
++      {
++          e[k] = 0.0;
++          break;
++      }
++    }
++    if (k == p-2)
++    {
++      kase = 4;
++    }
++    else
++    {
++      int ks;
++      for (ks = p-1; ks >= k; ks--)
++      {
++        if (ks == k)
++          break;
++        Scalar t( (ks != p ? ei_abs(e[ks]) : 0.) + (ks != k+1 ? ei_abs(e[ks-1]) : 0.));
++        if (ei_abs(m_sigma[ks]) <= eps*t)
++        {
++          m_sigma[ks] = 0.0;
++          break;
++        }
++      }
++      if (ks == k)
++      {
++        kase = 3;
++      }
++      else if (ks == p-1)
++      {
++        kase = 1;
++      }
++      else
++      {
++        kase = 2;
++        k = ks;
++      }
++    }
++    k++;
++
++    // Perform the task indicated by kase.
++    switch (kase)
++    {
++
++      // Deflate negligible s(p).
++      case 1:
++      {
++        Scalar f(e[p-2]);
++        e[p-2] = 0.0;
++        for (j = p-2; j >= k; j--)
++        {
++          Scalar t(hypot(m_sigma[j],f));
++          Scalar cs(m_sigma[j]/t);
++          Scalar sn(f/t);
++          m_sigma[j] = t;
++          if (j != k)
++          {
++            f = -sn*e[j-1];
++            e[j-1] = cs*e[j-1];
++          }
++          if (wantv)
++          {
++            for (i = 0; i < n; i++)
++            {
++              t = cs*m_matV(i,j) + sn*m_matV(i,p-1);
++              m_matV(i,p-1) = -sn*m_matV(i,j) + cs*m_matV(i,p-1);
++              m_matV(i,j) = t;
++            }
++          }
++        }
++      }
++      break;
++
++      // Split at negligible s(k).
++      case 2:
++      {
++        Scalar f(e[k-1]);
++        e[k-1] = 0.0;
++        for (j = k; j < p; j++)
++        {
++          Scalar t(hypot(m_sigma[j],f));
++          Scalar cs( m_sigma[j]/t);
++          Scalar sn(f/t);
++          m_sigma[j] = t;
++          f = -sn*e[j];
++          e[j] = cs*e[j];
++          if (wantu)
++          {
++            for (i = 0; i < m; i++)
++            {
++              t = cs*m_matU(i,j) + sn*m_matU(i,k-1);
++              m_matU(i,k-1) = -sn*m_matU(i,j) + cs*m_matU(i,k-1);
++              m_matU(i,j) = t;
++            }
++          }
++        }
++      }
++      break;
++
++      // Perform one qr step.
++      case 3:
++      {
++        // Calculate the shift.
++        Scalar scale = std::max(std::max(std::max(std::max(
++                        ei_abs(m_sigma[p-1]),ei_abs(m_sigma[p-2])),ei_abs(e[p-2])),
++                        ei_abs(m_sigma[k])),ei_abs(e[k]));
++        Scalar sp = m_sigma[p-1]/scale;
++        Scalar spm1 = m_sigma[p-2]/scale;
++        Scalar epm1 = e[p-2]/scale;
++        Scalar sk = m_sigma[k]/scale;
++        Scalar ek = e[k]/scale;
++        Scalar b = ((spm1 + sp)*(spm1 - sp) + epm1*epm1)/2.0;
++        Scalar c = (sp*epm1)*(sp*epm1);
++        Scalar shift = 0.0;
++        if ((b != 0.0) || (c != 0.0))
++        {
++          shift = ei_sqrt(b*b + c);
++          if (b < 0.0)
++            shift = -shift;
++          shift = c/(b + shift);
++        }
++        Scalar f = (sk + sp)*(sk - sp) + shift;
++        Scalar g = sk*ek;
++
++        // Chase zeros.
++
++        for (j = k; j < p-1; j++)
++        {
++          Scalar t = hypot(f,g);
++          Scalar cs = f/t;
++          Scalar sn = g/t;
++          if (j != k)
++            e[j-1] = t;
++          f = cs*m_sigma[j] + sn*e[j];
++          e[j] = cs*e[j] - sn*m_sigma[j];
++          g = sn*m_sigma[j+1];
++          m_sigma[j+1] = cs*m_sigma[j+1];
++          if (wantv)
++          {
++            for (i = 0; i < n; i++)
++            {
++              t = cs*m_matV(i,j) + sn*m_matV(i,j+1);
++              m_matV(i,j+1) = -sn*m_matV(i,j) + cs*m_matV(i,j+1);
++              m_matV(i,j) = t;
++            }
++          }
++          t = hypot(f,g);
++          cs = f/t;
++          sn = g/t;
++          m_sigma[j] = t;
++          f = cs*e[j] + sn*m_sigma[j+1];
++          m_sigma[j+1] = -sn*e[j] + cs*m_sigma[j+1];
++          g = sn*e[j+1];
++          e[j+1] = cs*e[j+1];
++          if (wantu && (j < m-1))
++          {
++            for (i = 0; i < m; i++)
++            {
++              t = cs*m_matU(i,j) + sn*m_matU(i,j+1);
++              m_matU(i,j+1) = -sn*m_matU(i,j) + cs*m_matU(i,j+1);
++              m_matU(i,j) = t;
++            }
++          }
++        }
++        e[p-2] = f;
++        iter = iter + 1;
++      }
++      break;
++
++      // Convergence.
++      case 4:
++      {
++        // Make the singular values positive.
++        if (m_sigma[k] <= 0.0)
++        {
++          m_sigma[k] = (m_sigma[k] < 0.0 ? -m_sigma[k] : 0.0);
++          if (wantv)
++            m_matV.col(k).start(pp+1) = -m_matV.col(k).start(pp+1);
++        }
++
++        // Order the singular values.
++        while (k < pp)
++        {
++          if (m_sigma[k] >= m_sigma[k+1])
++            break;
++          Scalar t = m_sigma[k];
++          m_sigma[k] = m_sigma[k+1];
++          m_sigma[k+1] = t;
++          if (wantv && (k < n-1))
++            m_matV.col(k).swap(m_matV.col(k+1));
++          if (wantu && (k < m-1))
++            m_matU.col(k).swap(m_matU.col(k+1));
++          k++;
++        }
++        iter = 0;
++        p--;
++      }
++      break;
++    } // end big switch
++  } // end iterations
++}
++
++/** \returns the solution of \f$ A x = b \f$ using the current SVD decomposition of A.
++  * The parts of the solution corresponding to zero singular values are ignored.
++  *
++  * \sa MatrixBase::svd(), LU::solve(), Cholesky::solve()
++  */
++template<typename MatrixType>
++template<typename OtherDerived, typename ResultType>
++void SVD<MatrixType>::solve(const MatrixBase<OtherDerived> &b, ResultType* result) const
++{
++  const int rows = m_matU.rows();
++  ei_assert(b.rows() == rows);
++
++  for (int j=0; j<b.cols(); ++j)
++  {
++    Matrix<Scalar,MatrixUType::RowsAtCompileTime,1> aux = m_matU.transpose() * b.col(j);
++
++    for (int i = 0; i <m_matU.cols(); i++)
++    {
++      Scalar si = m_sigma.coeff(i);
++      if (si != 0)
++        aux.coeffRef(i) /= si;
++      else
++        aux.coeffRef(i) = 0;
++    }
++
++    result->col(j) = m_matV * aux;
++  }
++}
++
++/** \svd_module
++  * \returns the SVD decomposition of \c *this
++  */
++template<typename Derived>
++inline const SVD<typename MatrixBase<Derived>::EvalType>
++MatrixBase<Derived>::svd() const
++{
++  return SVD<typename ei_eval<Derived>::type>(derived());
++}
++
++#endif // EIGEN_SVD_H
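+
+A short usage sketch of the interface defined above, using only entry points declared in
+this file (the SVD constructor, solve() and MatrixBase::svd()) together with Eigen's
+dynamic-size MatrixXd/VectorXd typedefs from the Core module; the numeric values are
+arbitrary:
+
+  #include <Eigen/SVD>
+  using namespace Eigen;
+
+  int svd_example()
+  {
+    MatrixXd A(3, 2);                 // M >= N, as this implementation requires
+    A(0,0) = 1; A(0,1) = 0;
+    A(1,0) = 0; A(1,1) = 1;
+    A(2,0) = 1; A(2,1) = 1;
+
+    VectorXd b(3);
+    b(0) = 1; b(1) = 2; b(2) = 3;
+
+    SVD<MatrixXd> svd(A);             // equivalently: A.svd()
+    VectorXd x(2);
+    svd.solve(b, &x);                 // least-squares solution of A x = b
+    return 0;
+  }
+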
+diff -Nrua koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/SVD koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/SVD
+--- koffice-1.9.96.0~that.is.really.1.9.95.10.orig/Eigen/SVD	1970-01-01 01:00:00.000000000 +0100
++++ koffice-1.9.96.0~that.is.really.1.9.95.10/Eigen/SVD	2008-08-20 18:52:56.000000000 +0200
+@@ -0,0 +1,22 @@
++#ifndef EIGEN_SVD_MODULE_H
++#define EIGEN_SVD_MODULE_H
++
++#include "Core"
++
++namespace Eigen {
++
++/** \defgroup SVD_Module SVD module
++  * This module provides SVD decomposition for (currently) real matrices.
++  * This decomposition is accessible via the following MatrixBase method:
++  *  - MatrixBase::svd()
++  *
++  * \code
++  * #include <Eigen/SVD>
++  * \endcode
++  */
++
++#include "src/SVD/SVD.h"
++
++} // namespace Eigen
++
++#endif // EIGEN_SVD_MODULE_H



