[shark] 63/79: Replaced axpy_prod with prod for all matrix-vector multiplications. The code is now much simpler in many cases
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Thu Nov 26 15:41:04 UTC 2015
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch master
in repository shark.
commit d94cfeed886537609ead659759e53937d0ed8dc3
Author: Oswin Krause <oswin.krause at di.ku.dk>
Date: Tue Oct 27 14:21:42 2015 +0100
Replaced axpy_prod with prod for all matrix-vector multiplications. The code is now much simpler in many cases.
---
include/shark/Algorithms/QP/QpMcLinear.h | 3 +-
.../Trainers/NormalizeComponentsWhitening.h | 4 +-
include/shark/LinAlg/BLAS/vector_expression.hpp | 6 +-
include/shark/LinAlg/BLAS/vector_proxy.hpp | 4 +
include/shark/LinAlg/Impl/solveSystem.inl | 11 +-
include/shark/LinAlg/rotations.h | 13 +-
include/shark/LinAlg/solveSystem.h | 4 +-
.../NegativeGaussianProcessEvidence.h | 3 +-
.../Distributions/MultiVariateNormalDistribution.h | 3 +-
.../Unsupervised/RBM/Neuronlayers/BinaryLayer.h | 13 +-
.../Unsupervised/RBM/Neuronlayers/BipolarLayer.h | 11 +-
.../Unsupervised/RBM/Neuronlayers/GaussianLayer.h | 4 +-
.../RBM/Neuronlayers/TruncatedExponentialLayer.h | 4 +-
patch.txt | 260 +++++++++++++++++++++
src/Algorithms/FisherLDA.cpp | 4 +-
src/Algorithms/PCA.cpp | 3 +-
src/Models/OnlineRNNet.cpp | 10 +-
src/Models/RNNet.cpp | 11 +-
18 files changed, 304 insertions(+), 67 deletions(-)
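Every hunk below applies the same mechanical rewrite: a result vector that was
sized (and often zeroed) by hand and then filled via axpy_prod becomes a direct
assignment from a prod expression, with noalias used when the target vector
already exists. A minimal sketch of the pattern, using the shark::blas calls
exactly as they appear in the hunks below (the umbrella header path is an
assumption):

    #include <shark/LinAlg/Base.h>

    using namespace shark;

    void matVecSketch(RealMatrix const& A, RealVector const& x, RealVector& y){
        // Old style: size (and zero) the result up front, then call axpy_prod.
        // The boolean selects initialize-to-zero versus accumulate-into; a
        // fifth argument, where used in this patch, scales the product.
        RealVector z(A.size1(), 0.0);
        axpy_prod(A, x, z, false);      // z += A x  (z was zeroed above)

        // New style: prod builds an expression; assigning it evaluates the
        // product and sizes the result automatically.
        RealVector z2 = prod(A, x);     // z2 = A x

        // Writing into an existing vector: noalias promises the target does
        // not overlap the operands, so the product is evaluated directly
        // into it.
        noalias(y) = prod(A, x);        // was axpy_prod(A, x, y), as in solveSystem.h
        noalias(y) -= prod(A, x);       // was axpy_prod(A, x, y, false, -1.0)
    }

The expression form also composes with proxies, so calls like
axpy_prod(w, x_i, wx, false) collapse to one-liners such as
RealVector wx = prod(w, x_i), as in the QpMcLinear.h hunk.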
diff --git a/include/shark/Algorithms/QP/QpMcLinear.h b/include/shark/Algorithms/QP/QpMcLinear.h
index 4cbc7be..976a924 100644
--- a/include/shark/Algorithms/QP/QpMcLinear.h
+++ b/include/shark/Algorithms/QP/QpMcLinear.h
@@ -209,8 +209,7 @@ public:
RealMatrixRow a = row(alpha, i);
// compute gradient and KKT violation
- RealVector wx(m_classes,0.0);
- axpy_prod(w,x_i,wx,false);
+ RealVector wx = prod(w,x_i);
RealVector g(m_classes);
double kkt = calcGradient(g, wx, a, C, y_i);
diff --git a/include/shark/Algorithms/Trainers/NormalizeComponentsWhitening.h b/include/shark/Algorithms/Trainers/NormalizeComponentsWhitening.h
index f04ab95..0cb538f 100644
--- a/include/shark/Algorithms/Trainers/NormalizeComponentsWhitening.h
+++ b/include/shark/Algorithms/Trainers/NormalizeComponentsWhitening.h
@@ -77,9 +77,7 @@ public:
RealMatrix whiteningMatrix = createWhiteningMatrix(covariance);
whiteningMatrix *= std::sqrt(m_targetVariance);
- RealVector offset(dc);
- axpy_prod(trans(whiteningMatrix),mean,offset);
- offset *= -1.0;
+ RealVector offset = -prod(trans(whiteningMatrix),mean);
model.setStructure(RealMatrix(trans(whiteningMatrix)), offset);
}
diff --git a/include/shark/LinAlg/BLAS/vector_expression.hpp b/include/shark/LinAlg/BLAS/vector_expression.hpp
index f0c0b00..e268ef1 100644
--- a/include/shark/LinAlg/BLAS/vector_expression.hpp
+++ b/include/shark/LinAlg/BLAS/vector_expression.hpp
@@ -63,16 +63,16 @@ public:
//computation kernels
template<class VecX>
void assign_to(vector_expression<VecX>& x, scalar_type alpha = scalar_type(1) )const{
- m_expression.assign_to(x,m_scalar*alpha);
+ m_expression.assign_to(x,alpha*m_scalar);
}
template<class VecX>
void plus_assign_to(vector_expression<VecX>& x, scalar_type alpha = scalar_type(1) )const{
- m_expression.plus_assign_to(x,m_scalar*alpha);
+ m_expression.plus_assign_to(x,alpha*m_scalar);
}
template<class VecX>
void minus_assign_to(vector_expression<VecX>& x, scalar_type alpha = scalar_type(1) )const{
- m_expression.minus_assign_to(x,m_scalar*alpha);
+ m_expression.minus_assign_to(x,alpha*m_scalar);
}
diff --git a/include/shark/LinAlg/BLAS/vector_proxy.hpp b/include/shark/LinAlg/BLAS/vector_proxy.hpp
index 39ae8af..bf7f1ec 100644
--- a/include/shark/LinAlg/BLAS/vector_proxy.hpp
+++ b/include/shark/LinAlg/BLAS/vector_proxy.hpp
@@ -77,6 +77,10 @@ public:
size_type size() const {
return expression().size();
}
+
+ void clear(){
+ expression().clear();
+ }
// ---------
// Dense low level interface
diff --git a/include/shark/LinAlg/Impl/solveSystem.inl b/include/shark/LinAlg/Impl/solveSystem.inl
index 25abe27..8bfb77c 100644
--- a/include/shark/LinAlg/Impl/solveSystem.inl
+++ b/include/shark/LinAlg/Impl/solveSystem.inl
@@ -218,8 +218,7 @@ void shark::blas::solveSymmSemiDefiniteSystemInPlace(
symm_prod(trans(L),LTL);
//compute z= L^Tb
- RealVector z(rank);
- axpy_prod(trans(L),b,z);
+ RealVector z = prod(trans(L),b);
//compute cholesky factor of L^TL
RealMatrix LTLcholesky(rank,rank);
@@ -228,7 +227,7 @@ void shark::blas::solveSymmSemiDefiniteSystemInPlace(
//A'b = L(L^TL)^-1(L^TL)^-1z
solveTriangularCholeskyInPlace<SolveAXB>(LTLcholesky,z);
solveTriangularCholeskyInPlace<SolveAXB>(LTLcholesky,z);
- axpy_prod(L,z,b);
+ noalias(b)=prod(L,z);
}
//finally swap back into the unpermuted coordinate system
swap_rows_inverted(permutation,b);
@@ -333,8 +332,7 @@ void shark::blas::generalSolveSystemInPlace(
axpy_prod(trans(A),A,ATA);
//compute z=A^Tb
- RealVector z(n);
- axpy_prod(trans(A),b,z);
+ RealVector z = prod(trans(A),b);
//call recursively for the quadratic case
solveSymmSemiDefiniteSystemInPlace<System>(ATA,z);
@@ -350,8 +348,7 @@ void shark::blas::generalSolveSystemInPlace(
axpy_prod(A,trans(A),AAT);
//compute z=Ab
- RealVector z(m);
- axpy_prod(A,b,z);
+ RealVector z = prod(A,b);
//call recursively for the quadratic case
solveSymmSemiDefiniteSystemInPlace<System>(AAT,z);
diff --git a/include/shark/LinAlg/rotations.h b/include/shark/LinAlg/rotations.h
index 0641ea2..aa0f539 100644
--- a/include/shark/LinAlg/rotations.h
+++ b/include/shark/LinAlg/rotations.h
@@ -97,13 +97,11 @@ void applyHouseholderOnTheRight(
}
SIZE_CHECK(matrix().size2() == reflection().size());
- vector<T> temp(matrix().size1());
-
//Ax
- axpy_prod(matrix,reflection,temp);
+ vector<T> temp = prod(matrix,reflection);
//A -=beta*(Ax)x^T
- noalias(matrix()) -= beta * outer_prod(temp,reflection);
+ noalias(matrix()) -= beta * outer_prod(temp,reflection);
}
@@ -125,14 +123,11 @@ void applyHouseholderOnTheLeft(
matrix()*=1-beta;
return;
}
-
- vector<T> temp(matrix().size2());
-
//x^T A
- axpy_prod(trans(matrix),reflection,temp);
+ vector<T> temp = prod(trans(matrix),reflection);
//A -=beta*x(x^T A)
- noalias(matrix()) -= beta * outer_prod(reflection,temp);
+ noalias(matrix()) -= beta * outer_prod(reflection,temp);
}
/// \brief rotates a matrix using a householder reflection
diff --git a/include/shark/LinAlg/solveSystem.h b/include/shark/LinAlg/solveSystem.h
index bda1ba9..12e801b 100644
--- a/include/shark/LinAlg/solveSystem.h
+++ b/include/shark/LinAlg/solveSystem.h
@@ -318,7 +318,7 @@ void approxsolveSymmPosDefSystem(
vector<value_type> r = b;//current residual
if(initialSolution){
SIZE_CHECK(x().size() == dim);
- axpy_prod(A,x,r,false,-1.0);
+ noalias(r) -= prod(A,x);
if(norm_inf(r) > norm_inf(b)){
x().clear();
r = b;
@@ -334,7 +334,7 @@ void approxsolveSymmPosDefSystem(
vector<value_type> Ap(dim); //stores prod(A,p)
for(std::size_t i = 0; i != maxIt; ++i){
- axpy_prod(A,p,Ap);
+ noalias(Ap) = prod(A,p);
double rsqr=inner_prod(r,r);
double alpha = rsqr/inner_prod(p,Ap);
noalias(x())+=alpha*p;
diff --git a/include/shark/ObjectiveFunctions/NegativeGaussianProcessEvidence.h b/include/shark/ObjectiveFunctions/NegativeGaussianProcessEvidence.h
index 11fb800..c271f3e 100644
--- a/include/shark/ObjectiveFunctions/NegativeGaussianProcessEvidence.h
+++ b/include/shark/ObjectiveFunctions/NegativeGaussianProcessEvidence.h
@@ -195,8 +195,7 @@ public:
blas::solveTriangularCholeskyInPlace<blas::SolveAXB>(choleskyFactor,W);
//calculate z = Wt=M^-1 t
- RealVector z(N);
- axpy_prod(W,t,z);
+ RealVector z = prod(W,t);
// W is already initialized as the inverse of M, so we only need
// to change the sign and add z. to calculate W fully
diff --git a/include/shark/Statistics/Distributions/MultiVariateNormalDistribution.h b/include/shark/Statistics/Distributions/MultiVariateNormalDistribution.h
index 55b58c3..7120f63 100644
--- a/include/shark/Statistics/Distributions/MultiVariateNormalDistribution.h
+++ b/include/shark/Statistics/Distributions/MultiVariateNormalDistribution.h
@@ -208,8 +208,7 @@ public:
y=z;
blas::triangular_prod<blas::lower>(m_lowerCholesky,y);
}else{
- y.clear();
- axpy_prod(m_lowerCholesky,z,y,false);
+ noalias(y) = prod(m_lowerCholesky,z);
}
}
diff --git a/include/shark/Unsupervised/RBM/Neuronlayers/BinaryLayer.h b/include/shark/Unsupervised/RBM/Neuronlayers/BinaryLayer.h
index 67c9584..f927726 100644
--- a/include/shark/Unsupervised/RBM/Neuronlayers/BinaryLayer.h
+++ b/include/shark/Unsupervised/RBM/Neuronlayers/BinaryLayer.h
@@ -228,12 +228,9 @@ public:
SIZE_CHECK(state.size1() == beta.size());
//the following code does for batches the equivalent thing to:
//return inner_prod(m_bias,state)
- RealVector energies(state.size1());
- axpy_prod(state,m_bias,energies);
- RealVector baseRateEnergies(state.size1());
- axpy_prod(state,m_baseRate,baseRateEnergies);
- noalias(energies)*=beta;
- noalias(energies)+=(1-beta)*baseRateEnergies;
+ RealVector energies = prod(state,m_bias);
+ RealVector baseRateEnergies = prod(state,m_baseRate);
+ noalias(energies) = beta*energies +(1-beta)*baseRateEnergies;
return energies;
}
@@ -286,7 +283,7 @@ public:
template<class Vector, class SampleBatch, class WeightVector>
void expectedParameterDerivative(Vector& derivative, SampleBatch const& samples, WeightVector const& weights )const{
SIZE_CHECK(derivative.size() == size());
- axpy_prod(trans(samples.statistics),weights,derivative,false);
+ noalias(derivative) += prod(weights,samples.statistics);
}
@@ -310,7 +307,7 @@ public:
template<class Vector, class SampleBatch, class WeightVector>
void parameterDerivative(Vector& derivative, SampleBatch const& samples, WeightVector const& weights)const{
SIZE_CHECK(derivative.size() == size());
- axpy_prod(trans(samples.state),weights,derivative,false);
+ noalias(derivative) += prod(weights,samples.state);
}
/// \brief Returns the vector with the parameters associated with the neurons in the layer, i.e. the bias vector.
diff --git a/include/shark/Unsupervised/RBM/Neuronlayers/BipolarLayer.h b/include/shark/Unsupervised/RBM/Neuronlayers/BipolarLayer.h
index a985567..18824ff 100644
--- a/include/shark/Unsupervised/RBM/Neuronlayers/BipolarLayer.h
+++ b/include/shark/Unsupervised/RBM/Neuronlayers/BipolarLayer.h
@@ -195,9 +195,7 @@ public:
RealVector energyTerm(Matrix const& state, BetaVector const& beta)const{
SIZE_CHECK(state.size2() == size());
- RealVector energies(state.size1(),0.0);
- axpy_prod(state,m_bias,energies,false);
- noalias(energies) *= beta;
+ RealVector energies = beta*prod(state,m_bias);
return energies;
}
@@ -233,7 +231,8 @@ public:
template<class Vector, class SampleBatch>
void expectedParameterDerivative(Vector& derivative, SampleBatch const& samples )const{
SIZE_CHECK(derivative.size() == size());
- sumRows(expectedPhiValue(samples.statistics),derivative);
+ sumRows(2*samples.statistics,derivative);
+ derivative -= samples.size();
}
///\brief Calculates the expectation of the derivatives of the energy term of this neuron layer with respect to its parameters - the bias weights.
@@ -246,7 +245,7 @@ public:
template<class Vector, class SampleBatch, class WeightVector>
void expectedParameterDerivative(Vector& derivative, SampleBatch const& samples, WeightVector const& weights )const{
SIZE_CHECK(derivative.size() == size());
- axpy_prod(trans(expectedPhiValue(samples.statistics)),weights,derivative,1.0);
+ noalias(derivative) += 2*prod(weights,samples.statistics) - sum(weights);
}
@@ -270,7 +269,7 @@ public:
template<class Vector, class SampleBatch, class WeightVector>
void parameterDerivative(Vector& derivative, SampleBatch const& samples, WeightVector const& weights)const{
SIZE_CHECK(derivative.size() == size());
- axpy_prod(trans(samples.state),weights,derivative,false);
+ noalias(derivative) += prod(weights,samples.state);
}
/// \brief Returns the vector with the parameters associated with the neurons in the layer, i.e. the bias vector.
diff --git a/include/shark/Unsupervised/RBM/Neuronlayers/GaussianLayer.h b/include/shark/Unsupervised/RBM/Neuronlayers/GaussianLayer.h
index 1b996ac..f7f1aa7 100644
--- a/include/shark/Unsupervised/RBM/Neuronlayers/GaussianLayer.h
+++ b/include/shark/Unsupervised/RBM/Neuronlayers/GaussianLayer.h
@@ -236,7 +236,7 @@ public:
template<class Vector, class SampleBatch, class Vector2 >
void expectedParameterDerivative(Vector& derivative, SampleBatch const& samples, Vector2 const& weights )const{
SIZE_CHECK(derivative.size() == size());
- axpy_prod(samples.statistics,weights, derivative);
+ noalias(derivative) += prod(weights,samples.statistics);
}
///\brief Calculates the derivatives of the energy term of this neuron layer with respect to its parameters - the bias weights.
@@ -259,7 +259,7 @@ public:
template<class Vector, class SampleBatch, class WeightVector>
void parameterDerivative(Vector& derivative, SampleBatch const& samples, WeightVector const& weights)const{
SIZE_CHECK(derivative.size() == size());
- axpy_prod(trans(samples.state),weights,derivative,false);
+ noalias(derivative) += prod(weights,samples.state);
}
///\brief Returns the vector with the parameters associated with the neurons in the layer.
diff --git a/include/shark/Unsupervised/RBM/Neuronlayers/TruncatedExponentialLayer.h b/include/shark/Unsupervised/RBM/Neuronlayers/TruncatedExponentialLayer.h
index 2313dc3..4c05af6 100644
--- a/include/shark/Unsupervised/RBM/Neuronlayers/TruncatedExponentialLayer.h
+++ b/include/shark/Unsupervised/RBM/Neuronlayers/TruncatedExponentialLayer.h
@@ -202,9 +202,7 @@ public:
//the following code does for batches the equivalent thing to:
//return inner_prod(m_bias,state)
- RealVector energies(state.size1());
- axpy_prod(state,m_bias,energies);
- noalias(energies) *= beta;
+ RealVector energies = beta * prod(state,m_bias);
return energies;
}
diff --git a/patch.txt b/patch.txt
new file mode 100644
index 0000000..31cc7a6
--- /dev/null
+++ b/patch.txt
@@ -0,0 +1,260 @@
+diff --git a/include/shark/LinAlg/BLAS/matrix_proxy.hpp b/include/shark/LinAlg/BLAS/matrix_proxy.hpp
+index 5e6395e..a97eb82 100644
+--- a/include/shark/LinAlg/BLAS/matrix_proxy.hpp
++++ b/include/shark/LinAlg/BLAS/matrix_proxy.hpp
+@@ -41,7 +41,6 @@ namespace blas {
+ ///\brief Wraps another expression as a reference.
+ template<class M>
+ class matrix_reference:public matrix_expression<matrix_reference<M> > {
+- typedef matrix_reference<M> self_type;
+ public:
+ typedef typename M::size_type size_type;
+ typedef typename M::difference_type difference_type;
+@@ -57,7 +56,7 @@ public:
+ typedef typename index_pointer<M>::type index_pointer;
+
+ typedef matrix_reference<M const> const_closure_type;
+- typedef self_type closure_type;
++ typedef matrix_reference<M> closure_type;
+ typedef typename M::orientation orientation;
+ typedef typename M::storage_category storage_category;
+ typedef elementwise_tag evaluation_category;
+@@ -484,8 +483,6 @@ temporary_proxy< matrix_transpose<M> > trans(temporary_proxy<M> m) {
+
+ template<class M>
+ class matrix_row: public vector_expression<matrix_row<M> > {
+- typedef matrix_row<M> self_type;
+-
+ public:
+ typedef M matrix_type;
+ typedef std::size_t size_type;
+@@ -503,7 +500,7 @@ public:
+
+ typedef typename closure<M>::type matrix_closure_type;
+ typedef matrix_row<typename const_expression<M>::type> const_closure_type;
+- typedef self_type closure_type;
++ typedef matrix_row<M> closure_type;
+ typedef typename M::storage_category storage_category;
+ typedef elementwise_tag evaluation_category;
+
+@@ -701,8 +698,6 @@ temporary_proxy<matrix_row<M> > row(temporary_proxy<M> expression, typename M::i
+
+ template<class M>
+ class matrix_column: public vector_expression<matrix_column<M> > {
+- typedef matrix_column<M> self_type;
+-
+ public:
+ typedef M matrix_type;
+ typedef std::size_t size_type;
+@@ -720,7 +715,7 @@ public:
+
+ typedef typename closure<M>::type matrix_closure_type;
+ typedef matrix_column<typename const_expression<M>::type> const_closure_type;
+- typedef self_type closure_type;
++ typedef matrix_column<M> closure_type;
+ typedef typename M::storage_category storage_category;
+ typedef elementwise_tag evaluation_category;
+
+@@ -919,10 +914,7 @@ temporary_proxy<matrix_column<M> > column(temporary_proxy<M> expression, typenam
+
+ // Matrix based vector range class representing (off-)diagonals of a matrix.
+ template<class M>
+-class matrix_vector_range:
+- public vector_expression<matrix_vector_range<M> > {
+-
+- typedef matrix_vector_range<M> self_type;
++class matrix_vector_range: public vector_expression<matrix_vector_range<M> > {
+ public:
+ typedef M matrix_type;
+ typedef std::size_t size_type;
+@@ -940,7 +932,7 @@ public:
+
+ typedef typename closure<M>::type matrix_closure_type;
+ typedef matrix_vector_range<typename const_expression<M>::type> const_closure_type;
+- typedef self_type closure_type;
++ typedef matrix_vector_range<M> closure_type;
+ typedef typename M::storage_category storage_category;
+ typedef elementwise_tag evaluation_category;
+
+@@ -1094,7 +1086,6 @@ temporary_proxy< matrix_vector_range<M> > diag(temporary_proxy<M> mat){
+ // Matrix based range class
+ template<class M>
+ class matrix_range:public matrix_expression<matrix_range<M> > {
+- typedef matrix_range<M> self_type;
+ public:
+ typedef M matrix_type;
+ typedef std::size_t size_type;
+@@ -1112,7 +1103,7 @@ public:
+
+ typedef typename closure<M>::type matrix_closure_type;
+ typedef matrix_range<typename const_expression<M>::type> const_closure_type;
+- typedef self_type closure_type;
++ typedef matrix_range<M> closure_type;
+ typedef typename M::storage_category storage_category;
+ typedef elementwise_tag evaluation_category;
+ typedef typename M::orientation orientation;
+@@ -1202,11 +1193,11 @@ public:
+
+ // Assignment
+
+- self_type& operator = (self_type const& e) {
+- return assign(*this, typename matrix_temporary<self_type>::type(e));
++ matrix_range& operator = (matrix_range const& e) {
++ return assign(*this, typename matrix_temporary<matrix_range>::type(e));
+ }
+ template<class E>
+- self_type& operator = (matrix_expression<E> const& e) {
++ matrix_range& operator = (matrix_expression<E> const& e) {
+ return assign(*this, typename matrix_temporary<E>::type(e));
+ }
+
+@@ -1421,7 +1412,7 @@ public:
+ , m_stride2(expression.stride2())
+ {}
+
+- /// \brief Constructor of a self_type proxy from a Dense MatrixExpression
++ /// \brief Constructor of a vector proxy from a Dense MatrixExpression
+ ///
+ /// Be aware that the expression must live longer than the proxy!
+ /// \param expression Expression from which to construct the Proxy
+@@ -1438,7 +1429,7 @@ public:
+ ));
+ }
+
+- /// \brief Constructor of a self_type proxy from a Dense MatrixExpression
++ /// \brief Constructor of a vector proxy from a Dense MatrixExpression
+ ///
+ /// Be aware that the expression must live longer than the proxy!
+ /// \param expression Expression from which to construct the Proxy
+@@ -1455,7 +1446,7 @@ public:
+ );
+ }
+
+- /// \brief Constructor of a self_type proxy from a block of memory
++ /// \brief Constructor of a vector proxy from a block of memory
+ /// \param values the block of memory used
+ /// \param size1 size in 1st direction
+ /// \param size2 size in 2nd direction
+diff --git a/include/shark/LinAlg/BLAS/vector_proxy.hpp b/include/shark/LinAlg/BLAS/vector_proxy.hpp
+index 6d58283..39ae8af 100644
+--- a/include/shark/LinAlg/BLAS/vector_proxy.hpp
++++ b/include/shark/LinAlg/BLAS/vector_proxy.hpp
+@@ -42,9 +42,6 @@ namespace blas{
+
+ template<class V>
+ class vector_reference:public vector_expression<vector_reference<V> >{
+-
+- typedef vector_reference<V> self_type;
+- typedef V referred_type;
+ public:
+ typedef typename V::size_type size_type;
+ typedef typename V::difference_type difference_type;
+@@ -59,16 +56,20 @@ public:
+ typedef typename V::const_index_pointer const_index_pointer;
+ typedef typename index_pointer<V>::type index_pointer;
+
+- typedef const self_type const_closure_type;
+- typedef self_type closure_type;
++ typedef vector_reference<V const> const_closure_type;
++ typedef vector_reference<V> closure_type;
+ typedef typename V::storage_category storage_category;
+ typedef elementwise_tag evaluation_category;
+
+- // Construction and destruction
+- vector_reference(referred_type& v):m_expression(&v){}
++ // Construction
++ vector_reference(V& v):m_expression(&v){}
++
++ template<class E>
++ vector_reference(vector_reference<E> const& other)
++ :m_expression(&other.expression()){}
+
+ // Expression accessors
+- referred_type& expression() const{
++ V& expression() const{
+ return *m_expression;
+ }
+
+@@ -171,7 +172,7 @@ public:
+ }
+
+ private:
+- referred_type* m_expression;
++ V* m_expression;
+ };
+
+ /** \brief A vector referencing a continuous subvector of elements of vector \c v containing all elements specified by \c range.
+@@ -185,9 +186,6 @@ private:
+ */
+ template<class V>
+ class vector_range:public vector_expression<vector_range<V> >{
+-
+- typedef vector_range<V> self_type;
+- typedef typename closure<V>::type vector_closure_type;
+ public:
+ typedef typename V::size_type size_type;
+ typedef typename V::difference_type difference_type;
+@@ -202,8 +200,9 @@ public:
+ typedef typename V::const_index_pointer const_index_pointer;
+ typedef typename index_pointer<V>::type index_pointer;
+
+- typedef const self_type const_closure_type;
+- typedef self_type closure_type;
++ typedef typename closure<V>::type vector_closure_type;
++ typedef vector_range<typename const_expression<V>::type> const_closure_type;
++ typedef vector_range<V> closure_type;
+ typedef typename V::storage_category storage_category;
+ typedef elementwise_tag evaluation_category;
+
+@@ -214,6 +213,16 @@ public:
+ RANGE_CHECK(start() + size() <= m_expression.size());
+ }
+
++ //non-const-> const conversion
++ template<class E>
++ vector_range(
++ vector_range<E> const& other,
++ typename boost::disable_if<
++ boost::is_same<E,vector_range>
++ >::type* dummy = 0
++ ):m_expression(other.expression())
++ , m_range(other.range()){}
++
+ // ---------
+ // Internal Accessors
+ // ---------
+@@ -229,6 +238,10 @@ public:
+ return m_expression;
+ }
+
++ blas::range const& range()const{
++ return m_range;
++ }
++
+ /// \brief Return the size of the vector.
+ size_type size() const {
+ return m_range.size();
+@@ -320,7 +333,7 @@ public:
+ }
+ private:
+ vector_closure_type m_expression;
+- range m_range;
++ blas::range m_range;
+ };
+
+ // ------------------
+@@ -343,8 +356,12 @@ temporary_proxy<vector_range<V> > subrange(vector_expression<V>& data, typename
+ * Vector Expression and access to an element outside of index range of the vector is \b undefined.
+ */
+ template<class V>
+-vector_range<V const> subrange(vector_expression<V> const& data, typename V::size_type start, typename V::size_type stop){
+- return vector_range<V const> (data(), range(start, stop));
++vector_range<typename const_expression<V>::type > subrange(
++ vector_expression<V> const& data,
++ typename V::size_type start,
++ typename V::size_type stop
++){
++ return vector_range<typename const_expression<V>::type> (data(), range(start, stop));
+ }
+
+ template<class V>
diff --git a/src/Algorithms/FisherLDA.cpp b/src/Algorithms/FisherLDA.cpp
index e933382..ad57737 100644
--- a/src/Algorithms/FisherLDA.cpp
+++ b/src/Algorithms/FisherLDA.cpp
@@ -66,9 +66,7 @@ void FisherLDA::train(LinearModel<>& model, LabeledData<RealVector, unsigned int
//reduce the size of the covariance matrix to the needed
//subspace
RealMatrix subspaceDirections = trans(columns(eigenvectors,0,nComp));
- RealVector offset(inputDim,0.0);
- axpy_prod(subspaceDirections, mean,offset);
- offset*=-1;
+ RealVector offset = -prod(subspaceDirections, mean);
// write the parameters into the model
model.setStructure(subspaceDirections, offset);
diff --git a/src/Algorithms/PCA.cpp b/src/Algorithms/PCA.cpp
index dd90f00..81946e2 100644
--- a/src/Algorithms/PCA.cpp
+++ b/src/Algorithms/PCA.cpp
@@ -116,8 +116,7 @@ void PCA::encoder(LinearModel<>& model, std::size_t m) {
if(!m) m = std::min(m_n,m_l);
RealMatrix A = trans(columns(m_eigenvectors, 0, m) );
- RealVector offset(A.size1(),0.0);
- axpy_prod(A, m_mean, offset, false, -1.0);
+ RealVector offset = -prod(A, m_mean);
if(m_whitening){
for(std::size_t i=0; i<A.size1(); i++) {
//take care of numerical difficulties for very small eigenvalues.
diff --git a/src/Models/OnlineRNNet.cpp b/src/Models/OnlineRNNet.cpp
index 8de93ce..240ca4d 100644
--- a/src/Models/OnlineRNNet.cpp
+++ b/src/Models/OnlineRNNet.cpp
@@ -65,10 +65,9 @@ void OnlineRNNet::eval(RealMatrix const& pattern, RealMatrix& output){
//activation of the hidden neurons is now just a matrix vector multiplication
- axpy_prod(
+ noalias(subrange(m_activation,inputSize()+1,numUnits)) = prod(
mpe_structure->weights(),
- m_lastActivation,
- subrange(m_activation,inputSize()+1,numUnits)
+ m_lastActivation
);
//now apply the sigmoid function
@@ -135,10 +134,9 @@ void OnlineRNNet::weightedParameterDerivative(RealMatrix const& pattern, const R
noalias(row(m_unitGradient,i))= element_prod(row(m_unitGradient,i),neuronDerivatives);
}
//and formula 4 (the gradient itself)
- axpy_prod(
+ noalias(gradient) = prod(
columns(m_unitGradient,numNeurons-outputSize(),numNeurons),
- row(coefficients,0),
- gradient
+ row(coefficients,0)
);
//sanity check
SIZE_CHECK(param == mpe_structure->parameters());
diff --git a/src/Models/RNNet.cpp b/src/Models/RNNet.cpp
index 4db6faa..af9a381 100644
--- a/src/Models/RNNet.cpp
+++ b/src/Models/RNNet.cpp
@@ -62,10 +62,9 @@ void RNNet::eval(BatchInputType const& patterns, BatchOutputType& outputs, State
sequence[t-1](mpe_structure->bias())=1;
//activation of the hidden neurons is now just a matrix vector multiplication
- axpy_prod(
+ noalias(subrange(sequence[t],inputSize()+1,numUnits)) = prod(
mpe_structure->weights(),
- sequence[t-1],
- subrange(sequence[t],inputSize()+1,numUnits)
+ sequence[t-1]
);
//now apply the sigmoid function
for (std::size_t i = inputSize()+1;i != numUnits;i++)
@@ -107,11 +106,9 @@ void RNNet::weightedParameterDerivative(
double derivative = mpe_structure->neuronDerivative(sequence[t](j+mpe_structure->inputs()+1));
errorDerivative(t,j)*=derivative;
}
- axpy_prod(
+ noalias(row(errorDerivative,t-1)) += prod(
trans(columns(mpe_structure->weights(), inputSize()+1,numUnits)),
- row(errorDerivative,t),
- row(errorDerivative,t-1),
- false
+ row(errorDerivative,t)
);
}
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/shark.git