[opengm] 245/386: Merge branch 'master' of https://bitbucket.org/jkappes/opengm-learning into maximum_likelihood
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:37:59 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit 81824ef2c248c0d1eaa5188b9f2af93f6f1f8233
Merge: e82a0b0 e9e0209
Author: Janez Ales <janez.ales at iwr.uni-heidelberg.de>
Date: Fri Jan 16 09:39:10 2015 +0100
Merge branch 'master' of https://bitbucket.org/jkappes/opengm-learning into maximum_likelihood
Conflicts:
include/opengm/learning/maximum_likelihood_learning.hxx
 CMakeLists.txt                                     |   3 +
 fubar/real_example_2.py                            |   6 +-
 include/opengm/functions/view_convert_function.hxx |  36 +++++--
 .../learning/maximum_likelihood_learning.hxx       |  50 +++++++--
 include/opengm/learning/subgradient_ssvm.hxx       |  26 ++++-
 .../python/opengm/learning/CMakeLists.txt          |  10 +-
 .../python/opengm/learning/pyWeights.cxx           |   1 +
 src/interfaces/python/test.py                      | 115 ++++++++++++++++++++-
 src/unittest/learning/CMakeLists.txt               |   6 +-
 src/unittest/learning/test_learning.cxx            |  15 +--
 src/unittest/learning/test_subgradient_ssvm.cxx    |   4 +-
 11 files changed, 233 insertions(+), 39 deletions(-)
diff --cc include/opengm/learning/maximum_likelihood_learning.hxx
index b7191ed,9c609aa..9539b32
--- a/include/opengm/learning/maximum_likelihood_learning.hxx
+++ b/include/opengm/learning/maximum_likelihood_learning.hxx
@@@ -25,20 -25,10 +25,27 @@@ namespace opengm
class Parameter{
public:
- size_t maxNumSteps_;
- double reg_;
- double temperature_;
- Parameter() : maxNumSteps_(10), reg_(1.0), temperature_(0.3) {;}
+ IndexType maximumNumberOfIterations_;
+ ValueType gradientStep_;
+ ValueType weightAccuracy_;
+ ValueType gradientStoppingCriteria_;
+ bool infoFlag_;
+ bool infoEveryStep_;
++
++ size_t maxNumSteps_;
++ double reg_;
++ double temperature_;
+ Parameter():
+ maximumNumberOfIterations_(123),
+ gradientStep_(0.123),
+ weightAccuracy_(0.0000123),
+ gradientStoppingCriteria_(0.0000000123),
+ infoFlag_(true),
- infoEveryStep_(false)
++ infoEveryStep_(false),
++ maxNumSteps_(10),
++ reg_(1.0),
++ temperature_(0.3)
+ {;}
};
class WeightGradientFunctor{
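The merged Parameter above keeps both branches' knobs: the new gradient-ascent controls (maximumNumberOfIterations_, gradientStep_, weightAccuracy_, gradientStoppingCriteria_, infoFlag_, infoEveryStep_) alongside the fields retained from master (maxNumSteps_, reg_, temperature_). A minimal usage sketch, under the assumption that this header's learner class is opengm::learning::MaximumLikelihoodLearner and with DatasetType standing in for a concrete learning dataset type (neither is fixed by the hunks shown):

    // Sketch only: DatasetType is a placeholder, not defined in this header.
    typedef opengm::learning::MaximumLikelihoodLearner<DatasetType> Learner;
    Learner::Parameter param;
    param.maximumNumberOfIterations_ = 100;   // outer gradient-ascent steps
    param.gradientStep_              = 0.1;   // base step size, decayed as step/k
    param.reg_                       = 1.0;   // L2 regularization weight (lambda)
    param.temperature_               = 0.3;   // marginal softening, p(x) ~ exp(-E(x)/T)
    param.infoFlag_                  = true;  // per-iteration progress printing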
@@@ -53,18 -43,17 +60,16 @@@
std::vector<LabelType> labelVector(marg_->numberOfVariables());
for(size_t i=0; i<marg_->numberOfVariables(); ++i)
labelVector[i] = dataset_.getGT(modelID_)[marg_->variableIndex(i)];
- for(size_t i=0; i<function.numberOfWeights();++i){ // function-wise local weight index
- size_t wID = function.weightIndex(i); // global weight index
- //gradient_[wID] -= function.weightGradient(wID, labelVector.begin()); // <-----
- gradient_[wID] -= function.weightGradient(i, labelVector.begin()); //
+ for(size_t i=0; i<function.numberOfWeights();++i){
- size_t wID = function.weightIndex(i);
- gradient_[wID] -= function.weightGradient(wID, labelVector.begin());
++ size_t wID = function.weightIndex(i);
++ gradient_[wID] -= function.weightGradient(i, labelVector.begin());
}
opengm::ShapeWalker<typename F::FunctionShapeIteratorType> shapeWalker(function.functionShapeBegin(), function.dimension());
for(size_t i=0;i<function.size();++i, ++shapeWalker) {
- for(size_t i=0; i<function.numberOfWeights();++i){ // function-wise local weight index
+ for(size_t i=0; i<function.numberOfWeights();++i){
size_t wID = function.weightIndex(i);
- //gradient_[wID] += (*marg_)(shapeWalker.coordinateTuple().begin()) * function.weightGradient(wID, shapeWalker.coordinateTuple().begin() ); // <-----
- //std::cout<<"m "<<(*marg_)(shapeWalker.coordinateTuple().begin())<<"\n";
- gradient_[wID] += (*marg_)(shapeWalker.coordinateTuple().begin()) * function.weightGradient(wID, shapeWalker.coordinateTuple().begin() );
+ gradient_[wID] += (*marg_)(shapeWalker.coordinateTuple().begin()) * function.weightGradient(i, shapeWalker.coordinateTuple().begin() );
}
}
}
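The two weight loops above compute the usual maximum-likelihood gradient for a Gibbs distribution p(x|w) proportional to exp(-E(x;w)); the identity is standard but only implicit in the code. In LaTeX notation:

    \frac{\partial \log p(y^{*} \mid w)}{\partial w_j}
      = \mathbb{E}_{x \sim p(x \mid w)}\!\left[ \frac{\partial E(x;w)}{\partial w_j} \right]
      - \frac{\partial E(y^{*};w)}{\partial w_j}

The first loop subtracts the gradient at the ground-truth labeling y^* (read from dataset_.getGT(modelID_)); the second approximates the expectation factor-wise by walking every configuration of the factor with ShapeWalker, weighted by the belief-propagation marginal marg_. The conflict is resolved in favor of passing the function-local index i to weightGradient while still accumulating into the global slot wID.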
@@@ -110,37 -96,21 +115,35 @@@
typedef MessagePassing<GmBpType, opengm::Integrator, UpdateRules, opengm::MaxDistance> BeliefPropagation;
bool search = true;
+ double invTemperature = 1.0/param_.temperature_;
- //Parameters for inference
- const IndexType maxNumberOfIterations = 40;
- const double convergenceBound = 1e-7;
- const double damping = 0.5;
- typename BeliefPropagation::Parameter infParam(maxNumberOfIterations, convergenceBound, damping);
-
std::cout << std::endl;
- double eta = 1;
- size_t count = 0;
+ if(param_.infoFlag_){
+ std::cout << "INFO: Maximum Likelihood Learner: Maximum Number Of Iterations "<< param_.maximumNumberOfIterations_ << std::endl;
+ std::cout << "INFO: Maximum Likelihood Learner: Gradient Step "<< param_.gradientStep_ << std::endl;
+ std::cout << "INFO: Maximum Likelihood Learner: Weight Accuracy "<< param_.weightAccuracy_ << std::endl;
+ std::cout << "INFO: Maximum Likelihood Learner: Gradient Stopping Criteria "<<param_. gradientStoppingCriteria_ << std::endl;
+ std::cout << "INFO: Maximum Likelihood Learner: Info Flag "<< param_.infoFlag_ << std::endl;
+ std::cout << "INFO: Maximum Likelihood Learner: Info Every Step "<< param_.infoEveryStep_ << std::endl;
+ }
+
+ //Parameters for inference
- //const IndexType maxNumberOfIterations = 40;
- //const double convergenceBound = 1e-7;
- //const double damping = 0.5;
+ const IndexType maxNumberOfBPIterations = infParametersBP.maximumNumberOfSteps_; //40
+ const double convergenceBound = infParametersBP.bound_; //1e-7;
+ const double damping = infParametersBP.damping_; //0.5;
+
+ if(param_.infoFlag_){
+ std::cout << "INFO: Belief Propagation: Maximum Number Of Belief Propagation Iterations "<< maxNumberOfBPIterations << std::endl;
+ std::cout << "INFO: Belief Propagation: Convergence Bound "<< convergenceBound << std::endl;
+ std::cout << "INFO: Belief Propagation: Damping "<< damping << std::endl;
+ }
+ typename BeliefPropagation::Parameter infParam(maxNumberOfBPIterations, convergenceBound, damping);
+
+ size_t iterationCount = 0;
while(search){
- if(count>=param_.maxNumSteps_) break;
- ++count;
- std::cout << "\r Progress : " << count << "/"<<param_.maxNumSteps_ <<" iteration 0/"<< dataset_.getNumberOfModels() << " models "<< std::flush;
+ if(iterationCount>=param_.maximumNumberOfIterations_) break;
+ ++iterationCount;
+ std::cout << "\r Progress : " << iterationCount << "/"<<param_.maximumNumberOfIterations_ <<" iteration 0/"<< dataset_.getNumberOfModels() << " models "<< std::flush;
typename GMType::IndependentFactorType marg;
WeightGradientFunctor wgf(dataset_);
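The hunk above wires the BP inference parameters through from infParametersBP instead of hard-coding them. The marginals come from sum-product belief propagation (opengm::Integrator accumulation with MaxDistance), run on a temperature-scaled model so that marg_ approximates p(x) proportional to exp(-E(x)/temperature_); invTemperature = 1/T is the factor applied to the energies. A minimal sketch of the inference call, assuming a temperature-scaled model gmBP of type GmBpType has already been assembled (that step is elided by the hunks shown):

    // Sketch: run BP with the parameters read from infParametersBP above.
    typename BeliefPropagation::Parameter infParam(maxNumberOfBPIterations,
                                                   convergenceBound, damping);
    BeliefPropagation bp(gmBP, infParam);
    bp.infer();
    // Factor marginals are then read back into an IndependentFactorType,
    // e.g. via bp.factorMarginal(factorIndex, marg) -- assuming the
    // MessagePassing interface exposes factorMarginal as in mainline opengm.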
@@@ -176,19 -146,27 +179,44 @@@
//*****************************
//** Gradient Step
+ //************************
++ /*
+ if(param_.infoFlag_)
+ std::cout << " Best weights: ";
+ for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
+ dataset_.getWeights().setWeight(p, weights_.getWeight(p) + param_.gradientStep_ * wgf.getGradient(p));
+ weights_.setWeight(p, weights_.getWeight(p) + param_.gradientStep_ * wgf.getGradient(p));
+ if(param_.infoFlag_)
+ std::cout << weights_.getWeight(p) << " ";
+ }
+ if(param_.infoFlag_)
+ std::cout << std::endl;
++ */
++
+ //*****************************
+ double norm = 0;
+ for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
+ norm += (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p)) * (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p));
+ }
+ norm = std::sqrt(norm);
+
- std::cout << "gradient = ( ";
- for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
- std::cout << (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm << " ";
- dataset_.getWeights().setWeight(p, weights_.getWeight(p) + eta/count * (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm);
- weights_.setWeight(p, weights_.getWeight(p) + eta/count * (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm);
- }
- std::cout << ")"<<std::endl;
- std::cout << "weight = ( ";
++ if(param_.infoFlag_)
++ std::cout << "gradient = ( ";
+ for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p){
- std::cout << weights_.getWeight(p) << " ";
++ if(param_.infoFlag_)
++ std::cout << (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm << " ";
++ dataset_.getWeights().setWeight(p, weights_.getWeight(p) + param_.gradientStep_/iterationCount * (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm);
++ weights_.setWeight(p, weights_.getWeight(p) + param_.gradientStep_/iterationCount * (wgf.getGradient(p)-2*param_.reg_*weights_.getWeight(p))/norm);
+ }
- std::cout << ")"<<std::endl;
++ if(param_.infoFlag_){
++ std::cout << ") ";
++ std::cout << " weight = ( ";
++ for(IndexType p=0; p<dataset_.getNumberOfWeights(); ++p)
++ std::cout << weights_.getWeight(p) << " ";
++ std::cout << ")"<<std::endl;
++ }
}
- std::cout << "\r Stoped after "<< count << "/"<<param_.maxNumSteps_<< " iterations. " <<std::endl;
+ std::cout << "\r Stoped after "<< iterationCount << "/" << param_.maximumNumberOfIterations_<< " iterations. " <<std::endl;
}
}
}
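Read off the added lines, the replaced gradient step is gradient ascent on the L2-regularized log-likelihood with a normalized direction and a 1/k step decay; in LaTeX notation, with lambda = reg_, eta = gradientStep_ and k = iterationCount:

    g_k = \nabla_w \log p(y^{*} \mid w_k) - 2\lambda w_k, \qquad
    w_{k+1} = w_k + \frac{\eta}{k} \cdot \frac{g_k}{\lVert g_k \rVert_2}

The commented-out block above it is the unnormalized variant from the maximum_likelihood branch, kept for reference. Note that weightAccuracy_ and gradientStoppingCriteria_ are declared in Parameter but not yet checked anywhere in the hunks shown, so the loop currently stops only at maximumNumberOfIterations_.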
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git