[opengm] 368/386: merge changes on inference methods from learning branch
Ghislain Vaillant
ghisvail-guest at moszumanska.debian.org
Wed Aug 31 08:38:35 UTC 2016
This is an automated email from the git hooks/post-receive script.
ghisvail-guest pushed a commit to branch debian/master
in repository opengm.
commit d18e758328cb2304ab0d4660d32cafc909a8ff1f
Author: Joerg Kappes <kappes at math.uni-heidelberg.de>
Date: Wed Jul 27 22:06:28 2016 +0200
merge changes on inference methods from learning branch
---
include/opengm/inference/alphabetaswap.hxx | 33 ++++-
include/opengm/inference/alphaexpansion.hxx | 85 +++++++----
include/opengm/inference/alphaexpansionfusion.hxx | 75 +++++++---
include/opengm/inference/astar.hxx | 139 ++++++++++--------
.../auxiliary/fusion_move/fusion_mover.hxx | 27 ++++
.../fusion_move/permutable_label_fusion_mover.hxx | 4 +-
.../auxiliary/lp_solver/lp_solver_interface.hxx | 12 +-
.../opengm/inference/auxiliary/minstcutboost.hxx | 2 +-
include/opengm/inference/bruteforce.hxx | 21 ++-
include/opengm/inference/combilp.hxx | 11 ++
include/opengm/inference/dmc.hxx | 3 +-
.../dualdecomposition/dualdecomposition_bundle.hxx | 28 ++++
.../dualdecomposition_subgradient.hxx | 37 ++++-
include/opengm/inference/dynamicprogramming.hxx | 20 +++
include/opengm/inference/external/ad3.hxx | 23 +++
include/opengm/inference/external/daoopt.hxx | 26 ++++
include/opengm/inference/external/fastPD.hxx | 16 ++
include/opengm/inference/external/mrflib.hxx | 15 ++
include/opengm/inference/external/qpbo.hxx | 27 +++-
include/opengm/inference/external/trws.hxx | 38 ++++-
include/opengm/inference/fusion_based_inf.hxx | 23 ++-
include/opengm/inference/graphcut.hxx | 29 +++-
include/opengm/inference/greedygremlin.hxx | 19 ++-
include/opengm/inference/hqpbo.hxx | 45 ++++--
include/opengm/inference/icm.hxx | 63 ++++----
include/opengm/inference/infandflip.hxx | 23 +++
include/opengm/inference/inference.hxx | 9 ++
.../opengm/inference/intersection_based_inf.hxx | 2 +-
include/opengm/inference/lazyflipper.hxx | 161 ++++++++++++---------
include/opengm/inference/loc.hxx | 29 ++++
include/opengm/inference/lp_inference_base.hxx | 1 -
include/opengm/inference/lpcplex.hxx | 96 +++++++++---
include/opengm/inference/lpcplex2.hxx | 10 ++
include/opengm/inference/lpgurobi.hxx | 80 +++++++---
include/opengm/inference/lpgurobi2.hxx | 10 ++
include/opengm/inference/lsatr.hxx | 26 +++-
.../inference/messagepassing/messagepassing.hxx | 30 ++++
.../inference/messagepassing/messagepassing_bp.hxx | 11 ++
.../messagepassing/messagepassing_trbp.hxx | 9 ++
include/opengm/inference/movemaker.hxx | 8 +
include/opengm/inference/mqpbo.hxx | 31 +++-
include/opengm/inference/multicut.hxx | 39 ++++-
include/opengm/inference/partition-move.hxx | 15 +-
include/opengm/inference/qpbo.hxx | 17 ++-
include/opengm/inference/reducedinference.hxx | 150 +++++++++++--------
include/opengm/inference/sat.hxx | 18 ++-
include/opengm/inference/self_fusion.hxx | 47 +++++-
include/opengm/opengm.hxx | 33 ++++-
include/opengm/utilities/shape_accessor.hxx | 78 +++++++++-
src/tutorials/c++/basics/doSumProdInference.cxx | 2 +-
src/unittest/inference/test_graphcut.cxx | 2 +-
src/unittest/inference/test_lazyflipper.cxx | 2 +-
src/unittest/inference/test_messagepassing.cxx | 20 +--
53 files changed, 1387 insertions(+), 393 deletions(-)
diff --git a/include/opengm/inference/alphabetaswap.hxx b/include/opengm/inference/alphabetaswap.hxx
index c32461b..9a21537 100644
--- a/include/opengm/inference/alphabetaswap.hxx
+++ b/include/opengm/inference/alphabetaswap.hxx
@@ -22,14 +22,33 @@ public:
typedef opengm::visitors::EmptyVisitor<AlphaBetaSwap<GM,INF> > EmptyVisitorType;
typedef opengm::visitors::TimingVisitor<AlphaBetaSwap<GM,INF> > TimingVisitorType;
- struct Parameter {
- Parameter() {
- maxNumberOfIterations_ = 1000;
- }
- typename InferenceType::Parameter parameter_;
- size_t maxNumberOfIterations_;
- };
+ template<class _GM>
+ struct RebindGm{
+ typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+ typedef AlphaBetaSwap<_GM, RebindedInf> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+ typedef AlphaBetaSwap<_GM, RebindedInf> type;
+ };
+
+
+ struct Parameter {
+ Parameter() {
+ maxNumberOfIterations_ = 1000;
+ }
+ template<class P>
+ Parameter(const P & p)
+ : parameter_(p.parameter_),
+ maxNumberOfIterations_(p.maxNumberOfIterations_){
+ }
+
+ typename InferenceType::Parameter parameter_;
+ size_t maxNumberOfIterations_;
+ };
AlphaBetaSwap(const GraphicalModelType&, Parameter = Parameter());
std::string name() const;
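The pattern above recurs throughout this commit: every inference method gains nested RebindGm / RebindGmAndAcc metafunctions that map the solver onto a different graphical-model (and accumulator) type, plus a templated Parameter constructor that converts a parameter object from one instantiation to the rebound one. A minimal sketch of how the two pieces fit together follows; the model typedefs, the tiny one-variable model and the choice of LazyFlipper as solver are illustrative assumptions, not taken from this diff.

    // Sketch only (assumed setup): rebind a solver to another model type and
    // convert its parameters via the new templated constructor.
    #include <vector>
    #include <opengm/graphicalmodel/graphicalmodel.hxx>
    #include <opengm/operations/adder.hxx>
    #include <opengm/operations/minimizer.hxx>
    #include <opengm/inference/lazyflipper.hxx>

    int main() {
       // Two model types that differ only in their value type (illustrative).
       typedef opengm::GraphicalModel<float,  opengm::Adder> GmFloat;
       typedef opengm::GraphicalModel<double, opengm::Adder> GmDouble;

       typedef opengm::LazyFlipper<GmFloat, opengm::Minimizer> InfFloat;
       // New in this commit: rebind the solver to the double-valued model type.
       typedef InfFloat::RebindGm<GmDouble>::type InfDouble;

       // New in this commit: convert parameters between the two instantiations.
       InfFloat::Parameter paramFloat;
       paramFloat.maxSubgraphSize_ = 3;
       InfDouble::Parameter paramDouble(paramFloat);

       // Trivial double-valued model with one binary variable.
       size_t numLabels[] = {2};
       GmDouble gm(GmDouble::SpaceType(numLabels, numLabels + 1));
       opengm::ExplicitFunction<double> f(numLabels, numLabels + 1, 0.0);
       f(0) = 1.0;   // label 0 costs 1
       f(1) = 0.0;   // label 1 costs 0
       GmDouble::FunctionIdentifier fid = gm.addFunction(f);
       size_t vars[] = {0};
       gm.addFactor(fid, vars, vars + 1);

       // Run the rebound solver.
       InfDouble inf(gm, paramDouble);
       inf.infer();
       std::vector<GmDouble::LabelType> labeling;
       inf.arg(labeling);   // expected: labeling[0] == 1
       return 0;
    }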
diff --git a/include/opengm/inference/alphaexpansion.hxx b/include/opengm/inference/alphaexpansion.hxx
index a1b9d04..d5b21e3 100644
--- a/include/opengm/inference/alphaexpansion.hxx
+++ b/include/opengm/inference/alphaexpansion.hxx
@@ -22,35 +22,62 @@ public:
typedef visitors::EmptyVisitor<AlphaExpansion<GM,INF> > EmptyVisitorType;
typedef visitors::TimingVisitor<AlphaExpansion<GM,INF> > TimingVisitorType;
- struct Parameter {
- typedef typename InferenceType::Parameter InferenceParameter;
- enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
- enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
-
- Parameter
- (
- const size_t maxNumberOfSteps = 1000,
- const InferenceParameter& para = InferenceParameter()
- )
- : parameter_(para),
- maxNumberOfSteps_(maxNumberOfSteps),
- labelInitialType_(DEFAULT_LABEL),
- orderType_(DEFAULT_ORDER),
- randSeedOrder_(0),
- randSeedLabel_(0),
- labelOrder_(),
- label_()
- {}
-
- InferenceParameter parameter_;
- size_t maxNumberOfSteps_;
- LabelingIntitialType labelInitialType_;
- OrderType orderType_;
- unsigned int randSeedOrder_;
- unsigned int randSeedLabel_;
- std::vector<LabelType> labelOrder_;
- std::vector<LabelType> label_;
- };
+ template<class _GM>
+ struct RebindGm{
+ typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+ typedef AlphaExpansion<_GM, RebindedInf> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+ typedef AlphaExpansion<_GM, RebindedInf> type;
+ };
+
+ struct Parameter {
+ typedef typename InferenceType::Parameter InferenceParameter;
+ enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
+ enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
+
+ Parameter
+ (
+ const size_t maxNumberOfSteps = 1000,
+ const InferenceParameter& para = InferenceParameter()
+ )
+ : parameter_(para),
+ maxNumberOfSteps_(maxNumberOfSteps),
+ labelInitialType_(DEFAULT_LABEL),
+ orderType_(DEFAULT_ORDER),
+ randSeedOrder_(0),
+ randSeedLabel_(0),
+ labelOrder_(),
+ label_()
+ {}
+
+ template<class P>
+ Parameter
+ (
+ const P & p
+ )
+ : parameter_(p.parameter_),
+ maxNumberOfSteps_(p.maxNumberOfSteps_),
+ labelInitialType_(p.labelInitialType_),
+ orderType_(p.orderType_),
+ randSeedOrder_(p.randSeedOrder_),
+ randSeedLabel_(p.randSeedLabel_),
+ labelOrder_(p.labelOrder_),
+ label_(p.label_)
+ {}
+
+ InferenceParameter parameter_;
+ size_t maxNumberOfSteps_;
+ LabelingIntitialType labelInitialType_;
+ OrderType orderType_;
+ unsigned int randSeedOrder_;
+ unsigned int randSeedLabel_;
+ std::vector<LabelType> labelOrder_;
+ std::vector<LabelType> label_;
+ };
AlphaExpansion(const GraphicalModelType&, Parameter para = Parameter());
diff --git a/include/opengm/inference/alphaexpansionfusion.hxx b/include/opengm/inference/alphaexpansionfusion.hxx
index 81f7e6a..9c26741 100644
--- a/include/opengm/inference/alphaexpansionfusion.hxx
+++ b/include/opengm/inference/alphaexpansionfusion.hxx
@@ -27,30 +27,57 @@ public:
typedef visitors::EmptyVisitor<AlphaExpansionFusion<GM,ACC> > EmptyVisitorType;
typedef visitors::TimingVisitor<AlphaExpansionFusion<GM,ACC> > TimingVisitorType;
- struct Parameter {
- enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL, LOCALOPT_LABEL, EXPLICIT_LABEL};
- enum OrderType {DEFAULT_ORDER, RANDOM_ORDER, EXPLICIT_ORDER};
-
- Parameter
- (
- const size_t maxNumberOfSteps = 1000
- )
- : maxNumberOfSteps_(maxNumberOfSteps),
- labelInitialType_(DEFAULT_LABEL),
- orderType_(DEFAULT_ORDER),
- randSeedOrder_(0),
- randSeedLabel_(0),
- labelOrder_(),
- label_()
- {}
-
- size_t maxNumberOfSteps_;
- LabelingIntitialType labelInitialType_;
- OrderType orderType_;
- unsigned int randSeedOrder_;
- unsigned int randSeedLabel_;
- std::vector<LabelType> labelOrder_;
- std::vector<LabelType> label_;
+ template<class _GM>
+ struct RebindGm{
+ typedef AlphaExpansionFusion<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef AlphaExpansionFusion<_GM, _ACC> type;
+ };
+
+ struct Parameter {
+ enum LabelingIntitialType {DEFAULT_LABEL, RANDOM_LABEL,
+ LOCALOPT_LABEL, EXPLICIT_LABEL};
+ enum OrderType {DEFAULT_ORDER, RANDOM_ORDER,
+ EXPLICIT_ORDER};
+
+
+ Parameter
+ (
+ const size_t maxNumberOfSteps = 1000
+ )
+ : maxNumberOfSteps_(maxNumberOfSteps),
+ labelInitialType_(DEFAULT_LABEL),
+ orderType_(DEFAULT_ORDER),
+ randSeedOrder_(0),
+ randSeedLabel_(0),
+ labelOrder_(),
+ label_()
+ {}
+
+ template<class P>
+ Parameter
+ (
+ const P & p
+ )
+ : maxNumberOfSteps_(p.maxNumberOfSteps_),
+ labelInitialType_(p.labelInitialType_),
+ orderType_(p.orderType_),
+ randSeedOrder_(p.randSeedOrder_),
+ randSeedLabel_(p.randSeedLabel_),
+ labelOrder_(p.labelOrder_),
+ label_(p.label_)
+ {}
+
+ size_t maxNumberOfSteps_;
+ LabelingIntitialType labelInitialType_;
+ OrderType orderType_;
+ unsigned int randSeedOrder_;
+ unsigned int randSeedLabel_;
+ std::vector<LabelType> labelOrder_;
+ std::vector<LabelType> label_;
};
AlphaExpansionFusion(const GraphicalModelType&, Parameter para = Parameter());
diff --git a/include/opengm/inference/astar.hxx b/include/opengm/inference/astar.hxx
index 1084d1e..d1a19e6 100644
--- a/include/opengm/inference/astar.hxx
+++ b/include/opengm/inference/astar.hxx
@@ -66,7 +66,6 @@ namespace opengm {
public:
///graphical model type
typedef GM GraphicalModelType;
- // -- obsolet -- typedef typename GraphicalModelType::template Rebind<true>::RebindType EditableGraphicalModelType;
///accumulation type
typedef ACC AccumulationType;
OPENGM_GM_TYPE_TYPEDEFS;
@@ -79,59 +78,81 @@ namespace opengm {
typedef opengm::visitors::TimingVisitor<AStar<GM, ACC> > TimingVisitorType;
typedef opengm::visitors::EmptyVisitor<AStar<GM, ACC> > EmptyVisitorType;
- enum Heuristic{
- DEFAULT_HEURISTIC = 0,
- FAST_HEURISTIC = 1,
- STANDARD_HEURISTIC = 2
- };
- struct Parameter {
- Parameter()
- {
- maxHeapSize_ = 3000000;
- numberOfOpt_ = 1;
- objectiveBound_ = AccumulationType::template neutral<ValueType>();
- heuristic_ = Parameter::DEFAULTHEURISTIC;
- };
- /// constuctor
-
- /// \brief add tree factor id
- /// \param id factor id
- void addTreeFactorId(size_t id)
- { treeFactorIds_.push_back(id); }
- /// DEFAULTHEURISTIC ;
- static const size_t DEFAULTHEURISTIC = 0;
- /// FASTHEURISTIC
- static const size_t FASTHEURISTIC = 1;
- /// STANDARDHEURISTIC
- static const size_t STANDARDHEURISTIC = 2;
- /// maxHeapSize_ maximum size of the heap
- size_t maxHeapSize_;
- /// number od N-best solutions that should be found
- size_t numberOfOpt_;
- /// objective bound
- ValueType objectiveBound_;
- /// heuritstic
- ///
- /// DEFAULTHEURISTIC = 0;
- /// FASTHEURISTIC = 1
- /// STANDARDHEURISTIC = 2
- size_t heuristic_;
- std::vector<IndexType> nodeOrder_;
- std::vector<size_t> treeFactorIds_;
-
- };
- AStar(const GM& gm, Parameter para = Parameter());
- virtual std::string name() const {return "AStar";}
- const GraphicalModelType& graphicalModel() const;
- virtual InferenceTermination infer();
- virtual void reset();
- template<class VisitorType> InferenceTermination infer(VisitorType& vistitor);
- ValueType bound()const {return belowBound_;}
- ValueType value()const;
- virtual InferenceTermination marginal(const size_t,IndependentFactorType& out)const {return UNKNOWN;}
- virtual InferenceTermination factorMarginal(const size_t, IndependentFactorType& out)const {return UNKNOWN;}
- virtual InferenceTermination arg(std::vector<LabelType>& v, const size_t = 1)const;
- virtual InferenceTermination args(std::vector< std::vector<LabelType> >& v)const;
+
+ template<class _GM>
+ struct RebindGm{
+ typedef AStar<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef AStar<_GM, _ACC > type;
+ };
+
+
+ enum Heuristic{
+ DEFAULT_HEURISTIC = 0,
+ FAST_HEURISTIC = 1,
+ STANDARD_HEURISTIC = 2
+ };
+
+ struct Parameter {
+ Parameter()
+ {
+ maxHeapSize_ = 3000000;
+ numberOfOpt_ = 1;
+ objectiveBound_ = AccumulationType::template neutral<ValueType>();
+ heuristic_ = Parameter::DEFAULTHEURISTIC;
+ };
+
+ template<class P>
+ Parameter(const P & p )
+ : maxHeapSize_(p.maxHeapSize_),
+ numberOfOpt_(p.numberOfOpt_),
+ objectiveBound_(p.objectiveBound_),
+ heuristic_(p.heuristic_),
+ nodeOrder_(p.nodeOrder_),
+ treeFactorIds_(p.treeFactorIds_){
+ }
+
+ /// \brief add tree factor id
+ /// \param id factor id
+ void addTreeFactorId(size_t id)
+ { treeFactorIds_.push_back(id); }
+ /// DEFAULTHEURISTIC ;
+ static const size_t DEFAULTHEURISTIC = 0;
+ /// FASTHEURISTIC
+ static const size_t FASTHEURISTIC = 1;
+ /// STANDARDHEURISTIC
+ static const size_t STANDARDHEURISTIC = 2;
+ /// maxHeapSize_ maximum size of the heap
+ size_t maxHeapSize_;
+ /// number of N-best solutions that should be found
+ size_t numberOfOpt_;
+ /// objective bound
+ ValueType objectiveBound_;
+ /// heuristic
+ ///
+ /// DEFAULTHEURISTIC = 0;
+ /// FASTHEURISTIC = 1
+ /// STANDARDHEURISTIC = 2
+ size_t heuristic_;
+ std::vector<IndexType> nodeOrder_;
+ std::vector<size_t> treeFactorIds_;
+
+ };
+
+ AStar(const GM& gm, Parameter para = Parameter());
+ virtual std::string name() const {return "AStar";}
+ const GraphicalModelType& graphicalModel() const;
+ virtual InferenceTermination infer();
+ virtual void reset();
+ template<class VisitorType> InferenceTermination infer(VisitorType& vistitor);
+ ValueType bound()const {return belowBound_;}
+ ValueType value()const;
+ virtual InferenceTermination marginal(const size_t,IndependentFactorType& out)const {return UNKNOWN;}
+ virtual InferenceTermination factorMarginal(const size_t, IndependentFactorType& out)const {return UNKNOWN;}
+ virtual InferenceTermination arg(std::vector<LabelType>& v, const size_t = 1)const;
+ virtual InferenceTermination args(std::vector< std::vector<LabelType> >& v)const;
private:
const GM& gm_;
@@ -532,7 +553,7 @@ namespace opengm {
OPENGM_ASSERT(numStates_[index] == nodeEnergy[index].size());
for(size_t j=0;j<numStates_[index];++j) {
//nodeEnergy[index][j] = operation(f(j),nodeEnergy[index][j]);
- LabelType coordinates[]={j};
+ LabelType coordinates[]={static_cast<LabelType>(j)};
OperatorType::op(f(coordinates),nodeEnergy[index][j]);
}
}
@@ -582,7 +603,7 @@ namespace opengm {
else{
for(size_t j=0;j<numStates_[index1];++j) {
//nodeEnergy[index1][j] = operation(optimizedFactor_[i](j), nodeEnergy[index1][j]);
- LabelType coordinates[]={j};
+ LabelType coordinates[]={static_cast<LabelType>(j)};
OperatorType::op(optimizedFactor_[i](coordinates), nodeEnergy[index1][j]);
}
}
@@ -601,7 +622,7 @@ namespace opengm {
else{
for(size_t j=0;j<numStates_[f.variableIndex(0)];++j) {
//nodeEnergy[f.variableIndex(0)][j] = operation(optimizedFactor_[i](j), nodeEnergy[f.variableIndex(0)][j]);
- LabelType coordinates[]={j};
+ LabelType coordinates[]={static_cast<LabelType>(j)};
OperatorType::op(optimizedFactor_[i](coordinates), nodeEnergy[f.variableIndex(0)][j]);
}
}
@@ -627,7 +648,7 @@ namespace opengm {
ACC::neutral(min);
OPENGM_ASSERT(numStates_[index1] == nodeEnergy[index1].size());
for(size_t j1=0;j1<numStates_[index1];++j1) {
- LabelType coordinates[]={j1,j2};
+ LabelType coordinates[]={static_cast<LabelType>(j1),static_cast<LabelType>(j2)};
OperatorType::op(f(coordinates),nodeEnergy[index1][j1],temp);
ACC::op(min,temp,min);
}
@@ -646,7 +667,7 @@ namespace opengm {
ACC::neutral(min);
OPENGM_ASSERT(numStates_[index2] == nodeEnergy[index2].size());
for(size_t j2=0;j2<numStates_[index2];++j2) {
- LabelType coordinates[]={j1,j2};
+ LabelType coordinates[]={static_cast<LabelType>(j1),static_cast<LabelType>(j2)};
OperatorType::op(f(coordinates),nodeEnergy[index2][j2],temp);
ACC::op(min,temp,min);
//if(min>f(j1,j2)*node_energy[index2][j2]) min=f(j1,j2)*node_energy[index2][j2];
diff --git a/include/opengm/inference/auxiliary/fusion_move/fusion_mover.hxx b/include/opengm/inference/auxiliary/fusion_move/fusion_mover.hxx
index ad46fd6..0d0f10d 100644
--- a/include/opengm/inference/auxiliary/fusion_move/fusion_mover.hxx
+++ b/include/opengm/inference/auxiliary/fusion_move/fusion_mover.hxx
@@ -405,6 +405,18 @@ template<class GM, class ACC>
class HlFusionMover{
public:
+
+ template<class _GM>
+ struct RebindGm{
+ typedef HlFusionMover<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef HlFusionMover<_GM, _ACC> type;
+ };
+
+
typedef GM GraphicalModelType;
typedef ACC AccumulationType;
OPENGM_GM_TYPE_TYPEDEFS;
@@ -455,6 +467,21 @@ public:
{
}
+
+ template<class P>
+ Parameter(const P & p)
+ :
+ fusionSolver_(p.fusionSolver_),
+ maxSubgraphSize_(p.maxSubgraphSize_),
+ reducedInf_(p.reducedInf_),
+ connectedComponents_(p.connectedComponents_),
+ tentacles_(p.tentacles_),
+ fusionTimeLimit_(p.fusionTimeLimit_)
+ {
+
+ }
+
+
FusionSolver fusionSolver_;
size_t maxSubgraphSize_;
bool reducedInf_;
diff --git a/include/opengm/inference/auxiliary/fusion_move/permutable_label_fusion_mover.hxx b/include/opengm/inference/auxiliary/fusion_move/permutable_label_fusion_mover.hxx
index b422a94..4da36b6 100644
--- a/include/opengm/inference/auxiliary/fusion_move/permutable_label_fusion_mover.hxx
+++ b/include/opengm/inference/auxiliary/fusion_move/permutable_label_fusion_mover.hxx
@@ -486,6 +486,8 @@ public:
valRes=valB;
res = b;
}
+ assert(false); // FIXME: this function had no return statement; returning an arbitrary value for now
+ return false;
}
@@ -758,7 +760,7 @@ public:
typedef vigra::MergeGraphAdaptor< Graph > MergeGraph;
typedef McClusterOp<GM,ACC> ClusterOp;
typedef typename ClusterOp::Parameter ClusterOpParam;
- typedef vigra::HierarchicalClustering< ClusterOp > HC;
+ typedef vigra::HierarchicalClusteringImpl< ClusterOp > HC;
typedef typename HC::Parameter HcParam;
std::vector<ValueType> weights(accWeights.size(),0.0);
diff --git a/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx b/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
index e27b290..45e4e90 100644
--- a/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
+++ b/include/opengm/inference/auxiliary/lp_solver/lp_solver_interface.hxx
@@ -631,10 +631,14 @@ namespace opengm {
template <class LP_SOLVER_TYPE, class VALUE_TYPE, class INDEX_TYPE, class SOLUTION_ITERATOR_TYPE, class SOLVER_TIMING_TYPE>
inline LPSolverInterface<LP_SOLVER_TYPE, VALUE_TYPE, INDEX_TYPE, SOLUTION_ITERATOR_TYPE, SOLVER_TIMING_TYPE>::Parameter::Parameter()
: numberOfThreads_(LPDef::default_numberOfThreads_),
- verbose_(LPDef::default_verbose_), cutUp_(LPDef::default_cutUp_),
- epOpt_(LPDef::default_epOpt_), epMrk_(LPDef::default_epMrk_),
- epRHS_(LPDef::default_epRHS_), epInt_(LPDef::default_epInt_),
- epAGap_(LPDef::default_epAGap_), epGap_(LPDef::default_epGap_),
+ verbose_(LPDef::default_verbose_),
+ cutUp_(LPDef::default_cutUp_),
+ epOpt_(LPDef::default_epOpt_),
+ epMrk_(LPDef::default_epMrk_),
+ epRHS_(LPDef::default_epRHS_),
+ epInt_(LPDef::default_epInt_),
+ epAGap_(LPDef::default_epAGap_),
+ epGap_(LPDef::default_epGap_),
workMem_(LPDef::default_workMem_),
treeMemoryLimit_(LPDef::default_treeMemoryLimit_),
timeLimit_(LPDef::default_timeLimit_),
diff --git a/include/opengm/inference/auxiliary/minstcutboost.hxx b/include/opengm/inference/auxiliary/minstcutboost.hxx
index 0a479be..3f6642a 100644
--- a/include/opengm/inference/auxiliary/minstcutboost.hxx
+++ b/include/opengm/inference/auxiliary/minstcutboost.hxx
@@ -143,7 +143,7 @@ namespace opengm {
q.push(*(vertices(graph_).first)); // source
while (!q.empty()) {
out_edge_iterator current, end;
- boost::tie(current, end) = out_edges(q.front(), graph_);
+ boost::tuples::tie(current, end) = out_edges(q.front(), graph_);
q.pop();
while (current != end) {
if (graph_[*current].residual > 0) {
diff --git a/include/opengm/inference/bruteforce.hxx b/include/opengm/inference/bruteforce.hxx
index 1189486..2548293 100644
--- a/include/opengm/inference/bruteforce.hxx
+++ b/include/opengm/inference/bruteforce.hxx
@@ -23,7 +23,26 @@ public:
typedef visitors::VerboseVisitor<Bruteforce<GM,ACC> > VerboseVisitorType;
typedef visitors::EmptyVisitor<Bruteforce<GM,ACC> > EmptyVisitorType;
typedef visitors::TimingVisitor<Bruteforce<GM,ACC> > TimingVisitorType;
- class Parameter {};
+
+ template<class _GM>
+ struct RebindGm{
+ typedef Bruteforce<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef Bruteforce<_GM, _ACC > type;
+ };
+
+ struct Parameter {
+ Parameter(){
+
+ }
+ template<class P>
+ Parameter(const P & p){
+
+ }
+ };
Bruteforce(const GraphicalModelType&);
Bruteforce(const GraphicalModelType&, const Parameter&);
diff --git a/include/opengm/inference/combilp.hxx b/include/opengm/inference/combilp.hxx
index 632628d..639dbe9 100644
--- a/include/opengm/inference/combilp.hxx
+++ b/include/opengm/inference/combilp.hxx
@@ -413,6 +413,17 @@ namespace opengm{
typedef ACC AccumulationType;
typedef GM GraphicalModelType;
+ template<class _GM>
+ struct RebindGm{
+ typedef CombiLP<_GM, ACC, LPSOLVER> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef CombiLP<_GM, _ACC, LPSOLVER> type;
+ };
+
+
OPENGM_GM_TYPE_TYPEDEFS;
typedef visitors::VerboseVisitor<CombiLP<GM, ACC, LPSOLVER> > VerboseVisitorType;
typedef visitors::EmptyVisitor<CombiLP<GM, ACC, LPSOLVER> > EmptyVisitorType;
diff --git a/include/opengm/inference/dmc.hxx b/include/opengm/inference/dmc.hxx
index 7bf6535..80b6383 100644
--- a/include/opengm/inference/dmc.hxx
+++ b/include/opengm/inference/dmc.hxx
@@ -59,7 +59,8 @@ public:
void setStartingPoint(typename std::vector<LabelType>::const_iterator);
virtual InferenceTermination arg(std::vector<LabelType>&, const size_t = 1) const ;
virtual ValueType value()const{
-
+ assert(false); // FIXME: this function had no return statement; returning an arbitrary value for now
+ return ValueType();
}
private:
diff --git a/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx b/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
index 1bee922..af8f7b1 100644
--- a/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
+++ b/include/opengm/inference/dualdecomposition/dualdecomposition_bundle.hxx
@@ -49,6 +49,20 @@ namespace opengm {
typedef typename DDBaseType::SubVariableType SubVariableType;
typedef typename DDBaseType::SubVariableListType SubVariableListType;
+
+ template<class _GM>
+ struct RebindGm{
+ typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+ typedef DualDecompositionBundle<_GM, RebindedInf, DUALBLOCK> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+ typedef DualDecompositionBundle<_GM, RebindedInf, DUALBLOCK> type;
+ };
+
+
class Parameter : public DualDecompositionBaseParameter{
public:
/// The relative accuracy which have to be garantee to stop with an approximative solution (set 0 for optimality)
@@ -79,6 +93,20 @@ namespace opengm {
noBundle_(false),
useHeuristicStepsize_(true)
{};
+
+ template<class P>
+ Parameter(const P & p)
+ :
+ minimalRelAccuracy_(p.minimalRelAccuracy_),
+ subPara_(p.subPara_),
+ relativeDualBoundPrecision_(p.relativeDualBoundPrecision_),
+ maxBundlesize_(p.maxBundlesize_),
+ activeBoundFixing_(p.activeBoundFixing_),
+ minDualWeight_(p.minDualWeight_),
+ maxDualWeight_(p.maxDualWeight_),
+ noBundle_(p.noBundle_),
+ useHeuristicStepsize_(p.useHeuristicStepsize_){
+ }
};
using DualDecompositionBase<GmType, DualBlockType >::gm_;
diff --git a/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx b/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
index 2bee7c0..ae3247d 100644
--- a/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
+++ b/include/opengm/inference/dualdecomposition/dualdecomposition_subgradient.hxx
@@ -43,14 +43,35 @@ namespace opengm {
typedef typename DDBaseType::SubVariableType SubVariableType;
typedef typename DDBaseType::SubVariableListType SubVariableListType;
- class Parameter : public DualDecompositionBaseParameter{
- public:
- /// Parameter for Subproblems
- typename InfType::Parameter subPara_;
- bool useAdaptiveStepsize_;
- bool useProjectedAdaptiveStepsize_;
- Parameter() : useAdaptiveStepsize_(false), useProjectedAdaptiveStepsize_(false){};
- };
+ template<class _GM>
+ struct RebindGm{
+ typedef typename INF:: template RebindGm<_GM>::type RebindedInf;
+ typedef DualDecompositionSubGradient<_GM, RebindedInf, DUALBLOCK> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename INF:: template RebindGmAndAcc<_GM,_ACC>::type RebindedInf;
+ typedef DualDecompositionSubGradient<_GM, RebindedInf, DUALBLOCK> type;
+ };
+
+
+ class Parameter : public DualDecompositionBaseParameter{
+ public:
+ /// Parameter for Subproblems
+ typename InfType::Parameter subPara_;
+ bool useAdaptiveStepsize_;
+ bool useProjectedAdaptiveStepsize_;
+ Parameter() : useAdaptiveStepsize_(false), useProjectedAdaptiveStepsize_(false){};
+
+ template<class P>
+ Parameter(const P & p)
+ : subPara_(p.subPara_),
+ useAdaptiveStepsize_(p.useAdaptiveStepsize_),
+ useProjectedAdaptiveStepsize_(p.useProjectedAdaptiveStepsize_){
+
+ }
+ };
using DualDecompositionBase<GmType, DualBlockType >::gm_;
using DualDecompositionBase<GmType, DualBlockType >::subGm_;
diff --git a/include/opengm/inference/dynamicprogramming.hxx b/include/opengm/inference/dynamicprogramming.hxx
index ec402cb..436c36e 100644
--- a/include/opengm/inference/dynamicprogramming.hxx
+++ b/include/opengm/inference/dynamicprogramming.hxx
@@ -24,7 +24,27 @@ namespace opengm {
typedef visitors::VerboseVisitor<DynamicProgramming<GM, ACC> > VerboseVisitorType;
typedef visitors::EmptyVisitor<DynamicProgramming<GM, ACC> > EmptyVisitorType;
typedef visitors::TimingVisitor<DynamicProgramming<GM, ACC> > TimingVisitorType;
+
+
+ template<class _GM>
+ struct RebindGm{
+ typedef DynamicProgramming<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef DynamicProgramming<_GM, _ACC > type;
+ };
+
struct Parameter {
+ Parameter(){
+
+ }
+ template<class P>
+ Parameter(const P &p)
+ : roots_(p.roots_){
+ }
+
std::vector<IndexType> roots_;
};
diff --git a/include/opengm/inference/external/ad3.hxx b/include/opengm/inference/external/ad3.hxx
index 42c9d2d..8ccaaba 100644
--- a/include/opengm/inference/external/ad3.hxx
+++ b/include/opengm/inference/external/ad3.hxx
@@ -30,6 +30,16 @@ namespace opengm {
typedef visitors::EmptyVisitor<AD3Inf<GM,ACC> > EmptyVisitorType;
typedef visitors::TimingVisitor<AD3Inf<GM,ACC> > TimingVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef AD3Inf<_GM,ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef AD3Inf<_GM,_ACC> type;
+ };
+
enum SolverType{
AD3_LP,
AD3_ILP,
@@ -54,6 +64,19 @@ namespace opengm {
{
}
+ template<class P>
+ Parameter(
+ const P & p
+ ) :
+ solverType_(p.solverType_),
+ eta_(p.eta_),
+ adaptEta_(p.adaptEta_),
+ steps_(p.steps_),
+ residualThreshold_(p.residualThreshold_),
+ verbosity_(p.verbosity_)
+ {
+ }
+
SolverType solverType_;
double eta_;
diff --git a/include/opengm/inference/external/daoopt.hxx b/include/opengm/inference/external/daoopt.hxx
index 477553c..709d644 100644
--- a/include/opengm/inference/external/daoopt.hxx
+++ b/include/opengm/inference/external/daoopt.hxx
@@ -51,6 +51,17 @@ namespace opengm {
typedef visitors::EmptyVisitor<DAOOPT<GM> > EmptyVisitorType;
typedef visitors::TimingVisitor<DAOOPT<GM> > TimingVisitorType;
+
+ template<class _GM>
+ struct RebindGm{
+ typedef DAOOPT<_GM> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef DAOOPT<_GM> type;
+ };
+
///Parameter inherits from daoopt ProgramOptions
struct Parameter : public daoopt::ProgramOptions {
/// \brief Constructor
@@ -68,6 +79,21 @@ namespace opengm {
sampleRepeat = 1;
aobbLookahead = 5;
}
+ template<class P>
+ Parameter(const P & p) : daoopt::ProgramOptions() {
+ // set default options, this is not done for all parameters by daoopt
+ subprobOrder = p.subprobOrder;
+ ibound = p.ibound;
+ cbound = p.cbound;
+ cbound_worker = p.cbound_worker;
+ rotateLimit = p.rotateLimit;
+ order_iterations = p.order_iterations;
+ order_timelimit = p.order_timelimit;
+ threads = p.threads;
+ sampleDepth = p.sampleDepth;
+ sampleRepeat = p.sampleRepeat;
+ aobbLookahead = p.aobbLookahead;
+ }
};
// construction
diff --git a/include/opengm/inference/external/fastPD.hxx b/include/opengm/inference/external/fastPD.hxx
index 0c8abe2..f200f30 100644
--- a/include/opengm/inference/external/fastPD.hxx
+++ b/include/opengm/inference/external/fastPD.hxx
@@ -35,6 +35,16 @@ namespace opengm {
typedef visitors::EmptyVisitor<FastPD<GM> > EmptyVisitorType;
typedef visitors::TimingVisitor<FastPD<GM> > TimingVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef FastPD<_GM> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef FastPD<_GM> type;
+ };
+
///Parameter
struct Parameter {
/// \brief Constructor
@@ -42,6 +52,12 @@ namespace opengm {
}
/// number of iterations
size_t numberOfIterations_;
+
+ template<class P>
+ Parameter(const P & p)
+ : numberOfIterations_(p.numberOfIterations_){
+
+ }
};
// construction
FastPD(const GraphicalModelType& gm, const Parameter& para = Parameter());
diff --git a/include/opengm/inference/external/mrflib.hxx b/include/opengm/inference/external/mrflib.hxx
index 99e2055..911e985 100644
--- a/include/opengm/inference/external/mrflib.hxx
+++ b/include/opengm/inference/external/mrflib.hxx
@@ -43,6 +43,17 @@ namespace opengm {
typedef visitors::TimingVisitor<MRFLIB<GM> > TimingVisitorType;
typedef size_t VariableIndex;
///Parameter
+
+ template<class _GM>
+ struct RebindGm{
+ typedef MRFLIB<_GM> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef MRFLIB<_GM> type;
+ };
+
struct Parameter {
/// possible optimization algorithms for MRFLIB
enum InferenceType {ICM, EXPANSION, SWAP, MAXPRODBP, TRWS, BPS};
@@ -60,6 +71,10 @@ namespace opengm {
Parameter(const InferenceType inferenceType = ICM, const EnergyType energyType = VIEW, const size_t numberOfIterations = 1000)
: inferenceType_(inferenceType), energyType_(energyType), numberOfIterations_(numberOfIterations), trwsTolerance_(0.0) {
}
+ template<class P>
+ Parameter(const P & p)
+ : inferenceType_(p.inferenceType_), energyType_(p.energyType_), numberOfIterations_(p.numberOfIterations_), trwsTolerance_(p.trwsTolerance_) {
+ }
};
// construction
MRFLIB(const GraphicalModelType& gm, const Parameter& para = Parameter());
diff --git a/include/opengm/inference/external/qpbo.hxx b/include/opengm/inference/external/qpbo.hxx
index ea0b732..01656d4 100644
--- a/include/opengm/inference/external/qpbo.hxx
+++ b/include/opengm/inference/external/qpbo.hxx
@@ -35,8 +35,20 @@ namespace opengm {
TB0, TB1, TBX
};
- ///Parameter for opengm::external::QPBO
- struct Parameter {
+ template<class _GM>
+ struct RebindGm{
+ typedef QPBO<_GM> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef QPBO<_GM> type;
+ };
+
+
+
+ ///Parameter for opengm::external::QPBO
+ struct Parameter {
/// using probeing technique
bool useProbeing_;
/// forcing strong persistency
@@ -47,6 +59,17 @@ namespace opengm {
std::vector<size_t> label_;
/// \brief constructor
+ template<class P>
+ Parameter(const P & p)
+ :
+ strongPersistency_(p.strongPersistency_),
+ useImproveing_ (p.useImproveing_),
+ useProbeing_ (p.useProbeing_)
+ {
+
+ }
+
+
Parameter() {
strongPersistency_ = true;
useImproveing_ = false;
diff --git a/include/opengm/inference/external/trws.hxx b/include/opengm/inference/external/trws.hxx
index 69ce500..3be5757 100644
--- a/include/opengm/inference/external/trws.hxx
+++ b/include/opengm/inference/external/trws.hxx
@@ -45,10 +45,22 @@ namespace opengm {
typedef visitors::EmptyVisitor<TRWS<GM> > EmptyVisitorType;
typedef visitors::TimingVisitor<TRWS<GM> > TimingVisitorType;
typedef size_t VariableIndex;
+
+
+ template<class _GM>
+ struct RebindGm{
+ typedef TRWS<_GM> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef TRWS<_GM> type;
+ };
+
///Parameter
struct Parameter {
/// possible energy types for TRWS
- enum EnergyType {VIEW, TABLES, TL1, TL2/*, WEIGHTEDTABLE*/};
+ enum EnergyType {VIEW=0, TABLES=1, TL1=2, TL2=3/*, WEIGHTEDTABLE*/};
/// number of iterations
size_t numberOfIterations_;
/// random starting message
@@ -66,6 +78,30 @@ namespace opengm {
/// Calculate MinMarginals
bool calculateMinMarginals_;
/// \brief Constructor
+ template<class P>
+ Parameter(const P & p)
+ : numberOfIterations_(p.numberOfIterations_),
+ useRandomStart_(p.useRandomStart_),
+ useZeroStart_(p.useZeroStart_),
+ doBPS_(p.doBPS_),
+ energyType_(),
+ tolerance_(p.tolerance_),
+ minDualChange_(p.minDualChange_)
+ {
+ if(p.energyType_==0){
+ energyType_ =VIEW;
+ }
+ else if(p.energyType_==1){
+ energyType_ =TABLES;
+ }
+ else if(p.energyType_==2){
+ energyType_ =TL1;
+ }
+ else if(p.energyType_==3){
+ energyType_ =TL2;
+ }
+ };
+
Parameter() {
numberOfIterations_ = 1000;
useRandomStart_ = false;
diff --git a/include/opengm/inference/fusion_based_inf.hxx b/include/opengm/inference/fusion_based_inf.hxx
index d4f2a1c..3ef0c3a 100644
--- a/include/opengm/inference/fusion_based_inf.hxx
+++ b/include/opengm/inference/fusion_based_inf.hxx
@@ -121,8 +121,6 @@ namespace proposal_gen{
-
-
template<class GM, class ACC>
class UpDownGen
{
@@ -947,6 +945,17 @@ public:
typedef typename ProposalGen::Parameter ProposalParameter;
typedef typename FusionMoverType::Parameter FusionParameter;
+ template<class _GM>
+ struct RebindGm{
+ typedef typename PROPOSAL_GEN:: template RebindGm<_GM>::type _P;
+ typedef FusionBasedInf<_GM, _P> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename PROPOSAL_GEN:: template RebindGmAndAcc<_GM, _ACC>::type _P;
+ typedef FusionBasedInf<_GM, _P> type;
+ };
class Parameter
@@ -965,6 +974,16 @@ public:
{
}
+
+ template<class P>
+ Parameter(const P & p)
+ : proposalParam_(p.proposalParam_),
+ fusionParam_(p.fusionParam_),
+ numIt_(p.numIt_),
+ numStopIt_(p.numStopIt_){
+ }
+
+
ProposalParameter proposalParam_;
FusionParameter fusionParam_;
size_t numIt_;
diff --git a/include/opengm/inference/graphcut.hxx b/include/opengm/inference/graphcut.hxx
index 1a65656..314babd 100644
--- a/include/opengm/inference/graphcut.hxx
+++ b/include/opengm/inference/graphcut.hxx
@@ -17,6 +17,17 @@ namespace opengm {
template<class GM, class ACC, class MINSTCUT>
class GraphCut : public Inference<GM, ACC> {
public:
+
+ template<class _GM>
+ struct RebindGm{
+ typedef GraphCut<_GM, ACC, MINSTCUT> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef GraphCut<_GM, _ACC, MINSTCUT> type;
+ };
+
typedef ACC AccumulationType;
typedef GM GraphicalModelType;
OPENGM_GM_TYPE_TYPEDEFS;
@@ -24,12 +35,18 @@ public:
typedef visitors::VerboseVisitor<GraphCut<GM, ACC, MINSTCUT> > VerboseVisitorType;
typedef visitors::EmptyVisitor<GraphCut<GM, ACC, MINSTCUT> > EmptyVisitorType;
typedef visitors::TimingVisitor<GraphCut<GM, ACC, MINSTCUT> > TimingVisitorType;
- struct Parameter {
- Parameter(const ValueType scale = 1)
- : scale_(scale)
- {}
- ValueType scale_;
- };
+ struct Parameter {
+ Parameter(const ValueType scale = 1)
+ : scale_(scale) {
+ }
+
+ template<class P>
+ Parameter(const P & p)
+ : scale_(p.scale_){
+ }
+
+ ValueType scale_;
+ };
GraphCut(const GraphicalModelType&, const Parameter& = Parameter(), ValueType = static_cast<ValueType>(0.0));
GraphCut(size_t numVar, std::vector<size_t> numFacDim, const Parameter& = Parameter(), ValueType = static_cast<ValueType>(0.0));
diff --git a/include/opengm/inference/greedygremlin.hxx b/include/opengm/inference/greedygremlin.hxx
index ae679b3..a282bcd 100644
--- a/include/opengm/inference/greedygremlin.hxx
+++ b/include/opengm/inference/greedygremlin.hxx
@@ -44,8 +44,25 @@ namespace opengm {
typedef visitors::EmptyVisitor<GreedyGremlin<GM, ACC> > EmptyVisitorType;
typedef visitors::TimingVisitor<GreedyGremlin<GM, ACC> > TimingVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef GreedyGremlin<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef GreedyGremlin<_GM, _ACC > type;
+ };
+
+
struct Parameter {
-
+ Parameter(){
+
+ }
+ template<class P>
+ Parameter(const P & p){
+
+ }
};
GreedyGremlin(const GM& gm, Parameter para = Parameter());
virtual std::string name() const {return "GreedyGremlin";}
diff --git a/include/opengm/inference/hqpbo.hxx b/include/opengm/inference/hqpbo.hxx
index 0b41d57..a621ef0 100644
--- a/include/opengm/inference/hqpbo.hxx
+++ b/include/opengm/inference/hqpbo.hxx
@@ -13,11 +13,11 @@
#include "opengm/inference/fix-fusion/fusion-move.hpp"
namespace opengm {
-
+
/// HQPBO Algorithm\n\n
-///
///
-/// \ingroup inference
+///
+/// \ingroup inference
template<class GM, class ACC>
class HQPBO : public Inference<GM, opengm::Minimizer>
{
@@ -29,7 +29,28 @@ public:
typedef visitors::TimingVisitor<HQPBO<GM,ACC> > TimingVisitorType;
typedef visitors::EmptyVisitor<HQPBO<GM,ACC> > EmptyVisitorType;
- struct Parameter {};
+
+ template<class _GM>
+ struct RebindGm{
+ typedef HQPBO<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef HQPBO<_GM, _ACC > type;
+ };
+
+
+
+ struct Parameter {
+ Parameter(){
+
+ }
+ template<class P>
+ Parameter(const P & p){
+
+ }
+ };
HQPBO(const GraphicalModelType&, Parameter = Parameter());
std::string name() const;
@@ -45,10 +66,10 @@ private:
HigherOrderEnergy<ValueType, 10> hoe_;
std::vector<LabelType> conf_;
ValueType bound_;
-};
-
+};
+
template<class GM, class ACC>
-inline void
+inline void
HQPBO<GM,ACC>::setStartingPoint
(
typename std::vector<typename HQPBO<GM,ACC>::LabelType>::const_iterator begin
@@ -86,14 +107,16 @@ HQPBO<GM,ACC>::HQPBO
else
{
unsigned int numAssignments = 1 << size;
- ValueType coeffs[numAssignments];
+ AutoDeleteArray<ValueType> coeffs_array(new ValueType[numAssignments]);
+ ValueType * coeffs = coeffs_array.get();
for (unsigned int subset = 1; subset < numAssignments; ++subset)
{
coeffs[subset] = 0;
}
// For each boolean assignment, get the clique energy at the
// corresponding labeling
- LabelType cliqueLabels[size];
+ AutoDeleteArray<LabelType> cliqueLabels_array(new LabelType[size]);
+ LabelType * cliqueLabels = cliqueLabels_array.get();
for (unsigned int assignment = 0; assignment < numAssignments; ++assignment)
{
for (unsigned int i = 0; i < size; ++i)
@@ -140,7 +163,7 @@ HQPBO<GM,ACC>::HQPBO
hoe_.AddTerm(coeffs[subset], degree, vars);
}
}
- }
+ }
}
template<class GM,class ACC>
@@ -167,7 +190,7 @@ HQPBO<GM,ACC>::infer() {
template<class GM,class ACC>
template<class VISITOR>
inline InferenceTermination
-HQPBO<GM,ACC>::infer(VISITOR & visitor)
+HQPBO<GM,ACC>::infer(VISITOR & visitor)
{
visitor.begin(*this);
kolmogorov::qpbo::QPBO<ValueType> qr(gm_.numberOfVariables(), 0);
diff --git a/include/opengm/inference/icm.hxx b/include/opengm/inference/icm.hxx
index 136e466..7b70ac9 100644
--- a/include/opengm/inference/icm.hxx
+++ b/include/opengm/inference/icm.hxx
@@ -36,33 +36,44 @@ public:
typedef opengm::visitors::EmptyVisitor<ICM<GM,ACC> > EmptyVisitorType;
typedef opengm::visitors::TimingVisitor<ICM<GM,ACC> > TimingVisitorType;
- class Parameter {
- public:
- Parameter(
- const std::vector<LabelType>& startPoint
- )
- : moveType_(SINGLE_VARIABLE),
- startPoint_(startPoint)
- {}
+ template<class _GM>
+ struct RebindGm{
+ typedef ICM<_GM, ACC> type;
+ };
- Parameter(
- MoveType moveType,
- const std::vector<LabelType>& startPoint
- )
- : moveType_(moveType),
- startPoint_(startPoint)
- {}
-
- Parameter(
- MoveType moveType = SINGLE_VARIABLE
- )
- : moveType_(moveType),
- startPoint_()
- {}
-
- MoveType moveType_;
- std::vector<LabelType> startPoint_;
- };
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef ICM<_GM, _ACC> type;
+ };
+
+
+ class Parameter {
+ public:
+
+ Parameter(const std::vector<LabelType>& startPoint)
+ : moveType_(SINGLE_VARIABLE),
+ startPoint_(startPoint)
+ {}
+ Parameter(MoveType moveType, const std::vector<LabelType>& startPoint)
+ : moveType_(moveType),
+ startPoint_(startPoint)
+ {}
+
+ Parameter(MoveType moveType = SINGLE_VARIABLE)
+ : moveType_(moveType),
+ startPoint_()
+ {}
+
+ template<class OP>
+ Parameter(const OP & otherParameter)
+ {
+ moveType_ = otherParameter.moveType_ == 0 ? SINGLE_VARIABLE : FACTOR;
+ startPoint_ = otherParameter.startPoint_;
+ }
+
+ MoveType moveType_;
+ std::vector<LabelType> startPoint_;
+ };
ICM(const GraphicalModelType&);
ICM(const GraphicalModelType&, const Parameter&);
diff --git a/include/opengm/inference/infandflip.hxx b/include/opengm/inference/infandflip.hxx
index dede301..09820c6 100644
--- a/include/opengm/inference/infandflip.hxx
+++ b/include/opengm/inference/infandflip.hxx
@@ -33,6 +33,22 @@ public:
typedef visitors::EmptyVisitor<InfAndFlip<GM, ACC, INF> > EmptyVisitorType;
typedef visitors::TimingVisitor<InfAndFlip<GM, ACC, INF> > TimingVisitorType;
+
+
+ template<class _GM>
+ struct RebindGm{
+ typedef typename INF::template RebindGm<_GM>::type _I;
+ typedef InfAndFlip<_GM, ACC, _I> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename INF::template RebindGmAndAcc<_GM,_ACC>::type _I;
+ typedef InfAndFlip<_GM, _ACC, _I> type;
+ };
+
+
+
struct Parameter
{
Parameter(const size_t maxSubgraphSize=2)
@@ -41,6 +57,13 @@ public:
subPara_(),
warmStartableInf_(false){
}
+ template<class P>
+ Parameter(const P & p)
+ :
+ maxSubgraphSize_(p.maxSubgraphSize_),
+ subPara_(p.subPara_),
+ warmStartableInf_(p.warmStartableInf_){
+ }
size_t maxSubgraphSize_;
typename INF::Parameter subPara_;
diff --git a/include/opengm/inference/inference.hxx b/include/opengm/inference/inference.hxx
index 46a774e..2f52edc 100644
--- a/include/opengm/inference/inference.hxx
+++ b/include/opengm/inference/inference.hxx
@@ -29,6 +29,15 @@ enum InferenceTermination {
INFERENCE_ERROR=4
};
+
+template<class INF>
+inline void infer(const typename INF::GraphicalModelType & gm, const typename INF::Parameter & param, std::vector<typename INF::LabelType> & conf){
+ INF inf(gm, param);
+ inf.infer();
+ inf.arg(conf);
+}
+
+
/// Inference algorithm interface
template <class GM, class ACC>
class Inference
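The free infer<INF>() helper added above wraps the usual construct / infer / arg sequence in a single call. A hedged usage sketch (Gm and gm are assumed to be a model type and instance set up as in the earlier sketch; LazyFlipper is again only an example solver):

    // Sketch: one-call inference through the new helper in inference.hxx.
    typedef opengm::LazyFlipper<Gm, opengm::Minimizer> Inf;

    Inf::Parameter param;                     // default parameters
    std::vector<Inf::LabelType> labeling;     // receives the inferred labeling
    opengm::infer<Inf>(gm, param, labeling);  // constructs Inf, runs infer(), copies arg()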
diff --git a/include/opengm/inference/intersection_based_inf.hxx b/include/opengm/inference/intersection_based_inf.hxx
index 415cfe1..5c39ac2 100644
--- a/include/opengm/inference/intersection_based_inf.hxx
+++ b/include/opengm/inference/intersection_based_inf.hxx
@@ -452,7 +452,7 @@ namespace proposal_gen{
typedef RandMcClusterOp<GM, ACC > Cop;
typedef typename Cop::Parameter CopParam;
- typedef vigra::HierarchicalClustering< Cop > HC;
+ typedef vigra::HierarchicalClusteringImpl< Cop > HC;
typedef typename HC::Parameter HcParam;
diff --git a/include/opengm/inference/lazyflipper.hxx b/include/opengm/inference/lazyflipper.hxx
index c7929f6..53689ab 100644
--- a/include/opengm/inference/lazyflipper.hxx
+++ b/include/opengm/inference/lazyflipper.hxx
@@ -117,6 +117,18 @@ private:
template<class GM, class ACC = Minimizer>
class LazyFlipper : public Inference<GM, ACC> {
public:
+
+ template<class _GM>
+ struct RebindGm{
+ typedef LazyFlipper<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef LazyFlipper<_GM, _ACC> type;
+ };
+
+
typedef ACC AccumulationType;
typedef GM GraphicalModelType;
OPENGM_GM_TYPE_TYPEDEFS;
@@ -145,20 +157,29 @@ public:
const size_t maxSubgraphSize = 2,
const Tribool inferMultilabel = Tribool::Maybe
)
- : maxSubgraphSize_(maxSubgraphSize),
+ : maxSubgraphSize_(maxSubgraphSize),
startingPoint_(),
inferMultilabel_(inferMultilabel)
{}
+ template<class P>
+ Parameter(
+ const P & p
+ )
+ : maxSubgraphSize_(p.maxSubgraphSize_),
+ startingPoint_(p.startingPoint_),
+ inferMultilabel_(p.inferMultilabel_)
+ {}
+
size_t maxSubgraphSize_;
std::vector<LabelType> startingPoint_;
Tribool inferMultilabel_;
};
- LazyFlipper(const GraphicalModelType&, const size_t = 2, const Tribool useMultilabelInference = Tribool::Maybe);
- LazyFlipper(const GraphicalModelType& gm, typename LazyFlipper::Parameter param);
- template<class StateIterator>
- LazyFlipper(const GraphicalModelType&, const size_t, StateIterator, const Tribool useMultilabelInference = Tribool::Maybe);
+ //LazyFlipper(const GraphicalModelType&, const size_t = 2, const Tribool useMultilabelInference = Tribool::Maybe);
+ LazyFlipper(const GraphicalModelType& gm, Parameter param = Parameter());
+ //template<class StateIterator>
+ //LazyFlipper(const GraphicalModelType&, const size_t, StateIterator, const Tribool useMultilabelInference = Tribool::Maybe);
std::string name() const;
const GraphicalModelType& graphicalModel() const;
const size_t maxSubgraphSize() const;
@@ -585,37 +606,37 @@ Forest<T>::setLevelOrderSuccessor(
// implementation of LazyFlipper
-template<class GM, class ACC>
-inline
-LazyFlipper<GM, ACC>::LazyFlipper(
- const GraphicalModelType& gm,
- const size_t maxSubgraphSize,
- const Tribool useMultilabelInference
-)
-: gm_(gm),
- variableAdjacency_(Adjacency(gm.numberOfVariables())),
- movemaker_(Movemaker<GM>(gm)),
- subgraphForest_(SubgraphForest()),
- maxSubgraphSize_(maxSubgraphSize),
- useMultilabelInference_(useMultilabelInference)
-{
- if(gm_.numberOfVariables() == 0) {
- throw RuntimeError("The graphical model has no variables.");
- }
- setMaxSubgraphSize(maxSubgraphSize);
- // initialize activation_
- activation_[0].append(gm_.numberOfVariables());
- activation_[1].append(gm_.numberOfVariables());
- // initialize variableAdjacency_
- for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
- const FactorType& factor = gm_[j];
- for(size_t m=0; m<factor.numberOfVariables(); ++m) {
- for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
- variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
- }
- }
- }
-}
+//template<class GM, class ACC>
+//inline
+//LazyFlipper<GM, ACC>::LazyFlipper(
+// const GraphicalModelType& gm,
+// const size_t maxSubgraphSize,
+// const Tribool useMultilabelInference
+//)
+//: gm_(gm),
+// variableAdjacency_(Adjacency(gm.numberOfVariables())),
+// movemaker_(Movemaker<GM>(gm)),
+// subgraphForest_(SubgraphForest()),
+// maxSubgraphSize_(maxSubgraphSize),
+// useMultilabelInference_(useMultilabelInference)
+//{
+// if(gm_.numberOfVariables() == 0) {
+// throw RuntimeError("The graphical model has no variables.");
+// }
+// setMaxSubgraphSize(maxSubgraphSize);
+// // initialize activation_
+// activation_[0].append(gm_.numberOfVariables());
+// activation_[1].append(gm_.numberOfVariables());
+// // initialize variableAdjacency_
+// for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
+// const FactorType& factor = gm_[j];
+// for(size_t m=0; m<factor.numberOfVariables(); ++m) {
+// for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
+// variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
+// }
+// }
+// }
+//}
template<class GM, class ACC>
inline
@@ -656,40 +677,40 @@ inline void
LazyFlipper<GM, ACC>::reset()
{}
-/// \todo next version: get rid of redundancy with other constructor
-template<class GM, class ACC>
-template<class StateIterator>
-inline
-LazyFlipper<GM, ACC>::LazyFlipper(
- const GraphicalModelType& gm,
- const size_t maxSubgraphSize,
- StateIterator it,
- const Tribool useMultilabelInference
-)
-: gm_(gm),
- variableAdjacency_(Adjacency(gm_.numberOfVariables())),
- movemaker_(Movemaker<GM>(gm, it)),
- subgraphForest_(SubgraphForest()),
- maxSubgraphSize_(2),
- useMultilabelInference_(useMultilabelInference)
-{
- if(gm_.numberOfVariables() == 0) {
- throw RuntimeError("The graphical model has no variables.");
- }
- setMaxSubgraphSize(maxSubgraphSize);
- // initialize activation_
- activation_[0].append(gm_.numberOfVariables());
- activation_[1].append(gm_.numberOfVariables());
- // initialize variableAdjacency_
- for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
- const FactorType& factor = gm_[j];
- for(size_t m=0; m<factor.numberOfVariables(); ++m) {
- for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
- variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
- }
- }
- }
-}
+///// \todo next version: get rid of redundancy with other constructor
+//template<class GM, class ACC>
+//template<class StateIterator>
+//inline
+//LazyFlipper<GM, ACC>::LazyFlipper(
+// const GraphicalModelType& gm,
+// const size_t maxSubgraphSize,
+// StateIterator it,
+// const Tribool useMultilabelInference
+//)
+//: gm_(gm),
+// variableAdjacency_(Adjacency(gm_.numberOfVariables())),
+// movemaker_(Movemaker<GM>(gm, it)),
+// subgraphForest_(SubgraphForest()),
+// maxSubgraphSize_(2),
+// useMultilabelInference_(useMultilabelInference)
+//{
+// if(gm_.numberOfVariables() == 0) {
+// throw RuntimeError("The graphical model has no variables.");
+// }
+// setMaxSubgraphSize(maxSubgraphSize);
+// // initialize activation_
+// activation_[0].append(gm_.numberOfVariables());
+// activation_[1].append(gm_.numberOfVariables());
+// // initialize variableAdjacency_
+// for(size_t j=0; j<gm_.numberOfFactors(); ++j) {
+// const FactorType& factor = gm_[j];
+// for(size_t m=0; m<factor.numberOfVariables(); ++m) {
+// for(size_t n=m+1; n<factor.numberOfVariables(); ++n) {
+// variableAdjacency_.connect(factor.variableIndex(m), factor.variableIndex(n));
+// }
+// }
+// }
+//}
template<class GM, class ACC>
inline void
diff --git a/include/opengm/inference/loc.hxx b/include/opengm/inference/loc.hxx
index aa7c821..9ecea8d 100644
--- a/include/opengm/inference/loc.hxx
+++ b/include/opengm/inference/loc.hxx
@@ -81,6 +81,16 @@ public:
typedef opengm::LPCplex<SubGmType,AccumulationType> LpCplexSubInf;
#endif
+ template<class _GM>
+ struct RebindGm{
+ typedef LOC<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef LOC<_GM, _ACC > type;
+ };
+
class Parameter {
public:
@@ -115,6 +125,25 @@ public:
{
}
+
+ template<class P>
+ Parameter
+ (
+ const P & p
+ )
+ : solver_(p.solver_),
+ phi_(p.phi_),
+ maxBlockRadius_(p.maxBlockRadius_),
+ maxTreeRadius_(p.maxTreeRadius_),
+ pFastHeuristic_(p.pFastHeuristic_),
+ maxIterations_(p.maxIterations_),
+ stopAfterNBadIterations_(p.stopAfterNBadIterations_),
+ maxBlockSize_(p.maxBlockSize_),
+ treeRuns_(p.treeRuns_)
+ {
+
+ }
+
// subsolver used for submodel ("ad3" or "astar" so far)
std::string solver_;
/// phi of the truncated geometric distribution is used to select a certain subgraph radius with a certain probability
diff --git a/include/opengm/inference/lp_inference_base.hxx b/include/opengm/inference/lp_inference_base.hxx
index 36fc750..cf47706 100644
--- a/include/opengm/inference/lp_inference_base.hxx
+++ b/include/opengm/inference/lp_inference_base.hxx
@@ -1842,7 +1842,6 @@ inline LPInferenceBase<LP_INFERENCE_TYPE>::LPInferenceBase(const GraphicalModelT
if(!opengm::meta::Compare<OperatorType, opengm::Adder>::value) {
throw RuntimeError("This implementation does only supports Min-Sum-Semiring and Max-Sum-Semiring.");
}
-
// sort factors
sortFactors();
diff --git a/include/opengm/inference/lpcplex.hxx b/include/opengm/inference/lpcplex.hxx
index 28fafcf..d78f70f 100644
--- a/include/opengm/inference/lpcplex.hxx
+++ b/include/opengm/inference/lpcplex.hxx
@@ -45,6 +45,16 @@ public:
typedef visitors::EmptyVisitor<LPCplex<GM,ACC> > EmptyVisitorType;
typedef visitors::TimingVisitor<LPCplex<GM,ACC> > TimingVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef LPCplex<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef LPCplex<_GM, _ACC > type;
+ };
+
// enum LP_SOLVER {LP_SOLVER_AUTO, LP_SOLVER_PRIMAL_SIMPLEX, LP_SOLVER_DUAL_SIMPLEX, LP_SOLVER_NETWORK_SIMPLEX, LP_SOLVER_BARRIER, LP_SOLVER_SIFTING, LP_SOLVER_CONCURRENT};
// enum LP_PRESOLVE{LP_PRESOLVE_AUTO, LP_PRESOLVE_OFF, LP_PRESOLVE_CONSEVATIVE, LP_PRESOLVE_AGRESSIVE};
@@ -52,9 +62,9 @@ public:
class Parameter {
public:
- bool integerConstraint_; // ILP=true, 1order-LP=false
- int numberOfThreads_; // number of threads (0=autosect)
- bool verbose_; // switch on/off verbode mode
+ bool integerConstraint_; // ILP=true, 1order-LP=false
+ int numberOfThreads_; // number of threads (0=autoselect)
+ bool verbose_; // switch on/off verbose mode
double cutUp_; // upper cutoff
double epOpt_; // Optimality tolerance
double epMrk_; // Markowitz tolerance
@@ -65,25 +75,62 @@ public:
double workMem_; // maximal ammount of memory in MB used for workspace
double treeMemoryLimit_; // maximal ammount of memory in MB used for treee
double timeLimit_; // maximal time in seconds the solver has
- int probeingLevel_;
+ int probeingLevel_;
//int coverCutLevel_;
//int disjunctiverCutLevel_;
//int cliqueCutLevel_;
//int MIRCutLevel_;
- LP_SOLVER rootAlg_;
- LP_SOLVER nodeAlg_;
- LP_PRESOLVE presolve_;
+ LP_SOLVER rootAlg_;
+ LP_SOLVER nodeAlg_;
+ LP_PRESOLVE presolve_;
MIP_EMPHASIS mipEmphasis_;
- MIP_CUT cutLevel_; // Determines whether or not to cuts for the problem and how aggressively (will be overruled by specific ones).
- MIP_CUT cliqueCutLevel_; // Determines whether or not to generate clique cuts for the problem and how aggressively.
- MIP_CUT coverCutLevel_; // Determines whether or not to generate cover cuts for the problem and how aggressively.
- MIP_CUT gubCutLevel_; // Determines whether or not to generate generalized upper bound (GUB) cuts for the problem and how aggressively.
- MIP_CUT mirCutLevel_; // Determines whether or not mixed integer rounding (MIR) cuts should be generated for the problem and how aggressively.
- MIP_CUT iboundCutLevel_; // Determines whether or not to generate implied bound cuts for the problem and how aggressively.
- MIP_CUT flowcoverCutLevel_; //Determines whether or not to generate flow cover cuts for the problem and how aggressively.
- MIP_CUT flowpathCutLevel_; //Determines whether or not to generate flow path cuts for the problem and how aggressively.
- MIP_CUT disjunctCutLevel_; // Determines whether or not to generate disjunctive cuts for the problem and how aggressively.
- MIP_CUT gomoryCutLevel_; // Determines whether or not to generate gomory fractional cuts for the problem and how aggressively.
+ MIP_CUT cutLevel_; // Determines whether or not to generate cuts for the problem and how aggressively (will be overruled by specific ones).
+ MIP_CUT cliqueCutLevel_; // Determines whether or not to generate clique cuts for the problem and how aggressively.
+ MIP_CUT coverCutLevel_; // Determines whether or not to generate cover cuts for the problem and how aggressively.
+ MIP_CUT gubCutLevel_; // Determines whether or not to generate generalized upper bound (GUB) cuts for the problem and how aggressively.
+ MIP_CUT mirCutLevel_; // Determines whether or not mixed integer rounding (MIR) cuts should be generated for the problem and how aggressively.
+ MIP_CUT iboundCutLevel_; // Determines whether or not to generate implied bound cuts for the problem and how aggressively.
+ MIP_CUT flowcoverCutLevel_; //Determines whether or not to generate flow cover cuts for the problem and how aggressively.
+ MIP_CUT flowpathCutLevel_; //Determines whether or not to generate flow path cuts for the problem and how aggressively.
+ MIP_CUT disjunctCutLevel_; // Determines whether or not to generate disjunctive cuts for the problem and how aggressively.
+ MIP_CUT gomoryCutLevel_; // Determines whether or not to generate gomory fractional cuts for the problem and how aggressively.
+
+ template<class P>
+ Parameter(
+ const P & p
+ ):
+
+ integerConstraint_(p.integerConstraint_),
+ numberOfThreads_(p.numberOfThreads_),
+ verbose_(p.verbose_),
+ cutUp_(p.cutUp_),
+ epOpt_(p.epOpt_),
+ epMrk_(p.epMrk_),
+ epRHS_(p.epRHS_),
+ epInt_(p.epInt_),
+ epAGap_(p.epAGap_),
+ epGap_(p.epGap_),
+ workMem_(p.workMem_),
+ treeMemoryLimit_(p.treeMemoryLimit_),
+ timeLimit_(p.timeLimit_),
+ probeingLevel_(p.probeingLevel_),
+ rootAlg_(p.rootAlg_),
+ nodeAlg_(p.nodeAlg_),
+ presolve_(p.presolve_),
+ mipEmphasis_(p.mipEmphasis_),
+ cutLevel_(p.cutLevel_),
+ cliqueCutLevel_(p.cliqueCutLevel_),
+ coverCutLevel_(p.coverCutLevel_),
+ gubCutLevel_(p.gubCutLevel_),
+ mirCutLevel_(p.mirCutLevel_),
+ iboundCutLevel_(p.iboundCutLevel_),
+ flowcoverCutLevel_(p.flowcoverCutLevel_),
+ flowpathCutLevel_(p.flowpathCutLevel_),
+ disjunctCutLevel_(p.disjunctCutLevel_),
+ gomoryCutLevel_(p.gomoryCutLevel_)
+ {
+
+ }
/// constructor
/// \param cutUp upper cutoff - assume that: min_x f(x) <= cutUp
@@ -435,7 +482,7 @@ LPCplex<GM, ACC>::infer
break;
}
- // MIP EMPHASIS
+ // MIP EMPHASIS
switch(parameter_.mipEmphasis_) {
case MIP_EMPHASIS_BALANCED:
cplex_.setParam(IloCplex::MIPEmphasis, 0);
@@ -456,11 +503,11 @@ LPCplex<GM, ACC>::infer
// verbose options
if(parameter_.verbose_ == false) {
- cplex_.setParam(IloCplex::MIPDisplay, 0);
- cplex_.setParam(IloCplex::BarDisplay, 0);
- cplex_.setParam(IloCplex::SimDisplay, 0);
- cplex_.setParam(IloCplex::NetDisplay, 0);
- cplex_.setParam(IloCplex::SiftDisplay, 0);
+ cplex_.setParam(IloCplex::MIPDisplay, 0);
+ cplex_.setParam(IloCplex::BarDisplay, 0);
+ cplex_.setParam(IloCplex::SimDisplay, 0);
+ cplex_.setParam(IloCplex::NetDisplay, 0);
+ cplex_.setParam(IloCplex::SiftDisplay, 0);
}
// tolarance settings
@@ -508,12 +555,13 @@ LPCplex<GM, ACC>::infer
//cplex_.setParam(IloCplex::MIRCuts, parameter_.MIRCutLevel_);
// solve problem
+
if(!cplex_.solve()) {
std::cout << "failed to optimize. " <<cplex_.getStatus() << std::endl;
inferenceStarted_ = 0;
return UNKNOWN;
}
- cplex_.getValues(sol_, x_);
+ cplex_.getValues(sol_, x_);
}
catch(IloCplex::Exception e) {
std::cout << "caught CPLEX exception: " << e << std::endl;
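The templated Parameter constructor added above follows a pattern repeated for most solvers in this commit: it copies the Parameter of any other instantiation of the same solver member by member, so that a parameter object set up for one graphical-model type can initialise the parameter of the solver rebound to another model type. A minimal sketch of the pattern; SolverParameter and its fields are illustrative stand-ins, not the actual LPCplex parameter:

    // Sketch of the converting-constructor pattern used throughout this commit.
    // SolverParameter, verbose_ and tolerance_ are placeholder names.
    template<class VALUE>
    struct SolverParameter {
        bool  verbose_;
        VALUE tolerance_;

        SolverParameter() : verbose_(false), tolerance_(VALUE(1e-6)) {}

        // accept the parameter of any other instantiation that exposes the same fields
        template<class P>
        SolverParameter(const P & p)
        :   verbose_(p.verbose_),
            tolerance_(p.tolerance_)
        {}
    };

    // SolverParameter<float>  pf;
    // SolverParameter<double> pd(pf);   // member-wise conversion across instantiations

Because the constructor is unconstrained, any argument type that exposes the listed members is accepted; this is also the likely reason several call sites in the tutorials and tests below now pass explicitly typed arguments (see the note near those changes).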
diff --git a/include/opengm/inference/lpcplex2.hxx b/include/opengm/inference/lpcplex2.hxx
index 8769055..b2a5d20 100644
--- a/include/opengm/inference/lpcplex2.hxx
+++ b/include/opengm/inference/lpcplex2.hxx
@@ -25,6 +25,16 @@ public:
// public member functions
virtual std::string name() const;
+
+ template<class _GM>
+ struct RebindGm{
+ typedef LPCplex2<_GM, ACC_TYPE> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef LPCplex2<_GM, _ACC > type;
+ };
};
template<class GM_TYPE, class ACC_TYPE>
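The RebindGm / RebindGmAndAcc member templates added here, and to essentially every inference class in this commit, are compile-time metafunctions: given an inference type fixed to one graphical-model type, they name the same algorithm retyped for another model and, optionally, another accumulator. Generic code, such as the learning branch this commit merges from, can thus instantiate a matching solver for a transformed model without knowing which concrete solver template it holds. A minimal sketch under assumed names (SomeSolver, ModelA, ModelB and Min are placeholders; only RebindGm / RebindGmAndAcc are the real OpenGM names):

    // Sketch of the rebind pattern added to the inference classes in this commit.
    template<class GM, class ACC>
    class SomeSolver {
    public:
        template<class _GM>
        struct RebindGm       { typedef SomeSolver<_GM, ACC>  type; };

        template<class _GM, class _ACC>
        struct RebindGmAndAcc { typedef SomeSolver<_GM, _ACC> type; };
    };

    // Given an unknown solver type INF = SomeSolver<ModelA, Min>, generic code writes
    //     typedef typename INF::template RebindGm<ModelB>::type InfOnB;
    // and obtains SomeSolver<ModelB, Min> without ever naming SomeSolver itself.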
diff --git a/include/opengm/inference/lpgurobi.hxx b/include/opengm/inference/lpgurobi.hxx
index 1eab43f..671f340 100644
--- a/include/opengm/inference/lpgurobi.hxx
+++ b/include/opengm/inference/lpgurobi.hxx
@@ -46,11 +46,21 @@ public:
typedef visitors::EmptyVisitor< LPGurobi<GM, ACC> > EmptyVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef LPGurobi<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef LPGurobi<_GM, _ACC > type;
+ };
+
class Parameter {
public:
- bool integerConstraint_; // ILP=true, 1order-LP=false
- int numberOfThreads_; // number of threads (0=autosect)
- bool verbose_; // switch on/off verbode mode
+ bool integerConstraint_; // ILP=true, first-order LP=false
+ int numberOfThreads_; // number of threads (0=autoselect)
+ bool verbose_; // switch verbose mode on/off
double cutUp_; // upper cutoff
double epOpt_; // Optimality tolerance
double epMrk_; // Markowitz tolerance
@@ -61,26 +71,62 @@ public:
double workMem_; // maximal ammount of memory in MB used for workspace
double treeMemoryLimit_; // maximal ammount of memory in MB used for treee
double timeLimit_; // maximal time in seconds the solver has
- int probeingLevel_;
+ int probeingLevel_;
//int coverCutLevel_;
//int disjunctiverCutLevel_;
//int cliqueCutLevel_;
//int MIRCutLevel_;
//int presolveLevel_;
- LP_SOLVER rootAlg_;
- LP_SOLVER nodeAlg_;
+ LP_SOLVER rootAlg_;
+ LP_SOLVER nodeAlg_;
MIP_EMPHASIS mipFocus_;
- LP_PRESOLVE presolve_;
- MIP_CUT cutLevel_; // Determines whether or not to cuts for the problem and how aggressively (will be overruled by specific ones).
- MIP_CUT cliqueCutLevel_; // Determines whether or not to generate clique cuts for the problem and how aggressively.
- MIP_CUT coverCutLevel_; // Determines whether or not to generate cover cuts for the problem and how aggressively.
- MIP_CUT gubCutLevel_; // Determines whether or not to generate generalized upper bound (GUB) cuts for the problem and how aggressively.
- MIP_CUT mirCutLevel_; // Determines whether or not mixed integer rounding (MIR) cuts should be generated for the problem and how aggressively.
- MIP_CUT iboundCutLevel_; // Determines whether or not to generate implied bound cuts for the problem and how aggressively.
- MIP_CUT flowcoverCutLevel_; //Determines whether or not to generate flow cover cuts for the problem and how aggressively.
- MIP_CUT flowpathCutLevel_; //Determines whether or not to generate flow path cuts for the problem and how aggressively.
- MIP_CUT disjunctCutLevel_; // Determines whether or not to generate disjunctive cuts for the problem and how aggressively.
- MIP_CUT gomoryCutLevel_; // Determines whether or not to generate gomory fractional cuts for the problem and how aggressively.
+ LP_PRESOLVE presolve_;
+ MIP_CUT cutLevel_; // Determines whether or not to generate cuts for the problem and how aggressively (will be overruled by specific ones).
+ MIP_CUT cliqueCutLevel_; // Determines whether or not to generate clique cuts for the problem and how aggressively.
+ MIP_CUT coverCutLevel_; // Determines whether or not to generate cover cuts for the problem and how aggressively.
+ MIP_CUT gubCutLevel_; // Determines whether or not to generate generalized upper bound (GUB) cuts for the problem and how aggressively.
+ MIP_CUT mirCutLevel_; // Determines whether or not mixed integer rounding (MIR) cuts should be generated for the problem and how aggressively.
+ MIP_CUT iboundCutLevel_; // Determines whether or not to generate implied bound cuts for the problem and how aggressively.
+ MIP_CUT flowcoverCutLevel_; //Determines whether or not to generate flow cover cuts for the problem and how aggressively.
+ MIP_CUT flowpathCutLevel_; //Determines whether or not to generate flow path cuts for the problem and how aggressively.
+ MIP_CUT disjunctCutLevel_; // Determines whether or not to generate disjunctive cuts for the problem and how aggressively.
+ MIP_CUT gomoryCutLevel_; // Determines whether or not to generate Gomory fractional cuts for the problem and how aggressively.
+
+
+ template<class P>
+ Parameter(const P & p )
+ :
+ integerConstraint_(p.integerConstraint_),
+ numberOfThreads_(p.numberOfThreads_),
+ verbose_(p.verbose_),
+ cutUp_(p.cutUp_),
+ epOpt_(p.epOpt_),
+ epMrk_(p.epMrk_),
+ epRHS_(p.epRHS_),
+ epInt_(p.epInt_),
+ epAGap_(p.epAGap_),
+ epGap_(p.epGap_),
+ workMem_(p.workMem_),
+ treeMemoryLimit_(p.treeMemoryLimit_),
+ timeLimit_(p.timeLimit_),
+ probeingLevel_(p.probeingLevel_),
+ rootAlg_(p.rootAlg_),
+ nodeAlg_(p.nodeAlg_),
+ mipFocus_(p.mipFocus_),
+ presolve_(p.presolve_),
+ cutLevel_(p.cutLevel_),
+ cliqueCutLevel_(p.cliqueCutLevel_),
+ coverCutLevel_(p.coverCutLevel_),
+ gubCutLevel_(p.gubCutLevel_),
+ mirCutLevel_(p.mirCutLevel_),
+ iboundCutLevel_(p.iboundCutLevel_),
+ flowcoverCutLevel_(p.flowcoverCutLevel_),
+ flowpathCutLevel_(p.flowpathCutLevel_),
+ disjunctCutLevel_(p.disjunctCutLevel_),
+ gomoryCutLevel_(p.gomoryCutLevel_)
+ {
+
+ }
/// constructor
/// \param cutUp upper cutoff - assume that: min_x f(x) <= cutUp
diff --git a/include/opengm/inference/lpgurobi2.hxx b/include/opengm/inference/lpgurobi2.hxx
index a9c1a9c..192b038 100644
--- a/include/opengm/inference/lpgurobi2.hxx
+++ b/include/opengm/inference/lpgurobi2.hxx
@@ -25,6 +25,16 @@ public:
// public member functions
virtual std::string name() const;
+
+ template<class _GM>
+ struct RebindGm{
+ typedef LPGurobi2<_GM, ACC_TYPE> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef LPGurobi2<_GM, _ACC > type;
+ };
};
template<class GM_TYPE, class ACC_TYPE>
diff --git a/include/opengm/inference/lsatr.hxx b/include/opengm/inference/lsatr.hxx
index 1ddfa42..2772356 100644
--- a/include/opengm/inference/lsatr.hxx
+++ b/include/opengm/inference/lsatr.hxx
@@ -97,7 +97,19 @@ namespace opengm {
typedef opengm::visitors::VerboseVisitor<LSA_TR<GM,ACC> > VerboseVisitorType;
typedef opengm::visitors::EmptyVisitor<LSA_TR<GM,ACC> > EmptyVisitorType;
typedef opengm::visitors::TimingVisitor<LSA_TR<GM,ACC> > TimingVisitorType;
-
+
+
+ template<class _GM>
+ struct RebindGm{
+ typedef LSA_TR<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef LSA_TR<_GM, _ACC > type;
+ };
+
+
class Parameter {
public:
enum DISTANCE {HAMMING, EUCLIDEAN};
@@ -118,6 +130,18 @@ namespace opengm {
reductionRatio_ = 0.25; // used to decide whether to increase or decrease lambda using the multiplier
distance_ = EUCLIDEAN;
}
+
+ template<class P>
+ Parameter(const P & p)
+ : randSeed_(p.randSeed_),
+ maxLambda_(p.maxLambda_),
+ initialLambda_(p.initialLambda_),
+ precisionLambda_(p.precisionLambda_),
+ lambdaMultiplier_(p.lambdaMultiplier_),
+ reductionRatio_(p.reductionRatio_),
+ distance_(p.distance_){
+
+ }
};
LSA_TR(const GraphicalModelType&);
diff --git a/include/opengm/inference/messagepassing/messagepassing.hxx b/include/opengm/inference/messagepassing/messagepassing.hxx
index d54c332..56a854b 100644
--- a/include/opengm/inference/messagepassing/messagepassing.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing.hxx
@@ -64,6 +64,21 @@ public:
/// Visitor
typedef visitors::EmptyVisitor<MessagePassing<GM, ACC, UPDATE_RULES, DIST> > EmptyVisitorType;
+
+ template<class _GM>
+ struct RebindGm{
+ typedef typename UPDATE_RULES:: template RebindGm<_GM>::type UR;
+ typedef MessagePassing<_GM, ACC, UR, DIST> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename UPDATE_RULES:: template RebindGmAndAcc<_GM,_ACC>::type UR;
+ typedef MessagePassing<_GM, _ACC, UR, DIST> type;
+ };
+
+
+
struct Parameter {
typedef typename UPDATE_RULES::SpecialParameterType SpecialParameterType;
Parameter
@@ -82,6 +97,21 @@ public:
specialParameter_(specialParameter),
isAcyclic_(isAcyclic)
{}
+
+ template<class P>
+ Parameter
+ (
+ const P & p
+ )
+ : maximumNumberOfSteps_(p.maximumNumberOfSteps_),
+ bound_(p.bound_),
+ damping_(p.damping_),
+ inferSequential_(p.inferSequential_),
+ useNormalization_(p.useNormalization_),
+ specialParameter_(p.specialParameter_),
+ isAcyclic_(p.isAcyclic_)
+ {}
+
size_t maximumNumberOfSteps_;
ValueType bound_;
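MessagePassing cannot rebind itself in isolation: its UPDATE_RULES argument is itself parameterised on the model, so the RebindGm above first rebinds the update rules and then rebuilds the solver from the rebound rules. ReducedInference and SelfFusion below apply the same two-step pattern to their nested solver. A compact sketch with placeholder names (Rules, Solver, ModelA, ModelB):

    // Sketch: a wrapper forwarding RebindGm to a nested policy, as MessagePassing does above.
    template<class GM>
    struct Rules {
        template<class _GM> struct RebindGm { typedef Rules<_GM> type; };
    };

    template<class GM, class RULES>
    struct Solver {
        template<class _GM>
        struct RebindGm {
            typedef typename RULES::template RebindGm<_GM>::type R;  // rebind the policy first
            typedef Solver<_GM, R> type;                             // then the wrapper itself
        };
    };

    // Solver<ModelA, Rules<ModelA> >::RebindGm<ModelB>::type
    //     is Solver<ModelB, Rules<ModelB> >.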
diff --git a/include/opengm/inference/messagepassing/messagepassing_bp.hxx b/include/opengm/inference/messagepassing/messagepassing_bp.hxx
index 4146588..c76d74f 100644
--- a/include/opengm/inference/messagepassing/messagepassing_bp.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing_bp.hxx
@@ -87,6 +87,17 @@ namespace opengm {
typedef VariableHullBP<GM, BufferType, OperatorType, ACC> VariableHullType;
typedef meta::EmptyType SpecialParameterType;
+ template<class _GM>
+ struct RebindGm{
+ typedef BeliefPropagationUpdateRules<_GM, ACC, BUFFER> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef BeliefPropagationUpdateRules<_GM, _ACC, BUFFER> type;
+ };
+
+
template<class MP_PARAM>
static void initializeSpecialParameter(const GM& gm, MP_PARAM& mpParameter)
{}
diff --git a/include/opengm/inference/messagepassing/messagepassing_trbp.hxx b/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
index 92b8de7..9d16fbb 100644
--- a/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
+++ b/include/opengm/inference/messagepassing/messagepassing_trbp.hxx
@@ -82,6 +82,15 @@ namespace opengm {
typedef FactorHullTRBP<GM, BUFFER, OperatorType, ACC> FactorHullType;
typedef VariableHullTRBP<GM, BUFFER, OperatorType, ACC> VariableHullType;
typedef std::vector<ValueType> SpecialParameterType;
+ template<class _GM>
+ struct RebindGm{
+ typedef TrbpUpdateRules<_GM, ACC, BUFFER> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef TrbpUpdateRules<_GM, _ACC, BUFFER> type;
+ };
template<class MP_PARAM>
static void initializeSpecialParameter(const GM& gm,MP_PARAM& mpParameter) {
diff --git a/include/opengm/inference/movemaker.hxx b/include/opengm/inference/movemaker.hxx
index caf3478..468115c 100644
--- a/include/opengm/inference/movemaker.hxx
+++ b/include/opengm/inference/movemaker.hxx
@@ -32,6 +32,14 @@ public:
typedef opengm::GraphicalModel<ValueType, OperatorType, FunctionTypeList, SubGmSpace> SubGmType;
/// \endcond
+
+ template<class _GM>
+ struct RebindGm{
+ typedef Movemaker<_GM> type;
+ };
+
+
+
Movemaker(const GraphicalModelType&);
template<class StateIterator>
Movemaker(const GraphicalModelType&, StateIterator);
diff --git a/include/opengm/inference/mqpbo.hxx b/include/opengm/inference/mqpbo.hxx
index c1fd399..44ee5e0 100644
--- a/include/opengm/inference/mqpbo.hxx
+++ b/include/opengm/inference/mqpbo.hxx
@@ -46,11 +46,36 @@ namespace opengm {
typedef visitors::TimingVisitor<MQPBO<GM, ACC> > TimingVisitorType;
typedef ValueType GraphValueType;
+ template<class _GM>
+ struct RebindGm{
+ typedef MQPBO<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef MQPBO<_GM, _ACC > type;
+ };
+
+
+
enum PermutationType {NONE, RANDOM, MINMARG};
class Parameter{
public:
Parameter(): useKovtunsMethod_(true), probing_(false), strongPersistency_(false), rounds_(0), permutationType_(NONE) {};
+
+
+ template<class P>
+ Parameter(const P & p)
+ : label_(p.label_),
+ useKovtunsMethod_(p.useKovtunsMethod_),
+ probing_(p.probing_),
+ strongPersistency_(p.strongPersistency_),
+ rounds_(p.rounds_),
+ permutationType_(p.permutationType_){
+
+ }
+
std::vector<LabelType> label_;
bool useKovtunsMethod_;
const bool probing_; //do not use this!
@@ -164,12 +189,12 @@ namespace opengm {
}
if(param_.rounds_>0){
- std::cout << "Large" <<std::endl;
+ //std::cout << "Large" <<std::endl;
qpbo_ = new kolmogorov::qpbo::QPBO<GraphValueType > (numNodes_, numEdges_); // max number of nodes & edges
qpbo_->AddNode(numNodes_);
}
else{
- std::cout << "Small" <<std::endl;
+ //std::cout << "Small" <<std::endl;
qpbo_ = new kolmogorov::qpbo::QPBO<GraphValueType > (gm_.numberOfVariables(), numSOF); // max number of nodes & edges
qpbo_->AddNode(gm_.numberOfVariables());
}
@@ -820,7 +845,7 @@ namespace opengm {
if(param_.useKovtunsMethod_){
if(isPotts){
- std::cout << "Use Kovtuns method for potts"<<std::endl;
+ //std::cout << "Use Kovtuns method for potts"<<std::endl;
for(LabelType l=0; l<maxNumberOfLabels; ++l) {
testQuess(l);
double xoptimality = optimality();
diff --git a/include/opengm/inference/multicut.hxx b/include/opengm/inference/multicut.hxx
index 7933d17..196e15c 100644
--- a/include/opengm/inference/multicut.hxx
+++ b/include/opengm/inference/multicut.hxx
@@ -84,7 +84,15 @@ public:
typedef visitors::VerboseVisitor<Multicut<GM,ACC> > VerboseVisitorType;
typedef visitors::EmptyVisitor<Multicut<GM,ACC> > EmptyVisitorType;
typedef visitors::TimingVisitor<Multicut<GM,ACC> > TimingVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef Multicut<_GM, ACC> type;
+ };
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef Multicut<_GM, _ACC > type;
+ };
#ifdef WITH_BOOST
typedef boost::unordered_map<IndexType, LPIndexType> EdgeMapType;
@@ -120,6 +128,7 @@ public:
bool useBufferedStates_;
bool initializeWith3Cycles_;
+
/// \param numThreads number of threads that should be used (default = 0 [automatic])
/// \param cutUp value which the optima at least has (helps to cut search-tree)
Parameter
@@ -127,9 +136,18 @@ public:
int numThreads=0,
double cutUp=1.0e+75
)
- : numThreads_(numThreads), verbose_(false),verboseCPLEX_(false), cutUp_(cutUp),
- timeOut_(36000000), maximalNumberOfConstraintsPerRound_(1000000),
- edgeRoundingValue_(0.00000001),MWCRounding_(NEAREST), reductionMode_(3),useOldPriorityQueue_(false), useChordalSearch_(false), useBufferedStates_(false),
+ : numThreads_(numThreads),
+ verbose_(false),
+ verboseCPLEX_(false),
+ cutUp_(cutUp),
+ timeOut_(36000000),
+ maximalNumberOfConstraintsPerRound_(1000000),
+ edgeRoundingValue_(0.00000001),
+ MWCRounding_(NEAREST),
+ reductionMode_(3),
+ useOldPriorityQueue_(false),
+ useChordalSearch_(false),
+ useBufferedStates_(false),
initializeWith3Cycles_(false)
{};
@@ -138,10 +156,17 @@ public:
(
const OTHER_PARAM & p
)
- : numThreads_(p.numThreads_), verbose_(p.verbose_),verboseCPLEX_(p.verboseCPLEX_), cutUp_(p.cutUp_),
- timeOut_(p.timeOut_), maximalNumberOfConstraintsPerRound_(p.maximalNumberOfConstraintsPerRound_),
- edgeRoundingValue_(p.edgeRoundingValue_),MWCRounding_(p.MWCRounding_), reductionMode_(p.reductionMode_),
- useOldPriorityQueue_(p.useOldPriorityQueue_), useChordalSearch_(p.useChordalSearch_),
+ : numThreads_(p.numThreads_),
+ verbose_(p.verbose_),
+ verboseCPLEX_(p.verboseCPLEX_),
+ cutUp_(p.cutUp_),
+ timeOut_(p.timeOut_),
+ maximalNumberOfConstraintsPerRound_(p.maximalNumberOfConstraintsPerRound_),
+ edgeRoundingValue_(p.edgeRoundingValue_),
+ MWCRounding_(p.MWCRounding_),
+ reductionMode_(p.reductionMode_),
+ useOldPriorityQueue_(p.useOldPriorityQueue_),
+ useChordalSearch_(p.useChordalSearch_),
initializeWith3Cycles_(false)
{};
};
diff --git a/include/opengm/inference/partition-move.hxx b/include/opengm/inference/partition-move.hxx
index 0dc49a7..b508356 100644
--- a/include/opengm/inference/partition-move.hxx
+++ b/include/opengm/inference/partition-move.hxx
@@ -53,10 +53,23 @@ public:
typedef __gnu_cxx::hash_map<IndexType, LPIndexType> EdgeMapType;
typedef __gnu_cxx::hash_set<IndexType> VariableSetType;
#endif
-
+
+
+ template<class _GM>
+ struct RebindGm{
+ typedef PartitionMove<_GM, ACC> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef PartitionMove<_GM, _ACC > type;
+ };
+
struct Parameter{
Parameter ( ) {};
+ template<class P>
+ Parameter (const P & p) {};
};
~PartitionMove();
diff --git a/include/opengm/inference/qpbo.hxx b/include/opengm/inference/qpbo.hxx
index b24d2bf..b31571c 100644
--- a/include/opengm/inference/qpbo.hxx
+++ b/include/opengm/inference/qpbo.hxx
@@ -26,7 +26,22 @@ public:
typedef visitors::TimingVisitor<QPBO<GM,MIN_ST_CUT> > TimingVisitorType;
typedef visitors::EmptyVisitor<QPBO<GM,MIN_ST_CUT> > EmptyVisitorType;
- struct Parameter {};
+ template<class _GM>
+ struct RebindGm{
+ typedef QPBO<_GM, MIN_ST_CUT> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef QPBO<_GM ,MIN_ST_CUT> type;
+ };
+
+
+ struct Parameter{
+ Parameter ( ) {};
+ template<class P>
+ Parameter (const P & p) {};
+ };
QPBO(const GraphicalModelType&, Parameter = Parameter());
std::string name() const;
diff --git a/include/opengm/inference/reducedinference.hxx b/include/opengm/inference/reducedinference.hxx
index 16baad6..c533fa6 100644
--- a/include/opengm/inference/reducedinference.hxx
+++ b/include/opengm/inference/reducedinference.hxx
@@ -39,13 +39,13 @@ namespace opengm {
typedef typename GM::OperatorType OperatorType;
typedef DiscreteSpace<IndexType, LabelType> SpaceType;
- typedef typename meta::TypeListGenerator< ViewFixVariablesFunction<GM>,
- ViewFunction<GM>,
+ typedef typename meta::TypeListGenerator< ViewFixVariablesFunction<GM>,
+ ViewFunction<GM>,
ConstantFunction<ValueType, IndexType, LabelType>,
ExplicitFunction<ValueType, IndexType, LabelType>
>::type FunctionTypeList;
typedef GraphicalModel<ValueType, OperatorType, FunctionTypeList, SpaceType> InfGmType;
- };
+ };
//! [class reducedinference]
/// Reduced Inference
@@ -59,7 +59,7 @@ namespace opengm {
///
/// additional to the CVPR-Paper
/// * the complete code is refactort - parts of the code are moved to graphicalmodel_manipulator.hxx
- /// * higher order models are supported
+ /// * higher order models are supported
///
/// it requires:
/// * external-qpbo
@@ -84,6 +84,21 @@ namespace opengm {
typedef visitors::TimingVisitor<ReducedInference<GM, ACC, INF> > TimingVisitorType;
+ template<class _GM>
+ struct RebindGm{
+ typedef typename ReducedInferenceHelper<_GM>::InfGmType RebindedInfGmType;
+ typedef typename INF:: template RebindGm<RebindedInfGmType>::type RebindedInf;
+ typedef ReducedInference<_GM, ACC, RebindedInf> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename ReducedInferenceHelper<_GM>::InfGmType RebindedInfGmType;
+ typedef typename INF:: template RebindGmAndAcc<RebindedInfGmType,_ACC>::type RebindedInf;
+ typedef ReducedInference<_GM,_ACC, RebindedInf> type;
+ };
+
+
class Parameter
{
public:
@@ -91,6 +106,17 @@ namespace opengm {
bool Persistency_;
bool Tentacle_;
bool ConnectedComponents_;
+
+
+ template<class P>
+ Parameter(const P & p)
+ : subParameter_(p.subParameter_),
+ Persistency_(p.Persistency_),
+ Tentacle_(p.Tentacle_),
+ ConnectedComponents_(p.ConnectedComponents_)
+ {
+ }
+
Parameter(
const bool Persistency=false,
const bool Tentacle=false,
@@ -111,29 +137,29 @@ namespace opengm {
std::string name() const;
const GmType& graphicalModel() const;
InferenceTermination infer();
- typename GM::ValueType bound() const;
+ typename GM::ValueType bound() const;
template<class VisitorType>
InferenceTermination infer(VisitorType&);
virtual InferenceTermination arg(std::vector<LabelType>&, const size_t = 1) const ;
typename GM::ValueType value() const;
-
+
private:
//typedef typename ReducedInferenceHelper<GM>::InfGmType GM2;
//typedef external::QPBO<GM> QPBO;
-
+
//// typedef Partition<IndexType> Set;
//typedef disjoint_set<IndexType> Set;
//typedef opengm::DynamicProgramming<GM2,AccumulationType> dynP;
//typedef modelTrees<GM2> MT;
-
- const GmType& gm_;
-
- Parameter param_;
+
+ const GmType& gm_;
+
+ Parameter param_;
ValueType bound_;
ValueType value_;
- std::vector<LabelType> state_;
+ std::vector<LabelType> state_;
void getPartialOptimalityByQPBO(std::vector<LabelType>&, std::vector<bool>&);
void getPartialOptimalityByFixsHOQPBO(std::vector<LabelType>&, std::vector<bool>&);
@@ -143,12 +169,12 @@ namespace opengm {
void setPartialOptimality(std::vector<LabelType>&, std::vector<bool>&);
void subinf(const typename ReducedInferenceHelper<GM>::InfGmType&,const bool,std::vector<LabelType>&, typename GM::ValueType&, typename GM::ValueType&);
-
+
//std::vector<bool> variableOpt_;
//std::vector<bool> factorOpt_;
//ValueType const_;
//std::vector<IndexType> model2gm_;
-
+
//void updateFactorOpt(std::vector<ExplicitFunction<ValueType,IndexType,LabelType> >&);
//void getConnectComp(std::vector< std::vector<IndexType> >&, std::vector<GM2>&, std::vector<ExplicitFunction<ValueType,IndexType,LabelType> >&, bool );
//void getTentacle(std::vector< std::vector<IndexType> >&, std::vector<IndexType>&, std::vector< std::vector<ValueType> >&, std::vector< std::vector<std::vector<LabelType> > >&, std::vector< std::vector<IndexType> >&, std::vector<ExplicitFunction<ValueType,IndexType,LabelType> >& );
@@ -167,8 +193,8 @@ namespace opengm {
)
: gm_( gm ),
param_(parameter)
- {
-
+ {
+
ACC::ineutral(bound_);
OperatorType::neutral(value_);
state_.resize(gm.numberOfVariables(),0);
@@ -190,7 +216,7 @@ namespace opengm {
binary = false;
}
}
-
+
for(IndexType j = 0; j < gm_.numberOfFactors(); ++j) {
if(potts && gm_[j].numberOfVariables() >1 && (gm_[j].numberOfVariables() > 3 || !gm_[j].isPotts() ) )
potts=false;
@@ -198,7 +224,7 @@ namespace opengm {
order = gm_[j].numberOfVariables();
}
}
-
+
if(binary){
if(order<=2)
getPartialOptimalityByQPBO(arg,opt);
@@ -214,22 +240,22 @@ namespace opengm {
throw RuntimeError("This implementation of Reduced Inference supports no higher order multi-label problems.");
}
}
-
+
template<class GM, class ACC, class INF>
void ReducedInference<GM,ACC,INF>::getPartialOptimalityByQPBO(std::vector<LabelType>& arg, std::vector<bool>& opt)
- {
+ {
typedef external::QPBO<GM> QPBO;
typename QPBO::Parameter paraQPBO;
- paraQPBO.strongPersistency_=false;
+ paraQPBO.strongPersistency_=false;
QPBO qpbo(gm_,paraQPBO);
qpbo.infer();
qpbo.arg(arg);
- qpbo.partialOptimality(opt);
+ qpbo.partialOptimality(opt);
bound_=qpbo.bound();
- }
+ }
template<class GM, class ACC, class INF>
void ReducedInference<GM,ACC,INF>::getPartialOptimalityByFixsHOQPBO(std::vector<LabelType>& arg, std::vector<bool>& opt)
- {
+ {
const size_t maxOrder = 10;
ValueType constV = 0;
HigherOrderEnergy<ValueType, maxOrder> hoe;
@@ -248,16 +274,18 @@ namespace opengm {
hoe.AddUnaryTerm(var, e1 - e0);
} else {
unsigned int numAssignments = 1 << size;
- ValueType coeffs[numAssignments];
+ AutoDeleteArray<ValueType> coeffs_array(new ValueType[numAssignments]);
+ ValueType * coeffs = coeffs_array.get();
for (unsigned int subset = 1; subset < numAssignments; ++subset) {
coeffs[subset] = 0;
}
- // For each boolean assignment, get the clique energy at the
+ // For each boolean assignment, get the clique energy at the
// corresponding labeling
- LabelType cliqueLabels[size];
+ AutoDeleteArray<LabelType> cliqueLabels_array(new LabelType[size]);
+ LabelType * cliqueLabels = cliqueLabels_array.get();
for(unsigned int assignment = 0; assignment < numAssignments; ++assignment){
for (unsigned int i = 0; i < size; ++i) {
- if (assignment & (1 << i)) {
+ if (assignment & (1 << i)) {
cliqueLabels[i] = l1;
} else {
cliqueLabels[i] = l0;
@@ -288,11 +316,11 @@ namespace opengm {
hoe.AddTerm(coeffs[subset], degree, vars);
}
}
- }
- kolmogorov::qpbo::QPBO<ValueType> qr(gm_.numberOfVariables(), 0);
+ }
+ kolmogorov::qpbo::QPBO<ValueType> qr(gm_.numberOfVariables(), 0);
hoe.ToQuadratic(qr);
qr.Solve();
-
+
for (IndexType i = 0; i < gm_.numberOfVariables(); ++i) {
int label = qr.GetLabel(i);
if(label == 0 ){
@@ -302,24 +330,24 @@ namespace opengm {
else if(label == 1){
arg[i] = 1;
opt[i] = true;
- }
+ }
else{
arg[i] = 0;
opt[i] = false;
}
- }
+ }
bound_ = constV + 0.5 * qr.ComputeTwiceLowerBound();
}
-
+
template<class GM, class ACC, class INF>
void ReducedInference<GM,ACC,INF>::getPartialOptimalityByMQPBO(std::vector<LabelType>& arg, std::vector<bool>& opt)
- {
+ {
typedef opengm::MQPBO<GM,ACC> MQPBOType;
- typename MQPBOType::Parameter mqpboPara;
+ typename MQPBOType::Parameter mqpboPara;
mqpboPara.useKovtunsMethod_ = false;
mqpboPara.strongPersistency_ = true;
mqpboPara.rounds_ = 10;
- mqpboPara.permutationType_ = MQPBOType::RANDOM;
+ mqpboPara.permutationType_ = MQPBOType::RANDOM;
MQPBOType mqpbo(gm_,mqpboPara);
mqpbo.infer();
arg.resize(gm_.numberOfVariables(),0);
@@ -328,15 +356,15 @@ namespace opengm {
opt[var] = mqpbo.partialOptimality(var,arg[var]);
}
}
-
+
template<class GM, class ACC, class INF>
void ReducedInference<GM,ACC,INF>::getPartialOptimalityByKovtunsMethod(std::vector<LabelType>& arg, std::vector<bool>& opt)
- {
+ {
typedef opengm::MQPBO<GM,ACC> MQPBOType;
typename MQPBOType::Parameter mqpboPara;
- mqpboPara.strongPersistency_ = true;
+ mqpboPara.strongPersistency_ = true;
MQPBOType mqpbo(gm_,mqpboPara);
- mqpbo.infer();
+ mqpbo.infer();
arg.resize(gm_.numberOfVariables(),0);
opt.resize(gm_.numberOfVariables(),false);
for(IndexType var=0; var<gm_.numberOfVariables(); ++var){
@@ -358,27 +386,27 @@ namespace opengm {
{
return gm_;
}
-
+
template<class GM, class ACC, class INF>
inline InferenceTermination
ReducedInference<GM,ACC,INF>::infer()
- {
+ {
EmptyVisitorType v;
return infer(v);
}
-
+
template<class GM, class ACC, class INF>
template<class VisitorType>
InferenceTermination ReducedInference<GM,ACC,INF>::infer
(
VisitorType& visitor
)
- {
+ {
visitor.begin(*this);
-
+
GraphicalModelManipulator<GM> gmm(gm_);
-
+
// Find persistency
size_t numFixedVars = 0;
if(param_.Persistency_ == true){
@@ -388,13 +416,13 @@ namespace opengm {
for(IndexType i=0; i<gm_.numberOfVariables(); ++i){
if(opt[i]){
++numFixedVars;
- gmm.fixVariable(i, arg[i]);
+ gmm.fixVariable(i, arg[i]);
}
}
- }
-
+ }
+
//std::cout << numFixedVars <<" of " <<gm_.numberOfVariables() << " are fixed."<<std::endl;
-
+
if(numFixedVars == gm_.numberOfVariables()){
gmm.lock();
std::vector<LabelType> arg(0);
@@ -404,14 +432,14 @@ namespace opengm {
visitor.end(*this);
return NORMAL;
}
-
+
if(param_.Tentacle_ == true){
//std::cout << " Search for tentacles." <<std::endl;
gmm.template lockAndTentacelElimination<ACC>();
}
else{
gmm.lock();
- }
+ }
if( visitor(*this) != visitors::VisitorReturnFlag::ContinueInf ) {
visitor.end(*this);
@@ -422,7 +450,7 @@ namespace opengm {
//ValueType sv, v;
ValueType sb, b, v;
OperatorType::neutral(sb);
- //OperatorType::neutral(sv);
+ //OperatorType::neutral(sv);
// CONNTECTED COMPONENTS INFERENCE
if(param_.ConnectedComponents_ == true){
@@ -430,7 +458,7 @@ namespace opengm {
std::vector<std::vector<LabelType> > args(gmm.numberOfSubmodels(),std::vector<LabelType>() );
for(size_t i=0; i<gmm.numberOfSubmodels(); ++i){
args[i].resize(gmm.getModifiedSubModel(i).numberOfVariables());
- }
+ }
for(size_t i=0; i<gmm.numberOfSubmodels(); ++i){
typename ReducedInferenceHelper<GM>::InfGmType agm = gmm.getModifiedSubModel(i);
subinf(agm, param_.Tentacle_, args[i],v,b);
@@ -450,7 +478,7 @@ namespace opengm {
return NORMAL;
}
//gmm.modifiedSubStates2OriginalState(args, state_);
-
+
}
else{
//size_t i=0;
@@ -458,9 +486,9 @@ namespace opengm {
gmm.buildModifiedModel();
typename ReducedInferenceHelper<GM>::InfGmType agm = gmm.getModifiedModel();
subinf(agm, param_.Tentacle_, arg,v,b);
- gmm.modifiedState2OriginalState(arg, state_);
+ gmm.modifiedState2OriginalState(arg, state_);
//visitor(*this,value(),bound(),"numberOfComp",i);
- //gmm.modifiedState2OriginalState(arg, state_);
+ //gmm.modifiedState2OriginalState(arg, state_);
bound_=b;
}
//value_=gm_.evaluate(state_);
@@ -479,11 +507,11 @@ namespace opengm {
typename GM::ValueType& bound
)
{
- //std::cout << "solve model with "<<agm.numberOfVariables()<<" and "<<agm.numberOfFactors()<<" factors."<<std::endl;
+ //std::cout << "solve model with "<<agm.numberOfVariables()<<" and "<<agm.numberOfFactors()<<" factors."<<std::endl;
InfType inf(agm, param_.subParameter_);
inf.infer();
arg.resize(agm.numberOfVariables());
- inf.arg(arg);
+ inf.arg(arg);
value = inf.value();
bound = inf.bound();
}
@@ -495,7 +523,7 @@ namespace opengm {
}
template<class GM, class ACC, class INF>
- typename GM::ValueType ReducedInference<GM,ACC,INF>::value() const {
+ typename GM::ValueType ReducedInference<GM,ACC,INF>::value() const {
return gm_.evaluate(state_);
}
diff --git a/include/opengm/inference/sat.hxx b/include/opengm/inference/sat.hxx
index 2fdbfc4..8515aaa 100644
--- a/include/opengm/inference/sat.hxx
+++ b/include/opengm/inference/sat.hxx
@@ -27,7 +27,23 @@ namespace opengm {
typedef GM GraphicalModelType;
OPENGM_GM_TYPE_TYPEDEFS;
- struct Parameter {};
+
+ template<class _GM>
+ struct RebindGm{
+ typedef SAT<_GM> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef SAT<_GM> type;
+ };
+
+
+ struct Parameter{
+ Parameter ( ) {};
+ template<class P>
+ Parameter (const P & p) {};
+ };
SAT(const GraphicalModelType&, const Parameter& = Parameter());
std::string name() const;
diff --git a/include/opengm/inference/self_fusion.hxx b/include/opengm/inference/self_fusion.hxx
index 7f260a3..e2f282d 100644
--- a/include/opengm/inference/self_fusion.hxx
+++ b/include/opengm/inference/self_fusion.hxx
@@ -72,7 +72,7 @@ struct FusionVisitor{
iteration_(0),
fuseNth_(fuseNth),
value_(value),
- bound_(bound),
+ bound_(bound),
argFromInf_(selfFusion.graphicalModel().numberOfVariables()),
argBest_(argBest),
argOut_(selfFusion.graphicalModel().numberOfVariables()),
@@ -280,9 +280,22 @@ public:
typedef INFERENCE ToFuseInferenceType;
enum FusionSolver{
- QpboFusion,
- CplexFusion,
- LazyFlipperFusion
+ QpboFusion=0,
+ CplexFusion=1,
+ LazyFlipperFusion=2
+ };
+
+
+ template<class _GM>
+ struct RebindGm{
+ typedef typename INFERENCE:: template RebindGm<_GM>::type RebindedInf;
+ typedef SelfFusion<RebindedInf> type;
+ };
+
+ template<class _GM,class _ACC>
+ struct RebindGmAndAcc{
+ typedef typename INFERENCE:: template RebindGmAndAcc<_GM, _ACC>::type RebindedInf;
+ typedef SelfFusion<RebindedInf> type;
};
@@ -311,6 +324,32 @@ public:
{
}
+
+ template<class P>
+ Parameter(
+ const P & p
+ )
+ : fuseNth_(p.fuseNth_),
+ fusionSolver_(),
+ infParam_(p.infParam_),
+ maxSubgraphSize_(p.maxSubgraphSize_),
+ reducedInf_(p.reducedInf_),
+ connectedComponents_(p.connectedComponents_),
+ tentacles_(p.tentacles_),
+ fusionTimeLimit_(p.fusionTimeLimit_),
+ numStopIt_(p.numStopIt_)
+ {
+ if(p.fusionSolver_ == 0){
+ fusionSolver_ = QpboFusion;
+ }
+ else if(p.fusionSolver_ == 1){
+ fusionSolver_ = CplexFusion;
+ }
+ else if(p.fusionSolver_ == 2){
+ fusionSolver_ = LazyFlipperFusion;
+ }
+ }
+
UInt64Type fuseNth_;
FusionSolver fusionSolver_;
typename INFERENCE::Parameter infParam_;
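The converting constructor above cannot copy fusionSolver_ directly: the source parameter belongs to a different SelfFusion instantiation, whose FusionSolver is a distinct enum type. Giving the enumerators explicit values (0, 1, 2) and dispatching on the integral value of p.fusionSolver_ carries the setting across those unrelated enum types. A tiny illustration of the idea (ColorA and ColorB are placeholders, not OpenGM types):

    // Two unrelated enums with matching underlying values can be translated numerically.
    enum ColorA { RedA = 0, GreenA = 1 };
    enum ColorB { RedB = 0, GreenB = 1 };

    ColorB convert(const ColorA a) {
        if (a == 0) return RedB;    // unscoped enums compare to int via integral promotion
        else        return GreenB;
    }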
diff --git a/include/opengm/opengm.hxx b/include/opengm/opengm.hxx
index 17faa79..7b51032 100644
--- a/include/opengm/opengm.hxx
+++ b/include/opengm/opengm.hxx
@@ -36,7 +36,7 @@
/// runtime assertion
#ifdef NDEBUG
- #ifndef OPENGM_DEBUG
+ #ifndef OPENGM_DEBUG
#define OPENGM_ASSERT_OP(a,op,b) { }
#else
#define OPENGM_ASSERT_OP(a,op,b) \
@@ -108,8 +108,8 @@ struct RuntimeError
// abs function
template<class T>
-inline T abs(const T& x) {
- return x > 0 ? x : -x;
+inline T abs(const T& x) {
+ return x > 0 ? x : -x;
}
template<class T>
@@ -122,6 +122,33 @@ inline T opengmMin(const T& x, const T& y) {
return x <= y ? x : y;
}
+ // simple replacement for std::unique_ptr<T[]> which is not
+ // available everywhere
+template <class T>
+struct AutoDeleteArray
+{
+ T * data_;
+
+ AutoDeleteArray(T * data)
+ : data_(data)
+ {}
+
+ ~AutoDeleteArray()
+ {
+ delete[] data_;
+ }
+
+ T * get()
+ {
+ return data_;
+ }
+
+ T const * get() const
+ {
+ return data_;
+ }
+};
+
} // namespace opengm
#endif // #ifndef OPENGM_HXX
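AutoDeleteArray is the small RAII guard that the reducedinference.hxx hunk above uses to replace the non-standard variable-length arrays (ValueType coeffs[numAssignments], LabelType cliqueLabels[size]); as the comment says, it stands in for std::unique_ptr<T[]>, which is not available on every compiler OpenGM supports. It takes ownership of the array it is handed and calls delete[] in its destructor; note that it defines no copy semantics, so it should stay within a single scope. A short usage sketch (fillZeros is an illustrative function, not part of OpenGM):

    #include <opengm/opengm.hxx>

    void fillZeros(const unsigned int n) {
        // heap allocation guarded for this scope; delete[] runs in the destructor,
        // including on early return or exception
        opengm::AutoDeleteArray<double> coeffs(new double[n]);
        double * c = coeffs.get();
        for (unsigned int i = 0; i < n; ++i) {
            c[i] = 0.0;
        }
    }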
diff --git a/include/opengm/utilities/shape_accessor.hxx b/include/opengm/utilities/shape_accessor.hxx
index 5541832..6d9b45e 100644
--- a/include/opengm/utilities/shape_accessor.hxx
+++ b/include/opengm/utilities/shape_accessor.hxx
@@ -61,12 +61,88 @@ namespace opengm {
const value_type operator[](const size_t j) const
{ return factor_->numberOfLabels(j); }
bool operator==(const FactorShapeAccessor<FACTOR> & other) const
- { return factor_ == other.factor_; }
+ { return factor_ == other.factor_; }
private:
factor_pointer factor_;
};
+
+
+ template<class SUBSET_ITERATOR, class GM_LABEL_ITER>
+ class SubsetAccessor {
+ public:
+ typedef typename std::iterator_traits<GM_LABEL_ITER>::value_type value_type;
+
+ typedef const value_type reference;
+ typedef const value_type* pointer;
+
+ SubsetAccessor()
+ : sBegin_(),
+ sEnd_(),
+ gmLabelIter_()
+ {}
+ SubsetAccessor(SUBSET_ITERATOR sBegin, SUBSET_ITERATOR sEnd , GM_LABEL_ITER iter)
+ : sBegin_(sBegin),
+ sEnd_(sEnd),
+ gmLabelIter_(iter)
+ {}
+ size_t size() const
+ { return std::distance(sBegin_, sEnd_); }
+ reference operator[](const size_t j)
+ { return gmLabelIter_[sBegin_[j]]; }
+ const value_type operator[](const size_t j) const
+ { return gmLabelIter_[sBegin_[j]]; }
+ bool operator==(const SubsetAccessor & other) const
+ {
+ return sBegin_ == other.sBegin_ &&
+ sEnd_ == other.sEnd_ &&
+ gmLabelIter_==other.gmLabelIter_;
+ }
+
+ private:
+ SUBSET_ITERATOR sBegin_;
+ SUBSET_ITERATOR sEnd_;
+ GM_LABEL_ITER gmLabelIter_;
+ };
+
+
+
+ template<class FACTOR, class GM_LABEL_ITER>
+ class GmLabelFactorLabelAccessor {
+ public:
+ typedef typename std::iterator_traits<GM_LABEL_ITER>::value_type value_type;
+
+ typedef const value_type reference;
+ typedef const value_type* pointer;
+ typedef const FACTOR& factor_reference;
+ typedef const FACTOR* factor_pointer;
+
+ GmLabelFactorLabelAccessor()
+ : factor_(NULL),
+ gmLabelIter_()
+ {}
+ GmLabelFactorLabelAccessor(factor_reference f , GM_LABEL_ITER iter)
+ : factor_(&f),
+ gmLabelIter_(iter)
+ {}
+ size_t size() const
+ { return factor_ == 0 ? 0 : factor_->numberOfVariables(); }
+ reference operator[](const size_t j)
+ { return gmLabelIter_[factor_->variableIndex(j)]; }
+ const value_type operator[](const size_t j) const
+ { return gmLabelIter_[factor_->variableIndex(j)]; }
+ bool operator==(const GmLabelFactorLabelAccessor & other) const
+ { return factor_ == other.factor_ && gmLabelIter_ == other.gmLabelIter_; }
+
+ private:
+ factor_pointer factor_;
+ GM_LABEL_ITER gmLabelIter_;
+ };
+
+
+
template<class FACTOR>
class FactorVariablesAccessor {
public:
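SubsetAccessor generalises the existing shape accessors: given a pair of iterators over a set of variable indices and an iterator over a full labeling, it exposes the labels of just those variables as a random-access sequence (operator[] maps position j to gmLabelIter_[sBegin_[j]]); GmLabelFactorLabelAccessor does the same for the variables of a factor. A small sketch of the subset accessor's behaviour (the vectors are made-up data, not an OpenGM use site):

    #include <cstddef>
    #include <vector>
    #include <opengm/utilities/shape_accessor.hxx>

    void example() {
        std::vector<std::size_t> labels;    // labeling of all model variables
        labels.push_back(2); labels.push_back(0);
        labels.push_back(1); labels.push_back(3);

        std::vector<std::size_t> subset;    // indices of the variables of interest
        subset.push_back(3); subset.push_back(1);

        opengm::SubsetAccessor<
            std::vector<std::size_t>::const_iterator,
            std::vector<std::size_t>::const_iterator
        > acc(subset.begin(), subset.end(), labels.begin());

        // acc.size() == 2, acc[0] == labels[3] == 3, acc[1] == labels[1] == 0
    }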
diff --git a/src/tutorials/c++/basics/doSumProdInference.cxx b/src/tutorials/c++/basics/doSumProdInference.cxx
index 2cad7da..80b4b2f 100644
--- a/src/tutorials/c++/basics/doSumProdInference.cxx
+++ b/src/tutorials/c++/basics/doSumProdInference.cxx
@@ -126,7 +126,7 @@ void inferBP(const Model& gm, bool normalization = true){
typedef opengm::BeliefPropagationUpdateRules<Model, opengm::Integrator> UpdateRules;
typedef opengm::MessagePassing<Model, opengm::Integrator, UpdateRules, opengm::MaxDistance> LBP;
- LBP::Parameter parameter(100); //maximal number of iterations=0
+ LBP::Parameter parameter(100, static_cast<Model::ValueType>(0.000000)); // maximal number of iterations = 100, convergence bound = 0
parameter.useNormalization_ = normalization;
LBP lbp(gm, parameter);
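The explicitly typed arguments in this tutorial and in the tests below (static_cast<size_t>(10), static_cast<GraphicalModelType::ValueType>(1000000), and so on) are most likely forced by the new templated converting constructors: with template<class P> Parameter(const P &) in overload resolution, a bare literal such as 10 is an exact match for the template (P = int) and beats the ordinary constructor, which needs an int-to-size_t conversion; the template body then fails to compile because an int has no maximumNumberOfSteps_ member. Casting the argument to the exact parameter type makes the non-template constructor an equally good match, and the tie-break prefers it. A minimal reproduction with a made-up Options type:

    #include <cstddef>

    // Options and steps_ are illustrative names, not OpenGM types.
    struct Options {
        std::size_t steps_;
        Options(std::size_t steps = 100) : steps_(steps) {}
        template<class P>
        Options(const P & p) : steps_(p.steps_) {}    // converting ctor as in this patch
    };

    void example() {
        // Options a(10);                           // would not compile: the template is the
        //                                          // better match with P = int
        Options b(static_cast<std::size_t>(10));    // exact match for the non-template ctor
        (void)b;                                    // silence unused-variable warnings
    }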
diff --git a/src/unittest/inference/test_graphcut.cxx b/src/unittest/inference/test_graphcut.cxx
index 55a62e0..0f855c9 100644
--- a/src/unittest/inference/test_graphcut.cxx
+++ b/src/unittest/inference/test_graphcut.cxx
@@ -103,7 +103,7 @@ int main() {
{
typedef opengm::MinSTCutBoost<size_t, long, opengm::PUSH_RELABEL> MinStCutType;
typedef opengm::GraphCut<GraphicalModelType, opengm::Minimizer, MinStCutType> MinGraphCut;
- MinGraphCut::Parameter para(1000000);
+ MinGraphCut::Parameter para(static_cast<GraphicalModelType::ValueType>(1000000));
minTester.test<MinGraphCut>(para);
}
std::cout << " * Test Min-Sum with BOOST-Edmonds-Karp" << std::endl;
diff --git a/src/unittest/inference/test_lazyflipper.cxx b/src/unittest/inference/test_lazyflipper.cxx
index 6645c40..8c44640 100644
--- a/src/unittest/inference/test_lazyflipper.cxx
+++ b/src/unittest/inference/test_lazyflipper.cxx
@@ -83,7 +83,7 @@ void additionalTest() {
}
{
- LazyFlipper::Parameter parameter(6);
+ LazyFlipper::Parameter parameter(static_cast<size_t>(6));
LazyFlipper lazyFlipper(model, parameter);
lazyFlipper.infer();
diff --git a/src/unittest/inference/test_messagepassing.cxx b/src/unittest/inference/test_messagepassing.cxx
index a065942..e1c4a1b 100644
--- a/src/unittest/inference/test_messagepassing.cxx
+++ b/src/unittest/inference/test_messagepassing.cxx
@@ -167,7 +167,7 @@ int main() {
typedef opengm::GraphicalModel<double, opengm::Adder> GraphicalModelType;
typedef opengm::BeliefPropagationUpdateRules<GraphicalModelType,opengm::Minimizer> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Minimizer,UpdateRulesType, opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
sumTester.test<BP>(para);
std::cout << " ... parallel ... ";
para.isAcyclic_=opengm::Tribool::False;
@@ -179,7 +179,7 @@ int main() {
typedef opengm::GraphicalModel<double, opengm::Adder> GraphicalModelType;
typedef opengm::BeliefPropagationUpdateRules<GraphicalModelType,opengm::Minimizer> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Minimizer,UpdateRulesType, opengm::MaxDistance> BP;
- BP::Parameter para(100);
+ BP::Parameter para(static_cast<size_t>(100));
para.isAcyclic_ = false;
sumTester.test<BP>(para);
std::cout << " OK!"<<std::endl;
@@ -198,7 +198,7 @@ int main() {
typedef opengm::GraphicalModel<double,opengm::Adder> GraphicalModelType;
typedef opengm::BeliefPropagationUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer,UpdateRulesType, opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
sumTester.test<BP>(para);
std::cout << " OK!"<<std::endl;
}
@@ -207,7 +207,7 @@ int main() {
typedef opengm::GraphicalModel<double,opengm::Multiplier > GraphicalModelType;
typedef opengm::BeliefPropagationUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer,UpdateRulesType, opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
prodTester.test<BP>(para);
std::cout << " OK!"<<std::endl;
}
@@ -290,7 +290,7 @@ int main() {
typedef opengm::GraphicalModel<double,opengm::Adder > GraphicalModelType;
typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Minimizer> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Minimizer,UpdateRulesType, opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
sumTester.test<BP>(para);
std::cout << " ... parallel ... ";
para.isAcyclic_=opengm::Tribool::False;
@@ -323,7 +323,7 @@ int main() {
typedef opengm::GraphicalModel<double,opengm::Adder> GraphicalModelType;
typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer, UpdateRulesType,opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
sumTester.test<BP>(para);
std::cout << " OK!"<<std::endl;
}
@@ -332,7 +332,7 @@ int main() {
typedef opengm::GraphicalModel<double,opengm::Multiplier > GraphicalModelType;
typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer, UpdateRulesType,opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
prodTester.test<BP>(para);
std::cout << " OK!"<<std::endl;
}
@@ -341,7 +341,7 @@ int main() {
typedef opengm::GraphicalModel<double,opengm::Multiplier > GraphicalModelType;
typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Maximizer> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Maximizer, UpdateRulesType, opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
prodTester.test<BP>(para);
std::cout << " OK!"<<std::endl;
}
@@ -359,7 +359,7 @@ int main() {
typedef opengm::GraphicalModel<double,opengm::Adder > GraphicalModelType;
typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Integrator> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Integrator,UpdateRulesType, opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
sumTester.test<BP>(para);
std::cout << " OK!"<<std::endl;
}
@@ -368,7 +368,7 @@ int main() {
typedef opengm::GraphicalModel<double,opengm::Multiplier > GraphicalModelType;
typedef opengm::TrbpUpdateRules<GraphicalModelType,opengm::Integrator> UpdateRulesType;
typedef opengm::MessagePassing<GraphicalModelType, opengm::Integrator,UpdateRulesType, opengm::MaxDistance> BP;
- BP::Parameter para(10);
+ BP::Parameter para(static_cast<size_t>(10));
prodTester.test<BP>(para);
std::cout << " OK!"<<std::endl;
}
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-science/packages/opengm.git