
Implemented more functionality for NativePolytope; added functions to handle exact numbers in Z3LpSolver.

tempestpy_adaptions
TimQu, 8 years ago
commit 7dfc43c828
  1.  14  src/storm/adapters/EigenAdapter.h
  2.   4  src/storm/settings/modules/CoreSettings.cpp
  3.   2  src/storm/solver/SolverSelectionOptions.h
  4. 124  src/storm/solver/Z3LPSolver.cpp
  5.  21  src/storm/solver/Z3LPSolver.h
  6. 149  src/storm/storage/geometry/NativePolytope.cpp
  7.  14  src/storm/storage/geometry/NativePolytope.h
  8. 134  src/storm/storage/geometry/nativepolytopeconversion/HyperplaneCollector.cpp
  9. 113  src/storm/storage/geometry/nativepolytopeconversion/HyperplaneCollector.h
 10. 275  src/storm/storage/geometry/nativepolytopeconversion/HyperplaneEnumeration.cpp
 11. 120  src/storm/storage/geometry/nativepolytopeconversion/HyperplaneEnumeration.h
 12. 881  src/storm/storage/geometry/nativepolytopeconversion/QuickHull.cpp
 13. 307  src/storm/storage/geometry/nativepolytopeconversion/QuickHull.h
 14.   5  src/storm/storage/geometry/nativepolytopeconversion/SubsetEnumerator.cpp
 15.   6  src/storm/utility/solver.cpp
 16.   5  src/storm/utility/solver.h

14  src/storm/adapters/EigenAdapter.h

@@ -27,3 +27,17 @@ namespace storm {
}
}
namespace std {
template<class ValueType>
struct hash<StormEigen::Matrix<ValueType, StormEigen::Dynamic, 1>> {
std::size_t operator()(StormEigen::Matrix<ValueType, StormEigen::Dynamic, 1> const &vector) const {
size_t seed = 0;
for (uint_fast64_t i = 0; i < vector.rows(); ++i) {
carl::hash_add(seed, std::hash<ValueType>()(vector(i)));
}
return seed;
}
};
}
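The std::hash specialization added above makes Eigen column vectors usable as keys of hashed containers, which the new polytope conversion classes rely on. A minimal sketch of assumed usage (the VecEqual equality functor is hypothetical and not part of this commit; it only illustrates that an equality predicate still has to be supplied separately):

    #include <unordered_map>
    #include "storm/adapters/EigenAdapter.h"

    using Vec = StormEigen::Matrix<double, StormEigen::Dynamic, 1>;

    // Hypothetical coefficient-wise equality; the commit only provides the hash.
    struct VecEqual {
        bool operator()(Vec const& a, Vec const& b) const {
            return a.rows() == b.rows() && (a.array() == b.array()).all();
        }
    };

    int main() {
        Vec v(2);
        v << 1.0, 2.0;
        // Hash computed via the new std::hash specialization from EigenAdapter.h.
        std::size_t h = std::hash<Vec>()(v);
        // With an equality functor supplied, vectors can index hashed containers.
        std::unordered_map<Vec, int, std::hash<Vec>, VecEqual> pointIndex;
        pointIndex.emplace(v, 0);
        return (pointIndex.count(v) == 1 && h == std::hash<Vec>()(v)) ? 0 : 1;
    }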

4  src/storm/settings/modules/CoreSettings.cpp

@@ -48,7 +48,7 @@ namespace storm {
this->addOption(storm::settings::OptionBuilder(moduleName, ddLibraryOptionName, false, "Sets which library is preferred for decision-diagram operations.")
.addArgument(storm::settings::ArgumentBuilder::createStringArgument("name", "The name of the library to prefer.").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(ddLibraries)).setDefaultValueString("cudd").build()).build());
std::vector<std::string> lpSolvers = {"gurobi", "glpk"};
std::vector<std::string> lpSolvers = {"gurobi", "glpk", "z3"};
this->addOption(storm::settings::OptionBuilder(moduleName, lpSolverOptionName, false, "Sets which LP solver is preferred.")
.addArgument(storm::settings::ArgumentBuilder::createStringArgument("name", "The name of an LP solver.").addValidatorString(ArgumentValidatorFactory::createMultipleChoiceValidator(lpSolvers)).setDefaultValueString("glpk").build()).build());
std::vector<std::string> smtSolvers = {"z3", "mathsat"};
@@ -94,6 +94,8 @@ namespace storm {
return storm::solver::LpSolverType::Gurobi;
} else if (lpSolverName == "glpk") {
return storm::solver::LpSolverType::Glpk;
} else if (lpSolverName == "z3") {
return storm::solver::LpSolverType::Z3;
}
STORM_LOG_THROW(false, storm::exceptions::IllegalArgumentValueException, "Unknown LP solver '" << lpSolverName << "'.");
}
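With "z3" accepted as an LP solver name, a solver factory can dispatch on the extended enum. A hedged sketch of what the accompanying change to src/storm/utility/solver.cpp presumably amounts to (the function name createLpSolver is illustrative, and it is assumed that Z3LpSolver derives from storm::solver::LpSolver like the existing Gurobi and glpk solvers):

    #include <memory>
    #include <string>
    #include "storm/solver/LpSolver.h"
    #include "storm/solver/SolverSelectionOptions.h"
    #include "storm/solver/Z3LPSolver.h"

    // Illustrative factory; the real dispatch lives in storm::utility::solver.
    std::unique_ptr<storm::solver::LpSolver> createLpSolver(std::string const& name, storm::solver::LpSolverType type) {
        switch (type) {
            case storm::solver::LpSolverType::Z3:
                return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::Z3LpSolver(name));
            default:
                // Gurobi / Glpk handled analogously.
                return nullptr;
        }
    }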

2  src/storm/solver/SolverSelectionOptions.h

@@ -8,7 +8,7 @@ namespace storm {
namespace solver {
ExtendEnumsWithSelectionField(MinMaxMethod, PolicyIteration, ValueIteration, Topological)
ExtendEnumsWithSelectionField(LpSolverType, Gurobi, Glpk)
ExtendEnumsWithSelectionField(LpSolverType, Gurobi, Glpk, Z3)
ExtendEnumsWithSelectionField(EquationSolverType, Native, Gmmxx, Eigen, Elimination)
ExtendEnumsWithSelectionField(SmtSolverType, Z3, Mathsat)
}

124  src/storm/solver/Z3LPSolver.cpp

@@ -30,7 +30,7 @@ namespace storm {
context = std::unique_ptr<z3::context>(new z3::context(config));
solver = std::unique_ptr<z3::optimize>(new z3::optimize(*context));
expressionAdapter = std::unique_ptr<storm::adapters::Z3ExpressionAdapter>(new storm::adapters::Z3ExpressionAdapter(*this->manager, *context));
optimizationFunction = this->getManager().rational(storm::utility::zero<double>());
optimizationFunction = this->getManager().rational(storm::utility::zero<storm::RationalNumber>());
}
Z3LpSolver::Z3LpSolver(std::string const& name) : Z3LpSolver(name, OptimizationDirection::Minimize) {
@@ -112,7 +112,7 @@ namespace storm {
storm::expressions::Variable Z3LpSolver::addBinaryVariable(std::string const& name, double objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getIntegerType());
solver->add(expressionAdapter->translateExpression((newVariable.getExpression() >= this->manager->rational(storm::utility::one<double>())) && (newVariable.getExpression() <= this->manager->rational(storm::utility::one<double>()))));
solver->add(expressionAdapter->translateExpression((newVariable.getExpression() >= this->manager->rational(storm::utility::one<storm::RationalNumber>())) && (newVariable.getExpression() <= this->manager->rational(storm::utility::one<storm::RationalNumber>()))));
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
@@ -218,6 +218,82 @@ namespace storm {
STORM_LOG_THROW(!this->isUnbounded(), storm::exceptions::NotImplementedException, "Exporting LP Problems to a file is not implemented for z3.");
}
storm::expressions::Variable Z3LpSolver::addBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getRationalType());
solver->add(expressionAdapter->translateExpression((newVariable.getExpression() >= this->manager->rational(lowerBound)) && (newVariable.getExpression() <= this->manager->rational(upperBound))));
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::expressions::Variable Z3LpSolver::addLowerBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getRationalType());
solver->add(expressionAdapter->translateExpression(newVariable.getExpression() >= this->manager->rational(lowerBound)));
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::expressions::Variable Z3LpSolver::addUpperBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getRationalType());
solver->add(expressionAdapter->translateExpression(newVariable.getExpression() <= this->manager->rational(upperBound)));
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::expressions::Variable Z3LpSolver::addUnboundedContinuousVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getRationalType());
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::expressions::Variable Z3LpSolver::addBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getIntegerType());
solver->add(expressionAdapter->translateExpression((newVariable.getExpression() >= this->manager->rational(lowerBound)) && (newVariable.getExpression() <= this->manager->rational(upperBound))));
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::expressions::Variable Z3LpSolver::addLowerBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getIntegerType());
solver->add(expressionAdapter->translateExpression(newVariable.getExpression() >= this->manager->rational(lowerBound)));
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::expressions::Variable Z3LpSolver::addUpperBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getIntegerType());
solver->add(expressionAdapter->translateExpression(newVariable.getExpression() <= this->manager->rational(upperBound)));
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::expressions::Variable Z3LpSolver::addUnboundedIntegerVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getIntegerType());
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::expressions::Variable Z3LpSolver::addBinaryVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient) {
storm::expressions::Variable newVariable = this->manager->declareVariable(name, manager->getIntegerType());
solver->add(expressionAdapter->translateExpression((newVariable.getExpression() >= this->manager->rational(storm::utility::one<storm::RationalNumber>())) && (newVariable.getExpression() <= this->manager->rational(storm::utility::one<storm::RationalNumber>()))));
optimizationFunction = optimizationFunction + this->manager->rational(objectiveFunctionCoefficient) * newVariable;
return newVariable;
}
storm::RationalNumber Z3LpSolver::getExactContinuousValue(storm::expressions::Variable const& variable) const {
return getValue(variable).evaluateAsDouble();
}
storm::RationalNumber Z3LpSolver::getExactObjectiveValue() const {
if (!this->isOptimal()) {
STORM_LOG_THROW(!this->isInfeasible(), storm::exceptions::InvalidAccessException, "Unable to get Z3 solution from infeasible model.");
STORM_LOG_THROW(!this->isUnbounded(), storm::exceptions::InvalidAccessException, "Unable to get Z3 solution from unbounded model.");
STORM_LOG_THROW(false, storm::exceptions::InvalidAccessException, "Unable to get Z3 solution from unoptimized model.");
}
STORM_LOG_ASSERT(lastCheckObjectiveValue, "Objective value has not been stored.");
return this->expressionAdapter->translateExpression(*lastCheckObjectiveValue).evaluateAsDouble();
}
#else
Z3LpSolver::Z3LpSolver(std::string const&, OptimizationDirection const&) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
@@ -318,10 +394,52 @@ namespace storm {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
void Z3LpSolver::toggleOutput(bool) const {
storm::expressions::Variable Z3LpSolver::addBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::expressions::Variable Z3LpSolver::addLowerBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::expressions::Variable Z3LpSolver::addUpperBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::expressions::Variable Z3LpSolver::addUnboundedContinuousVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::expressions::Variable Z3LpSolver::addBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::expressions::Variable Z3LpSolver::addLowerBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::expressions::Variable Z3LpSolver::addUpperBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::expressions::Variable Z3LpSolver::addUnboundedIntegerVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::expressions::Variable Z3LpSolver::addBinaryVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient) {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::RationalNumber Z3LpSolver::getExactContinuousValue(storm::expressions::Variable const& variable) const {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
storm::RationalNumber Z3LpSolver::getExactObjectiveValue() const {
throw storm::exceptions::NotImplementedException() << "This version of storm was compiled without support for Z3. Yet, a method was called that requires this support.";
}
#endif
}
}
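The new RationalNumber overloads allow an LP to be built and queried without floating-point rounding. A minimal usage sketch based only on the methods added above (the function name exactLpExample is ours; error handling omitted):

    #include "storm/solver/Z3LPSolver.h"
    #include "storm/utility/constants.h"

    // Maximize x + y subject to 0 <= x, y <= 1 and x + y <= 1, with exact rationals.
    void exactLpExample() {
        storm::RationalNumber zero = storm::utility::zero<storm::RationalNumber>();
        storm::RationalNumber one = storm::utility::one<storm::RationalNumber>();

        storm::solver::Z3LpSolver solver(storm::solver::OptimizationDirection::Maximize);
        storm::expressions::Variable x = solver.addBoundedContinuousVariable("x", zero, one, one);
        storm::expressions::Variable y = solver.addBoundedContinuousVariable("y", zero, one, one);
        solver.addConstraint("c1", x.getExpression() + y.getExpression() <= solver.getManager().rational(one));
        solver.update();
        solver.optimize();

        if (solver.isOptimal()) {
            storm::RationalNumber xValue = solver.getExactContinuousValue(x);  // exact value of x
            storm::RationalNumber objective = solver.getExactObjectiveValue(); // equals 1
        }
    }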

21  src/storm/solver/Z3LPSolver.h

@@ -87,14 +87,29 @@ namespace storm {
virtual bool isOptimal() const override;
// Methods to retrieve values of variables and the objective function in the optimal solutions.
virtual double getContinuousValue(storm::expressions::Variable const& name) const override;
virtual int_fast64_t getIntegerValue(storm::expressions::Variable const& name) const override;
virtual bool getBinaryValue(storm::expressions::Variable const& name) const override;
virtual double getContinuousValue(storm::expressions::Variable const& variable) const override;
virtual int_fast64_t getIntegerValue(storm::expressions::Variable const& variable) const override;
virtual bool getBinaryValue(storm::expressions::Variable const& variable) const override;
virtual double getObjectiveValue() const override;
// Methods to print the LP problem to a file.
virtual void writeModelToFile(std::string const& filename) const override;
// Methods for exact solving
virtual storm::expressions::Variable addBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
virtual storm::expressions::Variable addLowerBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
virtual storm::expressions::Variable addUpperBoundedContinuousVariable(std::string const& name, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
virtual storm::expressions::Variable addUnboundedContinuousVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
virtual storm::expressions::Variable addBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
virtual storm::expressions::Variable addLowerBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& lowerBound, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
virtual storm::expressions::Variable addUpperBoundedIntegerVariable(std::string const& name, storm::RationalNumber const& upperBound, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
virtual storm::expressions::Variable addUnboundedIntegerVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
virtual storm::expressions::Variable addBinaryVariable(std::string const& name, storm::RationalNumber const& objectiveFunctionCoefficient = 0) override;
storm::RationalNumber getExactContinuousValue(storm::expressions::Variable const& variable) const;
storm::RationalNumber getExactObjectiveValue() const;
private:
virtual storm::expressions::Expression getValue(storm::expressions::Variable const& name) const;

149  src/storm/storage/geometry/NativePolytope.cpp

@@ -1,11 +1,15 @@
#include <storm/exceptions/NotImplementedException.h>
#include "storm/storage/geometry/NativePolytope.h"
#ifdef STasdf
#include "storm/utility/macros.h"
#include "storm/utility/solver.h"
#include "storm/solver/Z3LPSolver.h"
#include "storm/storage/geometry/nativepolytopeconversion/QuickHull.h"
#include "storm/storage/geometry/nativepolytopeconversion/HyperplaneEnumeration.h"
#include "storm/exceptions/InvalidArgumentException.h"
#include "storm/exceptions/UnexpectedException.h"
#include "storm/utility/solver.h"
namespace storm {
namespace storage {
@@ -41,15 +45,16 @@ namespace storm {
eigenPoints.emplace_back(storm::adapters::EigenAdapter<ValueType>::toEigenVector(p));
}
// todo: quickhull(eigenPoints)
storm::storage::geometry::QuickHull<ValueType> qh;
qh.generateHalfspacesFromPoints(eigenPoints, false);
A = std::move(qh.getResultMatrix());
b = std::move(qh.getResultVector());
emptyStatus = EmptyStatus::Nonempty;
}
}
template <typename ValueType>
std::shared_ptr<Polytope<ValueType>> NativePolytope<ValueType>::create(boost::optional<std::vector<Halfspace<ValueType>>> const& halfspaces,
boost::optional<std::vector<Point>> const& points) {
std::shared_ptr<Polytope<ValueType>> NativePolytope<ValueType>::create(boost::optional<std::vector<Halfspace<ValueType>>> const& halfspaces, boost::optional<std::vector<Point>> const& points) {
if(halfspaces) {
STORM_LOG_WARN_COND(!points, "Creating a NativePolytope where halfspaces AND points are given. The points will be ignored.");
return std::make_shared<NativePolytope<ValueType>>(*halfspaces);
@@ -197,52 +202,126 @@ namespace storm {
return std::make_shared<NativePolytope<ValueType>>(dynamic_cast<NativePolytope<ValueType> const&>(*rhs));
} else if (rhs->isEmpty()) {
return std::make_shared<NativePolytope<ValueType>>(*this);
}
if(this->isUniversal() || rhs->isUniversal()) {
return std::make_shared<NativePolytope<ValueType>>(*this);
} else if (this->isUniversal() || rhs->isUniversal()) {
return std::make_shared<NativePolytope<ValueType>>(std::vector<Halfspace<ValueType>>());
}
STORM_LOG_WARN("Implementation of convex union of two polytopes only works if the polytopes are bounded.");
STORM_LOG_WARN("Implementation of convex union of two polytopes only works if the polytopes are bounded. This is not checked.");
std::vector<EigenVector> rhsVertices = dynamic_cast<NativePolytope<ValueType> const&>(*rhs).getEigenVertices();
std::vector<EigenVector> resultVertices = this->getEigenVertices();
resultVertices.insert(resultVertices.end(), std::make_move_iterator(rhsVertices.begin()), std::make_move_iterator(rhsVertices.end()));
// todo invoke quickhull
return nullptr;
storm::storage::geometry::QuickHull<ValueType> qh;
qh.generateHalfspacesFromPoints(resultVertices, false);
return std::make_shared<NativePolytope<ValueType>>(EmptyStatus::Nonempty, std::move(qh.getResultMatrix()), std::move(qh.getResultVector()));
}
template <typename ValueType>
std::shared_ptr<Polytope<ValueType>> NativePolytope<ValueType>::minkowskiSum(std::shared_ptr<Polytope<ValueType>> const& rhs) const{
std::shared_ptr<Polytope<ValueType>> NativePolytope<ValueType>::minkowskiSum(std::shared_ptr<Polytope<ValueType>> const& rhs) const {
STORM_LOG_THROW(rhs->isNativePolytope(), storm::exceptions::InvalidArgumentException, "Invoked operation between a NativePolytope and a different polytope implementation. This is not supported");
NativePolytope<ValueType> const& nativeRhs = dynamic_cast<NativePolytope<ValueType> const&>(*rhs);
return std::make_shared<NativePolytope<ValueType>>(internPolytope.minkowskiSum(dynamic_cast<NativePolytope<ValueType> const&>(*rhs).internPolytope));
if(this->isEmpty() || nativeRhs.isEmpty()) {
return std::make_shared<NativePolytope<ValueType>>(std::vector<Point>());
}
std::vector<std::pair<EigenVector, ValueType>> resultConstraints;
resultConstraints.reserve(A.rows() + nativeRhs.A.rows());
// evaluation of rhs in directions of lhs
for (uint_fast64_t i = 0; i < A.rows(); ++i) {
auto optimizationRes = nativeRhs.optimize(A.row(i));
if ( optimizationRes.second ) {
resultConstraints.emplace_back(A.row(i), b(i) + (A.row(i) * optimizationRes.first)(0));
}
// If optimizationRes.second is false, it means that rhs is unbounded in this direction, i.e., the current constraint is not inserted
}
// evaluation of lhs in directions of rhs
for (uint_fast64_t i = 0; i < nativeRhs.A.rows(); ++i) {
auto optimizationRes = optimize(nativeRhs.A.row(i));
if ( optimizationRes.second ) {
resultConstraints.emplace_back(nativeRhs.A.row(i), nativeRhs.b(i) + (nativeRhs.A.row(i) * optimizationRes.first)(0));
}
// If optimizationRes.second is false, it means that rhs is unbounded in this direction, i.e., the current constraint is not inserted
}
if(resultConstraints.empty()) {
return std::make_shared<NativePolytope<ValueType>>(std::vector<Halfspace<ValueType>>());
} else {
EigenMatrix newA(resultConstraints.size(), resultConstraints.front().first.rows());
EigenVector newb(resultConstraints.size());
for(uint_fast64_t i = 0; i < newA.rows(); ++i) {
newA.row(i) = resultConstraints[i].first;
newb(i) = resultConstraints[i].second;
}
return std::make_shared<NativePolytope<ValueType>>(EmptyStatus::Nonempty, std::move(newA), std::move(newb));
}
}
template <typename ValueType>
std::shared_ptr<Polytope<ValueType>> NativePolytope<ValueType>::affineTransformation(std::vector<Point> const& matrix, Point const& vector) const{
std::shared_ptr<Polytope<ValueType>> NativePolytope<ValueType>::affineTransformation(std::vector<Point> const& matrix, Point const& vector) const {
STORM_LOG_THROW(!matrix.empty(), storm::exceptions::InvalidArgumentException, "Invoked affine transformation with a matrix without rows.");
hypro::matrix_t<ValueType> hyproMatrix(matrix.size(), matrix.front().size());
EigenMatrix eigenMatrix(matrix.size(), matrix.front().size());
for(uint_fast64_t row = 0; row < matrix.size(); ++row) {
hyproMatrix.row(row) = storm::adapters::toNative(matrix[row]);
eigenMatrix.row(row) = storm::adapters::EigenAdapter::toEigenVector(matrix[row]);
}
return std::make_shared<NativePolytope<ValueType>>(internPolytope.affineTransformation(std::move(hyproMatrix), storm::adapters::toNative(vector)));
EigenVector eigenVector = storm::adapters::EigenAdapter::toEigenVector(vector);
StormEigen::FullPivLU<EigenMatrix> luMatrix( eigenMatrix );
STORM_LOG_THROW(luMatrix.isInvertible(), storm::exceptions::NotImplementedException, "Affine transformation of a native polytope is only implemented if the transformation matrix is invertible.");
EigenMatrix newA = A * luMatrix.inverse();
EigenVector newb = b + (newA * eigenVector);
return std::make_shared<NativePolytope<ValueType>>(emptyStatus, std::move(newA), std::move(newb));
}
template <typename ValueType>
std::pair<typename NativePolytope<ValueType>::Point, bool> NativePolytope<ValueType>::optimize(Point const& direction) const {
hypro::EvaluationResult<ValueType> evalRes = internPolytope.evaluate(storm::adapters::toNative(direction));
switch (evalRes.errorCode) {
case hypro::SOLUTION::FEAS:
return std::make_pair(storm::adapters::fromNative(evalRes.optimumValue), true);
case hypro::SOLUTION::UNKNOWN:
STORM_LOG_THROW(false, storm::exceptions::UnexpectedException, "Unexpected eror code for Polytope evaluation");
return std::make_pair(Point(), false);
default:
//solution is infinity or infeasible
return std::make_pair(Point(), false);
storm::solver::Z3LpSolver solver(storm::solver::OptimizationDirection::Maximize);
std::vector<storm::expressions::Variable> variables;
variables.reserve(A.cols());
for (uint_fast64_t i = 0; i < A.cols(); ++i) {
variables.push_back(solver.addUnboundedContinuousVariable("x" + std::to_string(i), direction[i]));
}
solver.addConstraint("", getConstraints(solver.getManager(), variables));
solver.update();
solver.optimize();
if (solver.isOptimal()) {
auto result = std::make_pair(Point(), true);
result.first.reserve(variables.size());
for (auto const& var : variables) {
result.first.push_back(solver.getContinuousValue(var));
}
return result;
} else {
//solution is infinity or infeasible
return std::make_pair(Point(), false);
}
}
template <typename ValueType>
std::pair<typename NativePolytope<ValueType>::EigenVector, bool> NativePolytope<ValueType>::optimize(EigenVector const& direction) const {
storm::solver::Z3LpSolver solver(storm::solver::OptimizationDirection::Maximize);
std::vector<storm::expressions::Variable> variables;
variables.reserve(A.cols());
for (uint_fast64_t i = 0; i < A.cols(); ++i) {
variables.push_back(solver.addUnboundedContinuousVariable("x" + std::to_string(i), direction(i)));
}
solver.addConstraint("", getConstraints(solver.getManager(), variables));
solver.update();
solver.optimize();
if (solver.isOptimal()) {
auto result = std::make_pair(EigenVector(variables.size()), true);
for (uint_fast64_t i = 0; i < variables.size(); ++i) {
result.first(i) = solver.getContinuousValue(variables[i]);
}
return result;
} else {
//solution is infinity or infeasible
return std::make_pair(EigenVector(), false);
}
}
@@ -252,8 +331,9 @@ namespace storm {
}
template <typename ValueType>
std::vector<typename NativePolytope<ValueType>::EigenVector> NativePolytope<ValueType>::getEigenVertices() const {
// todo: invoke conversion
return std::vector<EigenVector>();
storm::storage::geometry::HyperplaneEnumeration<ValueType> he;
he.generateVerticesFromConstraints(A, b, false);
return he.getResultVertices();
}
template <typename ValueType>
@@ -267,9 +347,9 @@ namespace storm {
}
template <typename ValueType>
storm::expressions::Expression NativePolytope<ValueType>::getConstraints(storm::expressions::ExpressionManager& manager, std::vector<storm::expressions::Variable> const& variables) cons {
storm::expressions::Expression NativePolytope<ValueType>::getConstraints(storm::expressions::ExpressionManager const& manager, std::vector<storm::expressions::Variable> const& variables) const {
storm::expressions::Expression result = manager.boolean(true);
for(uint_fast64_t row=0; row < A.rows(); ++row) {
for(uint_fast64_t row = 0; row < A.rows(); ++row) {
storm::expressions::Expression lhs = manager.rational(A(row,0)) * variables[0].getExpression();
for(uint_fast64_t col=1; col < A.cols(); ++col) {
lhs = lhs + manager.rational(A(row,col)) * variables[col].getExpression();
@@ -287,4 +367,3 @@ namespace storm {
}
}
}
#endif
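With QuickHull and HyperplaneEnumeration wired in, a NativePolytope can now be built from vertices and optimized over via the Z3 LP solver. A hedged sketch of assumed usage (it assumes Point is std::vector&lt;ValueType&gt; as in Polytope&lt;ValueType&gt;, that create is a static factory, and that the double instantiation exists; the function name is ours):

    #include <vector>
    #include <boost/none.hpp>
    #include "storm/storage/geometry/NativePolytope.h"

    void nativePolytopeExample() {
        // Triangle with vertices (0,0), (1,0), (0,1), given as points;
        // the halfspace representation is derived internally via QuickHull.
        std::vector<std::vector<double>> points = {{0.0, 0.0}, {1.0, 0.0}, {0.0, 1.0}};
        auto triangle = storm::storage::geometry::NativePolytope<double>::create(boost::none, points);

        // Maximize x + y over the triangle; the bool flag indicates boundedness in that direction.
        auto result = triangle->optimize({1.0, 1.0});
        if (result.second) {
            // result.first is an optimal point, here (1,0) or (0,1).
        }
    }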

14  src/storm/storage/geometry/NativePolytope.h

@@ -3,9 +3,7 @@
#include "storm/storage/geometry/Polytope.h"
#include "storm/storage/expressions/Expressions.h"
#include "storm/utility/eigen.h"
#ifdef asdf
#include "storm/adapters/EigenAdapter.h"
namespace storm {
namespace storage {
@@ -115,7 +113,7 @@ namespace storm {
* - The polytope is not bounded in the given direction
*/
virtual std::pair<Point, bool> optimize(Point const& direction) const override;
virtual bool isNativePolytope() const override;
private:
@@ -125,11 +123,15 @@ namespace storm {
// returns the vertices of this polytope as EigenVectors
std::vector<EigenVector> getEigenVertices() const;
// As optimize(..) but with EigenVectors
std::pair<EigenVector, bool> optimize(EigenVector const& direction) const;
// declares one variable for each constraint and returns the obtained variables.
std::vector<storm::expressions::Variable> declareVariables(storm::expressions::ExpressionManager& manager, std::string const& namePrefix) const;
// returns the constraints defined by this polytope as an expression
storm::expressions::Expression getConstraints(storm::expressions::ExpressionManager& manager, std::vector<storm::expressions::Variable> const& variables) const;
storm::expressions::Expression getConstraints(storm::expressions::ExpressionManager const& manager, std::vector<storm::expressions::Variable> const& variables) const;
enum class EmptyStatus{
@@ -152,6 +154,4 @@ namespace storm {
}
}
#endif /* asdfsafO */
#endif /* STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPE_H_ */

134  src/storm/storage/geometry/nativepolytopeconversion/HyperplaneCollector.cpp

@@ -1,79 +1,77 @@
/*
* File: HyperplaneCollector.cpp
* Author: tim quatmann
*
* Created on December 10, 2015, 6:06 PM
*/
#include "storm/storage/geometry/nativepolytopeconversion/HyperplaneCollector.h"
#include "HyperplaneCollector.h"
namespace hypro {
namespace pterm{
template< typename Number>
bool HyperplaneCollector<Number>::insert(hypro::Hyperplane<Number>const & hyperplane, std::vector<std::size_t>const* indexList) {
return this->insert(hyperplane.normal(), hyperplane.offset(), indexList);
}
template< typename Number>
bool HyperplaneCollector<Number>::insert(hypro::vector_t<Number> const& normal, Number const& offset, std::vector<std::size_t>const* indexList) {
hypro::vector_t<Number> copyNormal(normal);
Number copyOffset(offset);
return this->insert(std::move(copyNormal), std::move(copyOffset), indexList);
}
template< typename Number>
bool HyperplaneCollector<Number>::insert(hypro::vector_t<Number> && normal, Number && offset, std::vector<std::size_t>const* indexList) {
//Normalize
Number infinityNorm = normal.template lpNorm<Eigen::Infinity>();
if(infinityNorm != (Number)0 ){
normal /= infinityNorm;
offset /= infinityNorm;
namespace storm {
namespace storage {
namespace geometry{
template< typename ValueType>
bool HyperplaneCollector<ValueType>::insert(EigenVector const& normal, ValueType const& offset, std::vector<uint_fast64_t>const* indexList) {
EigenVector copyNormal(normal);
ValueType copyOffset(offset);
return this->insert(std::move(copyNormal), std::move(copyOffset), indexList);
}
if(indexList == nullptr){
//insert with empty list
return this->mMap.insert(ValueType(KeyType(normal, offset), std::vector<std::size_t>())).second;
} else {
auto inserted = this->mMap.insert(ValueType(KeyType(normal, offset), *indexList));
if(!inserted.second){
//Append vertex list
inserted.first->second.insert(inserted.first->second.end(), indexList->begin(), indexList->end());
template< typename ValueType>
bool HyperplaneCollector<ValueType>::insert(EigenVector && normal, ValueType && offset, std::vector<uint_fast64_t>const* indexList) {
//Normalize
ValueType infinityNorm = normal.template lpNorm<StormEigen::Infinity>();
if(infinityNorm != (ValueType)0 ){
normal /= infinityNorm;
offset /= infinityNorm;
}
if(indexList == nullptr){
//insert with empty list
return map.insert(MapValueType(MapKeyType(normal, offset), std::vector<uint_fast64_t>())).second;
} else {
auto inserted = map.insert(MapValueType(MapKeyType(normal, offset), *indexList));
if(!inserted.second){
//Append vertex list
inserted.first->second.insert(inserted.first->second.end(), indexList->begin(), indexList->end());
}
return inserted.second;
}
return inserted.second;
}
}
template< typename Number>
std::pair<hypro::matrix_t<Number>, hypro::vector_t<Number>> HyperplaneCollector<Number>::getCollectedHyperplanesAsMatrixVector() const{
assert(!this->mMap.empty());
hypro::matrix_t<Number> A(this->mMap.size(), this->mMap.begin()->first.first.rows());
hypro::vector_t<Number> b(this->mMap.size());
std::size_t row = 0;
for(auto const& mapEntry : this->mMap){
A.row(row) = mapEntry.first.first;
b(row) = mapEntry.first.second;
++row;
template< typename ValueType>
std::pair<typename HyperplaneCollector<ValueType>::EigenMatrix, typename HyperplaneCollector<ValueType>::EigenVector> HyperplaneCollector<ValueType>::getCollectedHyperplanesAsMatrixVector() const{
if(map.empty()) {
return std::pair<EigenMatrix, EigenVector>();
}
EigenMatrix A(map.size(), map.begin()->first.first.rows());
EigenVector b(map.size());
uint_fast64_t row = 0;
for(auto const& mapEntry : map){
A.row(row) = mapEntry.first.first;
b(row) = mapEntry.first.second;
++row;
}
return std::pair<EigenMatrix, EigenVector>(std::move(A), std::move(b));
}
template< typename ValueType>
std::vector<std::vector<uint_fast64_t>> HyperplaneCollector<ValueType>::getIndexLists() const{
std::vector<std::vector<uint_fast64_t>> result(map.size());
auto resultIt = result.begin();
for(auto const& mapEntry : map){
*resultIt = mapEntry.second;
++resultIt;
}
return result;
}
return std::pair<hypro::matrix_t<Number>, hypro::vector_t<Number>>(std::move(A), std::move(b));
}
template< typename Number>
std::vector<std::vector<std::size_t>> HyperplaneCollector<Number>::getIndexLists() const{
assert(!this->mMap.empty());
std::vector<std::vector<std::size_t>> result(this->mMap.size());
auto resultIt = result.begin();
for(auto const& mapEntry : this->mMap){
*resultIt = mapEntry.second;
++resultIt;
template< typename ValueType>
uint_fast64_t HyperplaneCollector<ValueType>::numOfCollectedHyperplanes() const {
return map.size();
}
return result;
}
template< typename Number>
std::size_t HyperplaneCollector<Number>::numOfCollectedHyperplanes() const {
return this->mMap.size();
template class HyperplaneCollector<double>;
template class HyperplaneCollector<storm::RationalNumber>;
}
}
}

113  src/storm/storage/geometry/nativepolytopeconversion/HyperplaneCollector.h

@@ -1,63 +1,64 @@
/*
* File: HyperplaneCollector.h
* Author: tim quatmann
*
* Created on December 10, 2015, 6:06 PM
*/
#pragma once
#ifndef STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_HYPERPLANECOLLECTOR_H_
#define STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_HYPERPLANECOLLECTOR_H_
#include <unordered_map>
#include "../../config.h"
#include "../../datastructures/Hyperplane.h"
namespace hypro{
namespace pterm{
/*!
* This class can be used to collect a set of hyperplanes (without duplicates).
* The inserted hyperplanes are normalized, i.e. devided by the infinity norm of the normal vector
*/
template< typename Number>
class HyperplaneCollector {
public:
HyperplaneCollector() = default;
HyperplaneCollector(const HyperplaneCollector& orig) = default;
virtual ~HyperplaneCollector() = default;
/*
* inserts the given hyperplane.
* For every (unique) hyperplane, there is a list of indices which can be used e.g. to obtain the set of vertices that lie on each hyperplane.
* If indexList is given (i.e. not nullptr), the given indices are appended to that list.
* Returns true iff the hyperplane was inserted (i.e. the hyperplane was not already contained in this)
#include "storm/adapters/CarlAdapter.h"
#include "storm/adapters/EigenAdapter.h"
namespace storm {
namespace storage {
namespace geometry {
/*!
* This class can be used to collect a set of hyperplanes (without duplicates).
* The inserted hyperplanes are normalized, i.e. divided by the infinity norm of the normal vector
*/
bool insert(hypro::Hyperplane<Number> const& hyperplane, std::vector<std::size_t>const* indexList = nullptr);
bool insert(hypro::vector_t<Number> const& normal, Number const& offset, std::vector<std::size_t>const* indexList = nullptr);
bool insert(hypro::vector_t<Number>&& normal, Number && offset, std::vector<std::size_t>const* indexList = nullptr);
std::pair<hypro::matrix_t<Number>, hypro::vector_t<Number>> getCollectedHyperplanesAsMatrixVector() const;
//Note that the returned lists might contain dublicates.
std::vector<std::vector<std::size_t>> getIndexLists() const;
std::size_t numOfCollectedHyperplanes() const;
private:
typedef std::pair<hypro::vector_t<Number>, Number> NormalOffset;
class NormalOffsetHash{
public:
std::size_t operator()(NormalOffset const& ns) const {
std::size_t seed = std::hash<hypro::vector_t<Number>>()(ns.first);
carl::hash_add(seed, std::hash<Number>()(ns.second));
return seed;
}
template< typename ValueType>
class HyperplaneCollector {
public:
typedef StormEigen::Matrix<ValueType, StormEigen::Dynamic, StormEigen::Dynamic> EigenMatrix;
typedef StormEigen::Matrix<ValueType, StormEigen::Dynamic, 1> EigenVector;
HyperplaneCollector() = default;
HyperplaneCollector(const HyperplaneCollector& orig) = default;
virtual ~HyperplaneCollector() = default;
/*
* inserts the given hyperplane.
* For every (unique) hyperplane, there is a list of indices which can be used e.g. to obtain the set of vertices that lie on each hyperplane.
* If indexList is given (i.e. not nullptr), the given indices are appended to that list.
* Returns true iff the hyperplane was inserted (i.e. the hyperplane was not already contained in this)
*/
bool insert(EigenVector const& normal, ValueType const& offset, std::vector<uint_fast64_t>const* indexList = nullptr);
bool insert(EigenVector&& normal, ValueType&& offset, std::vector<uint_fast64_t>const* indexList = nullptr);
std::pair<EigenMatrix, EigenVector> getCollectedHyperplanesAsMatrixVector() const;
//Note that the returned lists might contain duplicates.
std::vector<std::vector<uint_fast64_t>> getIndexLists() const;
uint_fast64_t numOfCollectedHyperplanes() const;
private:
typedef std::pair<EigenVector, ValueType> NormalOffset;
class NormalOffsetHash{
public:
std::size_t operator()(NormalOffset const& ns) const {
std::size_t seed = std::hash<EigenVector>()(ns.first);
carl::hash_add(seed, std::hash<ValueType>()(ns.second));
return seed;
}
};
typedef typename std::unordered_map<NormalOffset, std::vector<uint_fast64_t>, NormalOffsetHash>::key_type MapKeyType;
typedef typename std::unordered_map<NormalOffset, std::vector<uint_fast64_t>, NormalOffsetHash>::value_type MapValueType;
std::unordered_map<NormalOffset, std::vector<uint_fast64_t>, NormalOffsetHash> map;
};
typedef typename std::unordered_map<NormalOffset, std::vector<std::size_t>, NormalOffsetHash>::key_type KeyType;
typedef typename std::unordered_map<NormalOffset, std::vector<std::size_t>, NormalOffsetHash>::value_type ValueType;
std::unordered_map<NormalOffset, std::vector<std::size_t>, NormalOffsetHash> mMap;
};
}
}
}
#include "HyperplaneCollector.tpp"
#endif /* STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_HYPERPLANECOLLECTOR_H_ */
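A minimal sketch of assumed usage of the collector (function name ours): scaled duplicates of a halfspace collapse to a single entry because every inserted normal/offset pair is normalized by the infinity norm of the normal.

    #include "storm/storage/geometry/nativepolytopeconversion/HyperplaneCollector.h"

    void hyperplaneCollectorExample() {
        storm::storage::geometry::HyperplaneCollector<double> collector;

        StormEigen::Matrix<double, StormEigen::Dynamic, 1> n(2), m(2);
        n << 1.0, 0.0;   // halfspace x <= 2
        m << 2.0, 0.0;   // the same halfspace, scaled: 2x <= 4

        collector.insert(n, 2.0);
        collector.insert(m, 4.0);   // normalizes to the same key, so it is not inserted again

        auto matrixVector = collector.getCollectedHyperplanesAsMatrixVector();
        // matrixVector.first has one row, matrixVector.second one entry,
        // and collector.numOfCollectedHyperplanes() == 1.
    }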

275  src/storm/storage/geometry/nativepolytopeconversion/HyperplaneEnumeration.cpp

@@ -1,147 +1,158 @@
/*
* File: HyperplaneEnumeration.tpp
* Author: tim quatmann
* Author: phillip florian
*
* Created on December 28, 2015, 1:06 PM
*/
#include "HyperplaneEnumeration.h"
#include "../SubsetEnumerator.h"
#include "../HyperplaneCollector.h"
namespace hypro {
namespace pterm{
template<typename Number>
bool HyperplaneEnumeration<Number>::generateVerticesFromHalfspaces(PTermHPolytope<Number> const& hPoly, bool generateRelevantHyperplanesAndVertexSets){
PTERM_DEBUG("Invoked generateVerticesFromHalfspaces with " << hPoly.getMatrix().rows() << " hyperplanes. Dimension is " << hPoly.dimension());
std::unordered_map<Point<Number>, std::set<std::size_t>> vertexCollector;
hypro::pterm::SubsetEnumerator<hypro::matrix_t<Number>> subsetEnum(hPoly.getMatrix().rows(), hPoly.dimension(), hPoly.getMatrix(), this->linearDependenciesFilter);
if(subsetEnum.setToFirstSubset()){
do{
std::vector<std::size_t> const& subset = subsetEnum.getCurrentSubset();
std::pair<hypro::matrix_t<Number>, hypro::vector_t<Number>> subHPolytope(hPoly.getSubHPolytope(subset));
Point<Number> point(subHPolytope.first.fullPivLu().solve(subHPolytope.second));
//Point<Number> point(hypro::gauss(subHPolytope.first, subHPolytope.second));
if(hPoly.contains(point)){
//Note that the map avoids duplicates.
auto hyperplaneIndices = vertexCollector.insert(typename std::unordered_map<Point<Number>, std::set<std::size_t>>::value_type(std::move(point), std::set<std::size_t>())).first;
if(generateRelevantHyperplanesAndVertexSets){
hyperplaneIndices->second.insert(subset.begin(), subset.end());
#include "storm/storage/geometry/nativepolytopeconversion/HyperplaneEnumeration.h"
#include "storm/utility/macros.h"
#include "storm/storage/geometry/nativepolytopeconversion/SubsetEnumerator.h"
#include "storm/storage/geometry/nativepolytopeconversion/HyperplaneCollector.h"
namespace storm {
namespace storage {
namespace geometry{
template<typename ValueType>
void HyperplaneEnumeration<ValueType>::generateVerticesFromConstraints(EigenMatrix const& constraintMatrix, EigenVector const& constraintVector, bool generateRelevantHyperplanesAndVertexSets) {
STORM_LOG_DEBUG("Invoked Hyperplane enumeration with " << constraintMatrix.rows() << " constraints.");
const uint_fast64_t dimension = constraintMatrix.cols();
if(dimension == 0) {
// No halfspaces means no vertices
resultVertices.clear();
relevantMatrix = constraintMatrix;
relevantVector = constraintVector;
vertexSets.clear();
return;
}
std::unordered_map<EigenVector, std::set<uint_fast64_t>> vertexCollector;
storm::storage::geometry::SubsetEnumerator<EigenMatrix> subsetEnum(constraintMatrix.rows(), dimension, constraintMatrix, this->linearDependenciesFilter);
if(subsetEnum.setToFirstSubset()){
do{
std::vector<uint_fast64_t> const& subset = subsetEnum.getCurrentSubset();
EigenMatrix subMatrix(dimension, dimension);
EigenVector subVector(dimension);
for (uint_fast64_t i = 0; i < subset.size(); ++i){
subMatrix.row(i) = constraintMatrix.row(subset[i]);
subVector.row(i) = constraintVector.row(subset[i]);
}
}
} while (subsetEnum.incrementSubset());
} else {
std::cout << "Could not generate any vertex while converting from H Polytope. TODO: implement this case (we get some unbounded thing here)" << std::endl;
return false;
}
if(generateRelevantHyperplanesAndVertexSets){
//For each hyperplane, get the number of (unique) vertices that lie on it.
std::vector<std::size_t> verticesOnHyperplaneCounter(hPoly.getMatrix().rows(), 0);
for(auto const& mapEntry : vertexCollector){
for(auto const& hyperplaneIndex : mapEntry.second){
++verticesOnHyperplaneCounter[hyperplaneIndex];
}
EigenVector point = subMatrix.fullPivLu().solve(subVector);
bool pointContained = true;
for(uint_fast64_t row=0; row < constraintMatrix.rows(); ++row){
if((constraintMatrix.row(row) * point)(0) > constraintVector(row)){
pointContained = false;
break;
}
}
if(pointContained) {
//Note that the map avoids duplicates.
auto hyperplaneIndices = vertexCollector.insert(typename std::unordered_map<EigenVector, std::set<uint_fast64_t>>::value_type(std::move(point), std::set<uint_fast64_t>())).first;
if(generateRelevantHyperplanesAndVertexSets){
hyperplaneIndices->second.insert(subset.begin(), subset.end());
}
}
} while (subsetEnum.incrementSubset());
}
//Only keep the hyperplanes on which at least dimension() vertices lie.
//Note that this will change the indices of the Hyperplanes.
//Therefore, we additionally store the old indices for every hyperplane to be able to translate from old to new indices
hypro::pterm::HyperplaneCollector<Number> hyperplaneCollector;
for(std::size_t hyperplaneIndex = 0; hyperplaneIndex < verticesOnHyperplaneCounter.size(); ++hyperplaneIndex){
if(verticesOnHyperplaneCounter[hyperplaneIndex] >= hPoly.dimension()){
std::vector<std::size_t> oldIndex;
oldIndex.push_back(hyperplaneIndex);
hyperplaneCollector.insert(hPoly.getMatrix().row(hyperplaneIndex), hPoly.getVector()(hyperplaneIndex), &oldIndex);
if(generateRelevantHyperplanesAndVertexSets){
//For each hyperplane, get the number of (unique) vertices that lie on it.
std::vector<uint_fast64_t> verticesOnHyperplaneCounter(constraintMatrix.rows(), 0);
for(auto const& mapEntry : vertexCollector){
for(auto const& hyperplaneIndex : mapEntry.second){
++verticesOnHyperplaneCounter[hyperplaneIndex];
}
}
}
auto matrixVector = hyperplaneCollector.getCollectedHyperplanesAsMatrixVector();
this->mRelevantMatrix = std::move(matrixVector.first);
this->mRelevantVector = std::move(matrixVector.second);
//Get the mapping from old to new indices
std::vector<std::size_t> oldToNewIndexMapping (hPoly.getMatrix().rows(), hPoly.getMatrix().rows()); //Initialize with some illegal value
std::vector<std::vector<std::size_t>> newToOldIndexMapping(hyperplaneCollector.getIndexLists());
for(std::size_t newIndex = 0; newIndex < newToOldIndexMapping.size(); ++newIndex){
for(auto const& oldIndex : newToOldIndexMapping[newIndex]){
oldToNewIndexMapping[oldIndex] = newIndex;
//Only keep the hyperplanes on which at least dimension() vertices lie.
//Note that this will change the indices of the Hyperplanes.
//Therefore, we additionally store the old indices for every hyperplane to be able to translate from old to new indices
storm::storage::geometry::HyperplaneCollector<ValueType> hyperplaneCollector;
for(uint_fast64_t hyperplaneIndex = 0; hyperplaneIndex < verticesOnHyperplaneCounter.size(); ++hyperplaneIndex){
if(verticesOnHyperplaneCounter[hyperplaneIndex] >= dimension){
std::vector<uint_fast64_t> oldIndex;
oldIndex.push_back(hyperplaneIndex);
hyperplaneCollector.insert(constraintMatrix.row(hyperplaneIndex), constraintVector(hyperplaneIndex), &oldIndex);
}
}
}
//Insert the resulting vertices and get the set of vertices that lie on each hyperplane
std::vector<std::set<std::size_t>> vertexSets(this->mRelevantMatrix.rows());
this->mResultVertices.clear();
this->mResultVertices.reserve(vertexCollector.size());
for(auto const& mapEntry : vertexCollector){
for(auto const& oldHyperplaneIndex : mapEntry.second){
//ignore the hyperplanes which are redundant, i.e. for which there is no new index
if(oldToNewIndexMapping[oldHyperplaneIndex] < this->mRelevantVector.rows()){
vertexSets[oldToNewIndexMapping[oldHyperplaneIndex]].insert(this->mResultVertices.size());
auto matrixVector = hyperplaneCollector.getCollectedHyperplanesAsMatrixVector();
relevantMatrix = std::move(matrixVector.first);
relevantVector = std::move(matrixVector.second);
//Get the mapping from old to new indices
std::vector<uint_fast64_t> oldToNewIndexMapping (constraintMatrix.rows(), constraintMatrix.rows()); //Initialize with some illegal value
std::vector<std::vector<uint_fast64_t>> newToOldIndexMapping(hyperplaneCollector.getIndexLists());
for(uint_fast64_t newIndex = 0; newIndex < newToOldIndexMapping.size(); ++newIndex){
for(auto const& oldIndex : newToOldIndexMapping[newIndex]){
oldToNewIndexMapping[oldIndex] = newIndex;
}
}
//Insert the resulting vertices and get the set of vertices that lie on each hyperplane
std::vector<std::set<uint_fast64_t>> vertexSets(relevantMatrix.rows());
resultVertices.clear();
resultVertices.reserve(vertexCollector.size());
for(auto const& mapEntry : vertexCollector){
for(auto const& oldHyperplaneIndex : mapEntry.second){
//ignore the hyperplanes which are redundant, i.e. for which there is no new index
if(oldToNewIndexMapping[oldHyperplaneIndex] < relevantVector.rows()){
vertexSets[oldToNewIndexMapping[oldHyperplaneIndex]].insert(resultVertices.size());
}
}
resultVertices.push_back(mapEntry.first);
}
this->mResultVertices.push_back(mapEntry.first);
this->vertexSets.clear();
this->vertexSets.reserve(vertexSets.size());
for(auto const& vertexSet : vertexSets){
this->vertexSets.emplace_back(vertexSet.begin(), vertexSet.end());
}
} else {
resultVertices.clear();
resultVertices.reserve(vertexCollector.size());
for(auto const& mapEntry : vertexCollector){
resultVertices.push_back(mapEntry.first);
}
this->vertexSets.clear();
relevantMatrix = EigenMatrix();
relevantVector = EigenVector();
}
this->mVertexSets.clear();
this->mVertexSets.reserve(vertexSets.size());
for(auto const& vertexSet : vertexSets){
this->mVertexSets.emplace_back(vertexSet.begin(), vertexSet.end());
STORM_LOG_DEBUG("Invoked generateVerticesFromHalfspaces with " << hPoly.getMatrix().rows() << " hyperplanes and " << resultVertices.size() << " vertices and " << relevantMatrix.rows() << " relevant hyperplanes. Dimension is " << hPoly.dimension());
}
template <typename ValueType>
bool HyperplaneEnumeration<ValueType>::linearDependenciesFilter(std::vector<uint_fast64_t> const& subset, uint_fast64_t const& item, EigenMatrix const& A) {
EigenMatrix subMatrix(subset.size() +1, A.cols());
for (uint_fast64_t i = 0; i < subset.size(); ++i){
subMatrix.row(i) = A.row(subset[i]);
}
} else {
this->mResultVertices.clear();
this->mResultVertices.reserve(vertexCollector.size());
for(auto const& mapEntry : vertexCollector){
this->mResultVertices.push_back(mapEntry.first);
subMatrix.row(subset.size()) = A.row(item);
StormEigen::FullPivLU<EigenMatrix> lUMatrix( subMatrix );
if (lUMatrix.rank() < subMatrix.rows()){
//Linear dependent!
return false;
} else {
return true;
}
this->mVertexSets.clear();
this->mRelevantMatrix = hypro::matrix_t<Number>();
this->mRelevantVector = hypro::vector_t<Number>();
}
PTERM_DEBUG("Invoked generateVerticesFromHalfspaces with " << hPoly.getMatrix().rows() << " hyperplanes and " << this->mResultVertices.size() << " vertices and " << this->mRelevantMatrix.rows() << " relevant hyperplanes. Dimension is " << hPoly.dimension());
return true;
}
template <typename Number>
bool HyperplaneEnumeration<Number>::linearDependenciesFilter(std::vector<std::size_t> const& subset, std::size_t const& item, hypro::matrix_t<Number> const& A) {
hypro::matrix_t<Number> subMatrix(subset.size() +1, A.cols());
for (std::size_t i = 0; i < subset.size(); ++i){
subMatrix.row(i) = A.row(subset[i]);
}
subMatrix.row(subset.size()) = A.row(item);
Eigen::FullPivLU<matrix_t<Number>> lUMatrix( subMatrix );
// std::cout << "The rank is " << lUMatrix.rank() << " and the matrix has " << subMatrix.rows() << " rows" << std::endl;
if (lUMatrix.rank() < subMatrix.rows()){
//Linear dependent!
return false;
} else {
return true;
}
}
template<typename Number>
std::vector<Point<Number>>& HyperplaneEnumeration<Number>::getResultVertices() {
return this->mResultVertices;
}
template<typename Number>
hypro::matrix_t<Number>& HyperplaneEnumeration<Number>::getRelevantMatrix() {
return this->mRelevantMatrix;
}
template<typename Number>
hypro::vector_t<Number>& HyperplaneEnumeration<Number>::getRelevantVector() {
return this->mRelevantVector;
}
template<typename Number>
std::vector<std::vector<std::size_t>>& HyperplaneEnumeration<Number>::getVertexSets() {
return this->mVertexSets;
template<typename ValueType>
std::vector<typename HyperplaneEnumeration<ValueType>::EigenVector>& HyperplaneEnumeration<ValueType>::getResultVertices() {
return resultVertices;
}
template<typename ValueType>
typename HyperplaneEnumeration<ValueType>::EigenMatrix& HyperplaneEnumeration<ValueType>::getRelevantMatrix() {
return relevantMatrix;
}
template<typename ValueType>
typename HyperplaneEnumeration<ValueType>::EigenVector& HyperplaneEnumeration<ValueType>::getRelevantVector() {
return relevantVector;
}
template<typename ValueType>
std::vector<std::vector<uint_fast64_t>>& HyperplaneEnumeration<ValueType>::getVertexSets() {
return this->vertexSets;
}
}
}
}

120  src/storm/storage/geometry/nativepolytopeconversion/HyperplaneEnumeration.h

@@ -1,68 +1,66 @@
/*
* File: HyperplaneEnumeration.h
* Author: tim quatmann
* Author: phillip florian
*
* Created on December 27, 2015, 1:06 PM
*/
#ifndef STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_HYPERPLANEENUMERATION_H_
#define STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_HYPERPLANEENUMERATION_H_
#pragma once
#include "../macros.h"
#include "../../../datastructures/Hyperplane.h"
#include "../../../datastructures/Point.h"
#include <vector>
#include "storm/adapters/EigenAdapter.h"
namespace hypro{
namespace pterm{
namespace storm{
namespace storage{
namespace geometry{
template< typename Number>
class HyperplaneEnumeration {
public:
HyperplaneEnumeration() = default;
virtual ~HyperplaneEnumeration() = default;
/*
* Generates the vertices of the given polytope by enumerating all intersection points generated by subsets of hyperplanes of size hPoly.dimension().
* If the given flag is true, this method will also compute
* * the minimal set of hyperplanes which represent the given hPoly (can be used to remove redundant hyperplanes), and
* * for each hyperplane, the set of (non-redundant) vertices that lie on that hyperplane.
*
* Use the provided getter methods to retrieve the results
*
* @return true iff conversion was successful.
*/
bool generateVerticesFromHalfspaces(PTermHPolytope<Number> const& hPoly, bool generateRelevantHyperplanesAndVertexSets);
std::vector<Point<Number>>& getResultVertices();
/*!
* Returns the set of halfspaces which are not redundant
* @note the returned matrix and vector are empty if the corresponding flag was false
*/
hypro::matrix_t<Number>& getRelevantMatrix();
hypro::vector_t<Number>& getRelevantVector();
/*!
* Returns for each hyperplane the set of vertices that lie on that hyperplane.
* A vertex is given as an index in the relevantVertices vector.
* @note the returned vector is empty if the corresponding flag was false
*/
std::vector<std::vector<std::size_t>>& getVertexSets();
/*
* Returns true if the hyperplanes with indices of subset and item are all linear independent
* Note that this is also used by the hybrid polytope.
*/
static bool linearDependenciesFilter(std::vector<std::size_t> const& subset, std::size_t const& item, hypro::matrix_t<Number> const& A);
template< typename ValueType>
class HyperplaneEnumeration {
public:
private:
std::vector<Point<Number>> mResultVertices;
hypro::matrix_t<Number> mRelevantMatrix;
hypro::vector_t<Number> mRelevantVector;
std::vector<std::vector<std::size_t>> mVertexSets;
};
typedef StormEigen::Matrix<ValueType, StormEigen::Dynamic, StormEigen::Dynamic> EigenMatrix;
typedef StormEigen::Matrix<ValueType, StormEigen::Dynamic, 1> EigenVector;
HyperplaneEnumeration() = default;
virtual ~HyperplaneEnumeration() = default;
/*
* Generates the vertices of the polytope given by the constraint system Ax <= b by enumerating all intersection points of subsets of constraints whose size equals the dimension.
* If the given flag is true, this method will also compute
* * the minimal set of hyperplanes which represent the given polytope (can be used to remove redundant hyperplanes), and
* * for each hyperplane, the set of (non-redundant) vertices that lie on that hyperplane.
*
* Use the provided getter methods to retrieve the results.
*/
void generateVerticesFromConstraints(EigenMatrix const& constraintMatrix, EigenVector const& constraintVector, bool generateRelevantHyperplanesAndVertexSets);
std::vector<EigenVector>& getResultVertices();
/*!
* Returns the set of halfspaces which are not redundant
* @note the returned matrix and vector are empty if the corresponding flag was false
*/
EigenMatrix& getRelevantMatrix();
EigenVector& getRelevantVector();
/*!
* Returns for each hyperplane the set of vertices that lie on that hyperplane.
* A vertex is given as an index in the relevantVertices vector.
* @note the returned vector is empty if the corresponding flag was false
*/
std::vector<std::vector<uint_fast64_t>>& getVertexSets();
/*
* Returns true if the hyperplanes with indices of subset and item are all linear independent
* Note that this is also used by the hybrid polytope.
*/
static bool linearDependenciesFilter(std::vector<uint_fast64_t> const& subset, uint_fast64_t const& item, EigenMatrix const& A);
private:
std::vector<EigenVector> resultVertices;
EigenMatrix relevantMatrix;
EigenVector relevantVector;
std::vector<std::vector<uint_fast64_t>> vertexSets;
};
}
}
}
#include "HyperplaneEnumeration.tpp"
#endif /* "STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_HYPERPLANEENUMERATION_H_" */

881
src/storm/storage/geometry/nativepolytopeconversion/QuickHull.cpp

@ -1,505 +1,532 @@
#include "storm/storage/geometry/nativepolytopeconversion/QuickHull.h"
#include <algorithm>
#include <unordered_map>
#include <storm/adapters/NumberAdapter.h>
#include "storm/utility/macros.h"
#include "storm/utility/constants.h"
#include "storm/utility/vector.h"
#include "storm/storage/geometry/nativepolytopeconversion/SubsetEnumerator.h"
#include "storm/storage/geometry/nativepolytopeconversion/HyperplaneCollector.h"
namespace hypro {
namespace pterm{
template<typename ValueType>
void QuickHull<Number>::generateHalfspacesFromVertices(std::vector<EigenVector> const& points, bool generateRelevantVerticesAndVertexSets){
STORM_LOG_ASSERT(!points.empty(), "Invoked QuickHull with empty set of points.");
STORM_LOG_DEBUG("Invoked QuickHull on " << points.size() << " points");
const uint_fast64_t dimension = points.front().rows();
// Generate initial set of d+1 affine independent points (if such a set exists)
std::vector<uint_fast64_t> vertexIndices;
uint_fast64_t minMaxVertexNumber;
if(!this->findInitialVertices(points, vertexIndices, minMaxVertexNumber)) {
// todo deal with this
std::cout << "QuickHull: Could not find d+1 affine independend points. TODO: implement this case (we get some degenerated thing here)" << std::endl;
}
#include "storm/exceptions/UnexpectedException.h"
namespace storm {
namespace storage {
namespace geometry {
template<typename ValueType>
void QuickHull<ValueType>::generateHalfspacesFromPoints(std::vector<EigenVector>& points, bool generateRelevantVerticesAndVertexSets){
STORM_LOG_ASSERT(!points.empty(), "Invoked QuickHull with empty set of points.");
STORM_LOG_DEBUG("Invoked QuickHull on " << points.size() << " points");
const uint_fast64_t dimension = points.front().rows();
if(dimension == 1) {
handle1DPoints(points, generateRelevantVerticesAndVertexSets);
} else {
// Generate initial set of d+1 affine independent points (if such a set exists)
std::vector<uint_fast64_t> vertexIndices;
uint_fast64_t minMaxVertexNumber;
if(this->findInitialVertices(points, vertexIndices, minMaxVertexNumber)) {
// compute point inside initial facet
EigenVector insidePoint(EigenVector::Zero(dimension));
for(uint_fast64_t vertexIndex : vertexIndices){
insidePoint += points[vertexIndex];
}
insidePoint /= storm::utility::convertNumber<ValueType>((uint_fast64_t) vertexIndices.size());
// Create the initial facets from the found vertices.
std::vector<Facet> facets = computeInitialFacets(points, vertexIndices, insidePoint);
// compute point inside initial facet
EigenVector insidePoint(EigenVector::Zero(dimension));
for(uint_fast64_t vertexIndex : vertexIndices){
insidePoint += points[vertexIndex];
// Enlarge the mesh by first considering all points that are minimal (or maximal) in at least one dimension
storm::storage::BitVector currentFacets(facets.size(), true);
storm::storage::BitVector consideredPoints(points.size(), false);
for(uint_fast64_t i = 0; i < minMaxVertexNumber; ++i) {
consideredPoints.set(i);
}
this->extendMesh(points, consideredPoints, facets, currentFacets, insidePoint);
for(auto & facet : facets){
facet.maxOutsidePointIndex = 0;
facet.outsideSet.clear();
}
// Now consider all points
consideredPoints = storm::storage::BitVector(points.size(), true);
this->extendMesh(points, consideredPoints, facets, currentFacets, insidePoint);
// Finally retrieve the resulting constrants
this->getPolytopeFromMesh(points, facets, currentFacets, generateRelevantVerticesAndVertexSets);
STORM_LOG_DEBUG("QuickHull invokation yielded " << resultMatrix.rows() << " constraints");
} else {
// The points are affine dependent. This special case needs to be handled accordingly.
handleAffineDependentPoints(points, generateRelevantVerticesAndVertexSets);
}
}
}
insidePoint /= storm::utility::convertNumber<ValueType>(vertexIndices.size());
// Create the initial facets from the found vertices.
std::vector<Facet> facets = computeInitialFacets(vertexIndices, insidePoint);
template<typename ValueType>
typename QuickHull<ValueType>::EigenMatrix& QuickHull<ValueType>::getResultMatrix() {
return this->resultMatrix;
}
// Enlarge the mesh by first considering all points that are minimal (or maximal) in at least one dimension
storm::storage::BitVector currentFacets(facets.size(), true);
storm::storage::BitVector consideredPoints(points.size(), false);
uint_fast64_t currentNumOfVertices = vertexIndices.size();
for(uint_fast64_t i = 0; i < minMaxVertexNumber; ++i) {
consideredPoints.set(i);
template<typename ValueType>
typename QuickHull<ValueType>::EigenVector& QuickHull<ValueType>::getResultVector() {
return this->resultVector;
}
this->extendMesh(points, consideredPoints, facets, currentFacets, insidePoint, currentNumOfVertices);
for(auto & facet : facets){
facet.maxOutsidePointIndex = 0;
facet.outsideSet.clear();
template<typename ValueType>
std::vector<typename QuickHull<ValueType>::EigenVector>& QuickHull<ValueType>::getRelevantVertices() {
return this->relevantVertices;
}
consideredPoints = storm::storage::BitVector(points.size(), true);
this->extendMesh(points, consideredPoints, facets, currentFacets, insidePoint, currentNumOfVertices);
this->getPolytopeFromMesh(points, facets, currentFacets, success && generateRelevantVerticesAndVertexSets);
return true;
}
template<typename ValueType>
void QuickHull<Number>::extendMesh(std::vector<EigenVector>& points,
storm::storage::BitVector& consideredPoints,
std::vector<Facet>& facets,
storm::storage::BitVector& currentFacets,
vector_t<Number>& insidePoint,
uint_fast64_t& currentNumOfVertices) const {
storm::storage::BitVector currentOutsidePoints = consideredPoints;
// Compute initial outside Sets
for(uint_fast64_t facetIndex : currentFacets){
computeOutsideSetOfFacet(facets[facetIndex], currentOutsidePoints, points);
template<typename ValueType>
std::vector<std::vector<uint_fast64_t>>& QuickHull<ValueType>::getVertexSets() {
return this->vertexSets;
}
for(uint_fast64_t facetCount = currentFacets.getNextSetIndex(0); facetCount != currentFacets.size(); facetCount = currentFacets.getNextSetIndex(facetCount+1)) {
// set all points to false to get rid of points that lie within the polytope after each iteration
currentOutsidePoints.reset();
// Find a facet with a non-empty outside set
if(!facets[facetCount].outsideSet.empty()) {
uint_fast64_t numberOfNewFacets = 0;
// Now we compute the enlarged mesh
uint_fast64_t farAwayPointIndex = facets[facetCount].maxOutsidePointIndex;
assert(consideredPoints.get(farAwayPointIndex));
// get Visible set from maxOutsidePoint of the current facet
std::set<uint_fast64_t> visibleSet = getVisibleSet(facets, facetCount, points[farAwayPointIndex]);
std::set<uint_fast64_t> invisibleSet = getInvisibleNeighbors(facets, visibleSet);
for(auto invisFacetIt = invisibleSet.begin(); invisFacetIt != invisibleSet.end(); ++invisFacetIt) {
for(auto visFacetIt = visibleSet.begin(); visFacetIt != visibleSet.end(); ++visFacetIt) {
if (facetHasNeighborWithIndex(facets[*invisFacetIt], *visFacetIt)) {
Facet newFacet;
// Set points of Facet
newFacet.points = getCommonPoints(facets[*invisFacetIt], facets[*visFacetIt]);
// replace old facet index by new facet index in the current neighbor
newFacet.points.push_back(farAwayPointIndex);
replaceFacetNeighbor(facets, *visFacetIt, facets.size(), *invisFacetIt);
newFacet.neighbors.push_back(*invisFacetIt);
// get new hyperplane from common points and new point
std::vector<EigenVector> vectorSet;
vectorSet.reserve(points[0].dimension());
EigenVector refPoint(points[farAwayPointIndex].rawCoordinates());
for (uint_fast64_t pointCount = 0; pointCount + 1 < points[0].dimension(); ++pointCount){
vectorSet.emplace_back(points[newFacet.points[pointCount]].rawCoordinates() - refPoint);
}
assert(linearDependenciesFilter(vectorSet));
newFacet.hyperplane = Hyperplane<Number>(std::move(refPoint), std::move(vectorSet));
// check orientation of hyperplane
// To avoid multiple matrix multiplications we need a point that is known to lie well within the polytope
if (!newFacet.hyperplane.holds(insidePoint)){
newFacet.hyperplane.invert();
}
facets.push_back(newFacet);
// avoid using replaced facets and add new ones
currentFacets.push_back(true);
// Set Points in outsideSet free
// increase Number Of new Facets
++numberOfNewFacets;
}
}
template<typename ValueType>
void QuickHull<ValueType>::handle1DPoints(std::vector<EigenVector>& points, bool generateRelevantVerticesAndVertexSets) {
ValueType minValue = points.front()(0);
ValueType maxValue = points.front()(0);
uint_fast64_t minIndex = 0;
uint_fast64_t maxIndex = 0;
for(uint_fast64_t pointIndex = 1; pointIndex < points.size(); ++pointIndex) {
ValueType const& pointValue = points[pointIndex](0);
if(pointValue < minValue) {
minValue = pointValue;
minIndex = pointIndex;
}
for(auto visFacetIt = visibleSet.begin(); visFacetIt != visibleSet.end(); ++visFacetIt){
for(uint_fast64_t m = 0; m < facets[*visFacetIt].outsideSet.size(); ++m){
currentOutsidePoints.set(facets[*visFacetIt].outsideSet[m], true);
}
}
for(auto visFacetIt = visibleSet.begin(); visFacetIt != visibleSet.end(); ++visFacetIt){
currentFacets.set(*visFacetIt, false);
if(pointValue > maxValue) {
maxValue = pointValue;
maxIndex = pointIndex;
}
// compute new outside sets
for(uint_fast64_t fIndex = facets.size()-numberOfNewFacets; fIndex != facets.size(); ++fIndex){
computeOutsideSetOfFacet(facets[fIndex], currentOutsidePoints, points);
}
resultMatrix = EigenMatrix(2,1);
resultMatrix(0,0) = -storm::utility::one<ValueType>();
resultMatrix(1,0) = storm::utility::one<ValueType>();
resultVector = EigenVector(2);
resultVector(0) = -minValue;
resultVector(1) = maxValue;
if(generateRelevantVerticesAndVertexSets) {
relevantVertices.push_back(points[minIndex]);
std::vector<uint_fast64_t> minVertexSet(1,0);
vertexSets.push_back(std::move(minVertexSet));
if(minIndex != maxIndex && points[minIndex] != points[maxIndex]) {
relevantVertices.push_back(points[maxIndex]);
std::vector<uint_fast64_t> maxVertexSet(1,1);
vertexSets.push_back(std::move(maxVertexSet));
} else {
vertexSets.push_back(vertexSets.front());
}
++currentNumOfVertices;
#ifdef PTERM_DEBUG_OUTPUT
numOfIncludedPoints += currentOutsidePoints.count();
PTERM_DEBUG("Mesh currently contains " << numOfIncludedPoints << " of " << consideredPoints.count() << "points");
#endif
// find neighbors in new facets
setNeighborhoodOfNewFacets(facets, facets.size() - numberOfNewFacets, points[0].dimension());
}
}
return true;
}
template<typename ValueType>
void QuickHull<Number>::getPolytopeFromMesh(std::vector<EigenVector> const& points, std::vector<Facet> const& facets, storm::storage::BitVector const& currentFacets, bool generateRelevantVerticesAndVertexSets){
hypro::pterm::HyperplaneCollector<Number> hyperplaneCollector;
for(uint_fast64_t facetCount = 0; facetCount < facets.size(); ++facetCount){
if(currentFacets[facetCount]){
hyperplaneCollector.insert(std::move(facets[facetCount].hyperplane), generateRelevantVerticesAndVertexSets ? &facets[facetCount].points : nullptr);
template <typename ValueType>
bool QuickHull<ValueType>::affineFilter(std::vector<uint_fast64_t> const& subset, uint_fast64_t const& item, std::vector<EigenVector> const& points){
EigenMatrix vectorMatrix(points[item].rows()+1, subset.size() + 1);
for (uint_fast64_t i = 0; i < subset.size(); ++i){
vectorMatrix.col(i) << points[subset[i]], storm::utility::one<ValueType>();
}
vectorMatrix.col(subset.size()) << points[item], storm::utility::one<ValueType>();
return (vectorMatrix.fullPivLu().rank() > subset.size());
}
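// Editorial sketch (not part of the original sources): appending the homogeneous coordinate 1
// turns affine independence of the points into linear independence of the augmented columns.
// For example, the collinear 2D points (0,0), (1,1), (2,2) yield the augmented matrix
//     [ 0 1 2 ]
//     [ 0 1 2 ]
//     [ 1 1 1 ]
// of rank 2, which is not greater than subset.size() = 2, so the filter rejects the third point,
// whereas (0,0), (1,0), (0,1) yield rank 3 > 2 and pass.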
if(generateRelevantVerticesAndVertexSets){
//Get the mapping from a hyperplane to the set of vertices that lie on that plane, erase the duplicates, and count for each vertex the number of hyperplanes on which that vertex lies
this->mVertexSets = hyperplaneCollector.getIndexLists();
std::vector<uint_fast64_t> hyperplanesOnVertexCounter(points.size(), 0);
for(auto& vertexVector : this->mVertexSets){
std::set<uint_fast64_t> vertexSet;
for(auto const& i : vertexVector){
if(vertexSet.insert(i).second){
++hyperplanesOnVertexCounter[i];
}
template<typename ValueType>
void QuickHull<ValueType>::handleAffineDependentPoints(std::vector<EigenVector>& points, bool generateRelevantVerticesAndVertexSets) {
bool pointsAffineDependent = true;
std::vector<std::pair<EigenVector, ValueType>> additionalConstraints;
while(pointsAffineDependent) {
// get one hyperplane that holds all points
const uint_fast64_t dimension = points.front().rows();
EigenVector refPoint(points.front());
EigenMatrix constraints(points.size() - 1, dimension);
for(unsigned row = 1; row < points.size(); ++row) {
constraints.row(row - 1) = (points[row] - refPoint).transpose();
}
vertexVector.assign( vertexSet.begin(), vertexSet.end());
}
//Now, we can erase all vertices which do not lie on at least dimension() hyperplanes.
//Note that the indices of the HyperplaneToVerticesMapping needs to be refreshed according to the new set of vertices
//Therefore, we additionally store the old indices for every vertex to be able to translate from old to new indices
std::unordered_map<hypro::EigenVector, std::vector<uint_fast64_t>> relevantVerticesMap;
relevantVerticesMap.reserve(points.size());
for(uint_fast64_t vertexIndex = 0; vertexIndex < hyperplanesOnVertexCounter.size(); ++vertexIndex){
if(hyperplanesOnVertexCounter[vertexIndex] >= points[0].dimension()){
auto mapEntry = relevantVerticesMap.insert(typename std::unordered_map<hypro::EigenVector, std::vector<uint_fast64_t>>::value_type(points[vertexIndex], std::vector<uint_fast64_t>())).first;
mapEntry->second.push_back(vertexIndex);
EigenVector normal = constraints.fullPivLu().kernel().col(0);
// Eigen returns the column vector 0...0 if the kernel is empty (i.e., there is no such hyperplane)
if(normal.isZero()) {
pointsAffineDependent = false;
} else {
points.push_back(refPoint + normal);
ValueType offset = normal.dot(refPoint);
additionalConstraints.emplace_back(std::move(normal),std::move(offset));
}
}
//Fill in the relevant vertices and create a translation map from old to new indices
std::vector<uint_fast64_t> oldToNewIndexMapping (points.size(), points.size()); //Initialize with some illegal value
this->mRelevantVertices.clear();
this->mRelevantVertices.reserve(relevantVerticesMap.size());
for(auto const& mapEntry : relevantVerticesMap){
for(auto const& oldIndex : mapEntry.second){
oldToNewIndexMapping[oldIndex] = this->mRelevantVertices.size();
// Now the points are affine independent so we can relaunch the procedure
generateHalfspacesFromPoints(points, generateRelevantVerticesAndVertexSets);
// Add the constraints obtained above
uint_fast64_t numOfAdditionalConstraints = additionalConstraints.size();
uint_fast64_t numOfRegularConstraints = resultMatrix.rows();
resultMatrix.conservativeResize(numOfRegularConstraints + numOfAdditionalConstraints, StormEigen::NoChange);
resultVector.conservativeResize(numOfRegularConstraints + numOfAdditionalConstraints);
for(uint_fast64_t row = numOfRegularConstraints; row < numOfRegularConstraints + numOfAdditionalConstraints; ++row) {
resultMatrix.row(row) = additionalConstraints[row - numOfRegularConstraints].first.transpose();
resultVector(row) = additionalConstraints[row - numOfRegularConstraints].second;
}
// clear the additionally added points. Note that the order of the points might have changed
storm::storage::BitVector keptPoints(points.size(), true);
for(uint_fast64_t pointIndex = 0; pointIndex < points.size(); ++pointIndex) {
for(uint_fast64_t row = 0; row < resultMatrix.rows(); ++row) {
if((resultMatrix.row(row) * points[pointIndex])(0) > resultVector(row)) {
keptPoints.set(pointIndex, false);
break;
}
}
this->mRelevantVertices.push_back(mapEntry.first);
}
//Actually translate and erase duplicates
for(auto& vertexVector : this->mVertexSets){
std::set<uint_fast64_t> vertexSet;
for(auto const& oldIndex : vertexVector){
if(hyperplanesOnVertexCounter[oldIndex] >= points[0].dimension()){
vertexSet.insert(oldToNewIndexMapping[oldIndex]);
points = storm::utility::vector::filterVector(points, keptPoints);
if(generateRelevantVerticesAndVertexSets) {
storm::storage::BitVector keptVertices(relevantVertices.size(), true);
for(uint_fast64_t vertexIndex = 0; vertexIndex < relevantVertices.size(); ++vertexIndex) {
for(uint_fast64_t row = 0; row < resultMatrix.rows(); ++row) {
if((resultMatrix.row(row) * relevantVertices[vertexIndex])(0) > resultVector(row)) {
keptVertices.set(vertexIndex, false);
break;
}
}
}
vertexVector.assign( vertexSet.begin(), vertexSet.end());
relevantVertices = storm::utility::vector::filterVector(relevantVertices, keptVertices);
STORM_LOG_WARN("Can not retrieve vertex sets for degenerated polytope (not implemented)");
vertexSets.clear();
}
} else {
this->mRelevantVertices.clear();
this->mVertexSets.clear();
}
auto matrixVector = hyperplaneCollector.getCollectedHyperplanesAsMatrixVector();
this->mResultMatrix = std::move(matrixVector.first);
this->mResultVector = std::move(matrixVector.second);
PTERM_DEBUG("Computed H representation from " << points.size() << " vertices and " << this->mResultVector.rows() << " hyperplanes and " << this->mRelevantVertices.size() << " relevant vertices. Dimension is " << points[0].dimension());
PTERM_DEBUG("Total number of considered facets: " << facets.size() << " where " << currentFacets.count() << " are enabled.");
}
template <typename ValueType>
bool QuickHull<ValueType>::affineFilter(std::vector<uint_fast64_t> const& subset, uint_fast64_t const& item, std::vector<EigenVector<ValueType>> const& points){
EigenMatrix vectorMatrix(vertices[item].dimension()+1, subset.size() + 1);
for (uint_fast64_t i = 0; i < subset.size(); ++i){
vectorMatrix.col(i) << vertices[subset[i]], storm::utility::one<ValueType>();
}
vectorMatrix.col(subset.size()) << vertices[item], storm::utility::one<ValueType>();
return (vectorMatrix.fullPivLu().rank() > subset.size());
}
template<typename ValueType>
bool QuickHull<Number>::findInitialVertices(std::vector<hypro::EigenVector>& points, std::vector<uint_fast64_t>& verticesOfInitialPolytope, uint_fast64_t& minMaxVertices) const{
const uint_fast64_t dimension = points[0].dimension();
if(points.size() < dimension + 1){
//not enough points to obtain a (non-degenerated) polytope
return false;
}
const uint_fast64_t candidatesToFind = std::min(2*dimension, points.size());
uint_fast64_t candidatesFound = 0;
storm::storage::BitVector consideredPoints(points.size(), true);
while(candidatesFound < candidatesToFind && !consideredPoints.empty()) {
for(uint_fast64_t currDim=0; currDim<dimension; ++currDim) {
uint_fast64_t minIndex = *consideredPoints.begin();
uint_fast64_t maxIndex = minIndex;
for(uint_fast64_t pointIndex : consideredPoints){
//Check if the current point is a new minimum or maximum at the current dimension
if(points[minIndex](currDim) > points[pointIndex](currDim)){
minIndex = pointIndex;
}
if(points[maxIndex](currDim) < points[pointIndex](currDim)){
maxIndex = pointIndex;
template<typename ValueType>
bool QuickHull<ValueType>::findInitialVertices(std::vector<EigenVector>& points, std::vector<uint_fast64_t>& verticesOfInitialPolytope, uint_fast64_t& minMaxVertices) const{
const uint_fast64_t dimension = points.front().rows();
if(points.size() < dimension + 1){
//not enough points to obtain a (non-degenerated) polytope
return false;
}
const uint_fast64_t candidatesToFind = std::min(2*dimension, (uint_fast64_t) points.size());
uint_fast64_t candidatesFound = 0;
storm::storage::BitVector consideredPoints(points.size(), true);
while(candidatesFound < candidatesToFind && !consideredPoints.empty()) {
for(uint_fast64_t currDim=0; currDim<dimension; ++currDim) {
uint_fast64_t minIndex = *consideredPoints.begin();
uint_fast64_t maxIndex = minIndex;
for(uint_fast64_t pointIndex : consideredPoints){
//Check if the current point is a new minimum or maximum at the current dimension
if(points[minIndex](currDim) > points[pointIndex](currDim)){
minIndex = pointIndex;
}
if(points[maxIndex](currDim) < points[pointIndex](currDim)){
maxIndex = pointIndex;
}
}
consideredPoints.set(minIndex, false);
consideredPoints.set(maxIndex, false);
}
consideredPoints.set(minIndex, false);
consideredPoints.set(maxIndex, false);
}
//Found candidates. Now swap them to the front.
consideredPoints = ~consideredPoints;
const uint_fast64_t newNumberOfCandidates = consideredPoints.getNumberOfSetBits();
assert(newNumberOfCandidates > 0);
if(newNumberOfCandidates < points.size()){
uint_fast64_t nextPointToMove = consideredPoints.getNextSetIndex(newNumberOfCandidates);
for(uint_fast64_t indexAtFront = candidatesFound; indexAtFront < newNumberOfCandidates; ++indexAtFront){
if(!consideredPoints.get(indexAtFront)) {
assert(nextPointToMove != consideredPoints.size());
std::swap(points[indexAtFront], points[nextPointToMove]);
nextPointToMove = consideredPoints.getNextSetIndex(nextPointToMove+1);
consideredPoints.set(indexAtFront);
//Found candidates. Now swap them to the front.
consideredPoints = ~consideredPoints;
const uint_fast64_t newNumberOfCandidates = consideredPoints.getNumberOfSetBits();
assert(newNumberOfCandidates > 0);
if(newNumberOfCandidates < points.size()){
uint_fast64_t nextPointToMove = consideredPoints.getNextSetIndex(newNumberOfCandidates);
for(uint_fast64_t indexAtFront = candidatesFound; indexAtFront < newNumberOfCandidates; ++indexAtFront){
if(!consideredPoints.get(indexAtFront)) {
assert(nextPointToMove != consideredPoints.size());
std::swap(points[indexAtFront], points[nextPointToMove]);
nextPointToMove = consideredPoints.getNextSetIndex(nextPointToMove+1);
consideredPoints.set(indexAtFront);
}
}
assert(nextPointToMove == consideredPoints.size());
}
assert(nextPointToMove == consideredPoints.size());
if(candidatesFound==0){
//We are in the first iteration. It holds that the first newNumberOfCandidates points will be vertices of the final polytope
minMaxVertices = newNumberOfCandidates;
}
candidatesFound = newNumberOfCandidates;
consideredPoints = ~consideredPoints;
}
if(candidatesFound==0){
//We are in the first iteration. It holds that the first newNumberOfCandidates points will be vertices of the final polytope
minMaxVertices = newNumberOfCandidates;
storm::storage::geometry::SubsetEnumerator<std::vector<EigenVector>> subsetEnum(points.size(), dimension+1, points, affineFilter);
if(subsetEnum.setToFirstSubset()){
verticesOfInitialPolytope = subsetEnum.getCurrentSubset();
return true;
} else {
return false;
}
candidatesFound = newNumberOfCandidates;
consideredPoints = ~consideredPoints;
}
storm::storage::geometry::SubsetEnumerator<std::vector<EigenVector<ValueType>>> subsetEnum(points.size(), dimension+1, points, affineFilter);
if(subsetEnum.setToFirstSubset()){
verticesOfInitialPolytope = subsetEnum.getCurrentSubset();
return true;
} else {
return false;
}
}
template<typename ValueType>
std::vector<typename QuickHull<ValueType>::Facet> computeInitialFacets(std::vector<EigenVector> const& points, std::vector<uint_fast64_t> const& verticesOfInitialPolytope, EigenVector const& insidePoint) const {
const uint_fast64_t dimension = points.front().rows();
assert(verticesOfInitialPolytope.size() == dimension + 1);
std::vector<Facet> result;
result.reserve(dimension + 1);
storm::storage::geometry::SubsetEnumerator<> subsetEnum(verticesOfInitialPolytope.size(), dimension);
if(!subsetEnum.setToFirstSubset()){
STORM_LOG_THROW(false, storm::exceptions::UnexpectedException, "Could not find an initial subset.");
}
do{
Facet newFacet;
// set the points that lie on the new facet
std::vector<uint_fast64_t> const& subset(subsetEnum.getCurrentSubset());
newFacet.points.reserve(subset.size());
for(uint_fast64_t i : subset){
newFacet.points.push_back(verticesOfInitialPolytope[i]);
template<typename ValueType>
std::vector<typename QuickHull<ValueType>::Facet> QuickHull<ValueType>::computeInitialFacets(std::vector<EigenVector> const& points, std::vector<uint_fast64_t> const& verticesOfInitialPolytope, EigenVector const& insidePoint) const {
const uint_fast64_t dimension = points.front().rows();
assert(verticesOfInitialPolytope.size() == dimension + 1);
std::vector<Facet> result;
result.reserve(dimension + 1);
storm::storage::geometry::SubsetEnumerator<> subsetEnum(verticesOfInitialPolytope.size(), dimension);
if(!subsetEnum.setToFirstSubset()){
STORM_LOG_THROW(false, storm::exceptions::UnexpectedException, "Could not find an initial subset.");
}
//neighbors: these are always the remaining facets
newFacet.neighbors.reserve(dimension);
for(uint_fast64_t i = 0; i < dimension+1; ++i){
if(i != facets.size()){ //initFacets.size() will be the index of this new facet!
newFacet.neighbors.push_back(i);
do{
Facet newFacet;
// set the points that lie on the new facet
std::vector<uint_fast64_t> const& subset(subsetEnum.getCurrentSubset());
newFacet.points.reserve(subset.size());
for(uint_fast64_t i : subset){
newFacet.points.push_back(verticesOfInitialPolytope[i]);
}
}
// normal and offset:
computeNormalAndOffsetOfFacet(points, insidePoint, newFacet);
facets.push_back(std::move(newFacet));
} while(subsetEnum.incrementSubset());
assert(facets.size()==dimension+1);
}
//neighbors: these are always the remaining facets
newFacet.neighbors.reserve(dimension);
for(uint_fast64_t i = 0; i < dimension+1; ++i){
if(i != result.size()){ //initFacets.size() will be the index of this new facet!
newFacet.neighbors.push_back(i);
}
}
// normal and offset:
computeNormalAndOffsetOfFacet(points, insidePoint, newFacet);
template<typename ValueType>
void computeNormalAndOffsetOfFacet(std::vector<EigenVector> const& points, EigenVector const& insidePoint, Facet& facet) const {
const uint_fast64_t dimension = points.front().rows();
assert(facet.points.size() == dimension);
EigenVector refPoint(facet.points.back());
EigenMatrix constraints(dimension-1, dimension);
for(unsigned row = 0; row < dimension-1; ++row) {
constraints.row(row) = points[facet.points[row]] - refPoint;
result.push_back(std::move(newFacet));
} while(subsetEnum.incrementSubset());
assert(result.size()==dimension+1);
return result;
}
facet.normal = constraints.fullPivLu().kernel();
facet.offset = facet.normal.dot(refPoint);
// invert the plane if the insidePoint is not contained in it
if(facet.normal.dot(insidePoint) > facet.offset) {
facet.normal = -facet.normal;
facet.offset = -facet.offset;
template<typename ValueType>
void QuickHull<ValueType>::computeNormalAndOffsetOfFacet(std::vector<EigenVector> const& points, EigenVector const& insidePoint, Facet& facet) const {
const uint_fast64_t dimension = points.front().rows();
assert(facet.points.size() == dimension);
EigenVector refPoint(points[facet.points.back()]);
EigenMatrix constraints(dimension-1, dimension);
for(unsigned row = 0; row < dimension-1; ++row) {
constraints.row(row) = (points[facet.points[row]] - refPoint).transpose();
}
facet.normal = constraints.fullPivLu().kernel().col(0);
facet.offset = facet.normal.dot(refPoint);
// invert the plane if the insidePoint is not contained in it
if(facet.normal.dot(insidePoint) > facet.offset) {
facet.normal = -facet.normal;
facet.offset = -facet.offset;
}
}
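// Editorial sketch (not part of the original sources): in 2D, a facet through the points (1,0)
// and refPoint (0,1) yields the single constraint row (1,0)-(0,1) = (1,-1); its kernel is
// spanned by the normal (1,1), and the offset is (1,1).dot((0,1)) = 1, so the facet lies on
// x + y = 1. If an inside point (e.g. the origin) satisfied normal.dot(insidePoint) > offset,
// normal and offset would be negated.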
}
template<typename ValueType>
std::set<uint_fast64_t> QuickHull<Number>::getVisibleSet(std::vector<Facet> const& facets, uint_fast64_t const& startIndex, EigenVector const& point) const {
std::set<uint_fast64_t> facetsToCheck;
std::set<uint_fast64_t> facetsChecked;
std::set<uint_fast64_t> visibleSet;
facetsChecked.insert(startIndex);
visibleSet.insert(startIndex);
for(uint_fast64_t i = 0; i < facets[startIndex].neighbors.size(); ++i){
facetsToCheck.insert(facets[startIndex].neighbors[i]);
}
while (!facetsToCheck.empty()){
auto elementIt = facetsToCheck.begin();
if ( point.dot(facets[*elementIt].normal) > facets[*elementIt].offset) {
visibleSet.insert(*elementIt);
for(uint_fast64_t i = 0; i < facets[*elementIt].neighbors.size(); ++i){
if (facetsChecked.find(facets[*elementIt].neighbors[i]) == facetsChecked.end()){
facetsToCheck.insert(facets[*elementIt].neighbors[i]);
template<typename ValueType>
void QuickHull<ValueType>::extendMesh(std::vector<EigenVector>& points,
storm::storage::BitVector& consideredPoints,
std::vector<Facet>& facets,
storm::storage::BitVector& currentFacets,
EigenVector& insidePoint) const {
storm::storage::BitVector currentOutsidePoints = consideredPoints;
// Compute initial outside Sets
for(uint_fast64_t facetIndex : currentFacets){
computeOutsideSetOfFacet(facets[facetIndex], currentOutsidePoints, points);
}
for(uint_fast64_t facetCount = currentFacets.getNextSetIndex(0); facetCount != currentFacets.size(); facetCount = currentFacets.getNextSetIndex(facetCount+1)) {
// set all points to false to get rid of points that lie within the polytope after each iteration
currentOutsidePoints.clear();
// Find a facet with a non-empty outside set
if(!facets[facetCount].outsideSet.empty()) {
uint_fast64_t numberOfNewFacets = 0;
// Now we compute the enlarged mesh
uint_fast64_t farAwayPointIndex = facets[facetCount].maxOutsidePointIndex;
assert(consideredPoints.get(farAwayPointIndex));
// get Visible set from maxOutsidePoint of the current facet
std::set<uint_fast64_t> visibleSet = getVisibleSet(facets, facetCount, points[farAwayPointIndex]);
std::set<uint_fast64_t> invisibleSet = getInvisibleNeighbors(facets, visibleSet);
for(auto invisFacetIt = invisibleSet.begin(); invisFacetIt != invisibleSet.end(); ++invisFacetIt) {
for(auto visFacetIt = visibleSet.begin(); visFacetIt != visibleSet.end(); ++visFacetIt) {
if (std::find(facets[*invisFacetIt].neighbors.begin(), facets[*invisFacetIt].neighbors.end(), *visFacetIt) != facets[*invisFacetIt].neighbors.end()) {
Facet newFacet;
// Set points of Facet
newFacet.points = getCommonPoints(facets[*invisFacetIt], facets[*visFacetIt]);
newFacet.points.push_back(farAwayPointIndex);
// replace old facet index by new facet index in the current neighbor
replaceFacetNeighbor(facets, *visFacetIt, facets.size(), *invisFacetIt);
newFacet.neighbors.push_back(*invisFacetIt);
// Compute the normal and the offset
computeNormalAndOffsetOfFacet(points, insidePoint, newFacet);
// add new facet
facets.push_back(newFacet);
currentFacets.resize(currentFacets.size() + 1, true);
// increase Number Of new Facets
++numberOfNewFacets;
}
}
}
for(auto visibleFacet : visibleSet){
for(uint_fast64_t outsidePoint : facets[visibleFacet].outsideSet){
currentOutsidePoints.set(outsidePoint, true);
}
currentFacets.set(visibleFacet, false);
}
// compute new outside sets
for(uint_fast64_t facetIndex : currentFacets){
computeOutsideSetOfFacet(facets[facetIndex], currentOutsidePoints, points);
}
// find neighbors in new facets
setNeighborhoodOfNewFacets(facets, facets.size() - numberOfNewFacets, points.front().rows());
}
}
facetsChecked.insert(*elementIt);
facetsToCheck.erase(elementIt);
}
return visibleSet;
}
template<typename ValueType>
void QuickHull<Number>::setNeighborhoodOfNewFacets(std::vector<Facet>& facets, uint_fast64_t firstNewFacet, uint_fast64_t dimension) const{
for(uint_fast64_t currentFacet = firstNewFacet; currentFacet < facets.size(); ++currentFacet){
for(uint_fast64_t otherFacet = currentFacet + 1; otherFacet < facets.size(); ++otherFacet){
std::vector<uint_fast64_t> commonPoints = getCommonPoints(facets[currentFacet], facets[otherFacet]);
if (commonPoints.size() >= dimension-1){
facets[currentFacet].neighbors.push_back(otherFacet);
facets[otherFacet].neighbors.push_back(currentFacet);
}
template<typename ValueType>
void QuickHull<ValueType>::getPolytopeFromMesh(std::vector<EigenVector> const& points, std::vector<Facet> const& facets, storm::storage::BitVector const& currentFacets, bool generateRelevantVerticesAndVertexSets) {
storm::storage::geometry::HyperplaneCollector<ValueType> hyperplaneCollector;
for(auto facet : currentFacets) {
hyperplaneCollector.insert(std::move(facets[facet].normal), std::move(facets[facet].offset), generateRelevantVerticesAndVertexSets ? &facets[facet].points : nullptr);
}
}
}
template<typename ValueType>
void QuickHull<Number>::replaceFacetNeighbor(std::vector<Facet>& facets, uint_fast64_t oldFacetIndex, uint_fast64_t newFacetIndex, uint_fast64_t neighborIndex) const {
uint_fast64_t index = 0;
while(facets[neighborIndex].neighbors[index] != oldFacetIndex && index < facets[neighborIndex].neighbors.size()){
++index;
}
if (index < facets[neighborIndex].neighbors.size()){
facets[neighborIndex].neighbors[index] = newFacetIndex;
}
}
template<typename ValueType>
void QuickHull<Number>::computeOutsideSetOfFacet(Facet& facet, storm::storage::BitVector& currentOutsidePoints, std::vector<EigenVector> const& points) const {
Number maxMultiplicationResult = facet.hyperplane.offset();
for(uint_fast64_t pointIndex : currentOutsidePoints) {
ValueType multiplicationResult = points[pointIndex].dot(facet.normal);
if( multiplicationResult > facet.hyperplane.offset() ) {
currentOutsidePoints.set(pointIndex, false); // we already know that the point lies outside so it can be ignored for future facets
facet.outsideSet.push_back(pointIndex);
if (multiplicationResult > maxMultiplicationResult) {
maxMultiplicationResult = multiplicationResult;
facet.maxOutsidePointIndex = pointIndex;
if(generateRelevantVerticesAndVertexSets){
//Get the mapping from a hyperplane to the set of vertices that lie on that plane, erase the duplicates, and count for each vertex the number of hyperplanes on which that vertex lies
vertexSets = hyperplaneCollector.getIndexLists();
std::vector<uint_fast64_t> hyperplanesOnVertexCounter(points.size(), 0);
for(auto& vertexVector : vertexSets){
std::set<uint_fast64_t> vertexSet;
for(auto const& i : vertexVector){
if(vertexSet.insert(i).second){
++hyperplanesOnVertexCounter[i];
}
}
vertexVector.assign( vertexSet.begin(), vertexSet.end());
}
//Now, we can erase all vertices which do not lie on at least dimension hyperplanes.
//Note that the indices of the HyperplaneToVerticesMapping needs to be refreshed according to the new set of vertices
//Therefore, we additionally store the old indices for every vertex to be able to translate from old to new indices
std::unordered_map<EigenVector, std::vector<uint_fast64_t>> relevantVerticesMap;
relevantVerticesMap.reserve(points.size());
for(uint_fast64_t vertexIndex = 0; vertexIndex < hyperplanesOnVertexCounter.size(); ++vertexIndex){
if(hyperplanesOnVertexCounter[vertexIndex] >= points.front().rows()){
auto mapEntry = relevantVerticesMap.insert(typename std::unordered_map<EigenVector, std::vector<uint_fast64_t>>::value_type(points[vertexIndex], std::vector<uint_fast64_t>())).first;
mapEntry->second.push_back(vertexIndex);
}
}
//Fill in the relevant vertices and create a translation map from old to new indices
std::vector<uint_fast64_t> oldToNewIndexMapping (points.size(), points.size()); //Initialize with some illegal value
relevantVertices.clear();
relevantVertices.reserve(relevantVerticesMap.size());
for(auto const& mapEntry : relevantVerticesMap){
for(auto const& oldIndex : mapEntry.second){
oldToNewIndexMapping[oldIndex] = relevantVertices.size();
}
relevantVertices.push_back(mapEntry.first);
}
//Actually translate and erase duplicates
for(auto& vertexVector : vertexSets){
std::set<uint_fast64_t> vertexSet;
for(auto const& oldIndex : vertexVector){
if(hyperplanesOnVertexCounter[oldIndex] >= points.front().rows()){
vertexSet.insert(oldToNewIndexMapping[oldIndex]);
}
}
vertexVector.assign( vertexSet.begin(), vertexSet.end());
}
} else {
relevantVertices.clear();
vertexSets.clear();
}
auto matrixVector = hyperplaneCollector.getCollectedHyperplanesAsMatrixVector();
resultMatrix = std::move(matrixVector.first);
resultVector = std::move(matrixVector.second);
}
}
template<typename ValueType>
std::vector<uint_fast64_t> QuickHull<Number>::getCommonPoints(Facet const& lhs, Facet const& rhs) const{
std::vector<uint_fast64_t> commonPoints;
for(uint_fast64_t refPoint = 0; refPoint < lhs.points.size(); ++refPoint){
for(uint_fast64_t currentPoint = 0; currentPoint < rhs.points.size(); ++currentPoint){
if (lhs.points[refPoint] == rhs.points[currentPoint]){
commonPoints.push_back(lhs.points[refPoint]);
template<typename ValueType>
std::set<uint_fast64_t> QuickHull<ValueType>::getVisibleSet(std::vector<Facet> const& facets, uint_fast64_t const& startIndex, EigenVector const& point) const {
std::set<uint_fast64_t> facetsToCheck;
std::set<uint_fast64_t> facetsChecked;
std::set<uint_fast64_t> visibleSet;
facetsChecked.insert(startIndex);
visibleSet.insert(startIndex);
for(uint_fast64_t i = 0; i < facets[startIndex].neighbors.size(); ++i){
facetsToCheck.insert(facets[startIndex].neighbors[i]);
}
while (!facetsToCheck.empty()){
auto elementIt = facetsToCheck.begin();
if ( point.dot(facets[*elementIt].normal) > facets[*elementIt].offset) {
visibleSet.insert(*elementIt);
for(uint_fast64_t i = 0; i < facets[*elementIt].neighbors.size(); ++i){
if (facetsChecked.find(facets[*elementIt].neighbors[i]) == facetsChecked.end()){
facetsToCheck.insert(facets[*elementIt].neighbors[i]);
}
}
}
facetsChecked.insert(*elementIt);
facetsToCheck.erase(elementIt);
}
return visibleSet;
}
return commonPoints;
}
template<typename ValueType>
std::set<uint_fast64_t> QuickHull<Number>::getInvisibleNeighbors( std::vector<Facet>& facets, std::set<uint_fast64_t> const& visibleSet) const {
std::set<uint_fast64_t> invisibleNeighbors;
for(auto currentFacetIt = visibleSet.begin(); currentFacetIt != visibleSet.end(); ++currentFacetIt){
for(uint_fast64_t currentNeighbor = 0; currentNeighbor < facets[*currentFacetIt].neighbors.size(); ++currentNeighbor){
if (visibleSet.find(facets[*currentFacetIt].neighbors[currentNeighbor]) == visibleSet.end()){
invisibleNeighbors.insert(facets[*currentFacetIt].neighbors[currentNeighbor]);
template<typename ValueType>
void QuickHull<ValueType>::setNeighborhoodOfNewFacets(std::vector<Facet>& facets, uint_fast64_t firstNewFacet, uint_fast64_t dimension) const{
for(uint_fast64_t currentFacet = firstNewFacet; currentFacet < facets.size(); ++currentFacet){
for(uint_fast64_t otherFacet = currentFacet + 1; otherFacet < facets.size(); ++otherFacet){
if (getCommonPoints(facets[currentFacet], facets[otherFacet]).size() >= dimension-1) {
facets[currentFacet].neighbors.push_back(otherFacet);
facets[otherFacet].neighbors.push_back(currentFacet);
}
}
}
}
return invisibleNeighbors;
}
template<typename ValueType>
void QuickHull<Number>::enlargeIncompleteResult(std::vector<Facet> const& facets, storm::storage::BitVector const& currentFacets, std::vector<EigenVector> const& points, bool generateRelevantVerticesAndVertexSets){
PTERM_TRACE("Enlarging incomplete Result of QuickHull");
//Obtain the set of outside points
std::vector<uint_fast64_t> outsidePoints;
for(uint_fast64_t facetIndex = currentFacets.find_first(); facetIndex != currentFacets.npos; facetIndex = currentFacets.find_next(facetIndex)){
outsidePoints.insert(outsidePoints.end(), facets[facetIndex].outsideSet.begin(), facets[facetIndex].outsideSet.end());
}
//Now adjust the offsets of all the hyperplanes such that they contain each outside point
for(uint_fast64_t planeIndex=0; planeIndex < this->mResultMatrix.rows(); ++planeIndex){
Number maxValue = this->mResultVector(planeIndex);
for (uint_fast64_t outsidePointIndex : outsidePoints){
Number currValue = this->mResultMatrix.row(planeIndex) * (points[outsidePointIndex].rawCoordinates());
maxValue = std::max(maxValue, currValue);
template<typename ValueType>
void QuickHull<ValueType>::replaceFacetNeighbor(std::vector<Facet>& facets, uint_fast64_t oldFacetIndex, uint_fast64_t newFacetIndex, uint_fast64_t neighborIndex) const {
auto facetInNeighborListIt = std::find(facets[neighborIndex].neighbors.begin(), facets[neighborIndex].neighbors.end(), oldFacetIndex);
if (facetInNeighborListIt != facets[neighborIndex].neighbors.end()){
*facetInNeighborListIt = newFacetIndex;
}
if (pterm::Settings::instance().getQuickHullRoundingMode() != pterm::Settings::QuickHullRoundingMode::NONE) {
maxValue = NumberReduction<Number>::instance().roundUp(maxValue);
}
template<typename ValueType>
void QuickHull<ValueType>::computeOutsideSetOfFacet(Facet& facet, storm::storage::BitVector& currentOutsidePoints, std::vector<EigenVector> const& points) const {
ValueType maxMultiplicationResult = facet.offset;
for(uint_fast64_t pointIndex : currentOutsidePoints) {
ValueType multiplicationResult = points[pointIndex].dot(facet.normal);
if( multiplicationResult > facet.offset ) {
currentOutsidePoints.set(pointIndex, false); // we already know that the point lies outside so it can be ignored for future facets
facet.outsideSet.push_back(pointIndex);
if (multiplicationResult > maxMultiplicationResult) {
maxMultiplicationResult = multiplicationResult;
facet.maxOutsidePointIndex = pointIndex;
}
}
}
this->mResultVector(planeIndex) = maxValue;
}
if(generateRelevantVerticesAndVertexSets){
/* Note: The adjustment of the offset might introduce redundant halfspaces.
* It is also not clear if it suffices to consider intersection points of hyperplanes that intersected in the original polytope
*
* Our solution is to convert the resulting h polytope back to a v poly
*/
PTermHPolytope<Number> enlargedPoly(std::move(this->mResultMatrix), std::move(this->mResultVector));
HyperplaneEnumeration<Number> he;
if(!he.generateVerticesFromHalfspaces(enlargedPoly, true)){
PTERM_ERROR("Could not find the vertices of the enlarged Polytope");
template<typename ValueType>
std::vector<uint_fast64_t> QuickHull<ValueType>::getCommonPoints(Facet const& lhs, Facet const& rhs) const {
std::vector<uint_fast64_t> commonPoints;
for(uint_fast64_t lhsPoint : lhs.points){
for(uint_fast64_t rhsPoint : rhs.points){
if (lhsPoint == rhsPoint){
commonPoints.push_back(lhsPoint);
}
}
}
this->mResultMatrix = std::move(he.getRelevantMatrix());
this->mResultVector = std::move(he.getRelevantVector());
this->mRelevantVertices = std::move(he.getResultVertices());
this->mVertexSets = std::move(he.getVertexSets());
return commonPoints;
}
template<typename ValueType>
std::set<uint_fast64_t> QuickHull<ValueType>::getInvisibleNeighbors( std::vector<Facet>& facets, std::set<uint_fast64_t> const& visibleSet) const {
std::set<uint_fast64_t> invisibleNeighbors;
for(auto currentFacetIt = visibleSet.begin(); currentFacetIt != visibleSet.end(); ++currentFacetIt){
for(uint_fast64_t currentNeighbor = 0; currentNeighbor < facets[*currentFacetIt].neighbors.size(); ++currentNeighbor){
if (visibleSet.find(facets[*currentFacetIt].neighbors[currentNeighbor]) == visibleSet.end()){
invisibleNeighbors.insert(facets[*currentFacetIt].neighbors[currentNeighbor]);
}
}
}
return invisibleNeighbors;
}
}
}
}
/*
* File: AbstractVtoHConverter.tpp
* Author: tim quatmann
*
* Created on Februrary 2, 2016, 1:06 PM
*/
#include "AbstractVtoHConverter.h"
namespace hypro {
namespace pterm{
template<typename ValueType>
EigenMatrix& AbstractVtoHConverter<Number>::getResultMatrix() {
return this->mResultMatrix;
}
template<typename ValueType>
EigenVector& AbstractVtoHConverter<Number>::getResultVector() {
return this->mResultVector;
}
template<typename ValueType>
std::vector<EigenVector>& AbstractVtoHConverter<Number>::getRelevantVertices() {
return this->mRelevantVertices;
}
template<typename ValueType>
std::vector<std::vector<uint_fast64_t>>& AbstractVtoHConverter<Number>::getVertexSets() {
return this->mVertexSets;
template class QuickHull<double>;
template class QuickHull<storm::RationalNumber>;
}
}
}
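As a rough usage sketch of the class implemented above (a hypothetical driver, not part of this commit; only member names visible in the header below are used), one can feed a set of points to QuickHull and read back the H-representation:

#include "storm/storage/geometry/nativepolytopeconversion/QuickHull.h"
#include <iostream>
#include <vector>

int main() {
    typedef storm::storage::geometry::QuickHull<double> QuickHullD;
    std::vector<QuickHullD::EigenVector> points;
    QuickHullD::EigenVector p(2);
    p << 0, 0; points.push_back(p);
    p << 1, 0; points.push_back(p);
    p << 0, 1; points.push_back(p);

    QuickHullD quickHull;
    // true: also compute the non-redundant vertices and, per hyperplane, the vertices lying on it.
    quickHull.generateHalfspacesFromPoints(points, true);

    // Each row of the matrix together with the corresponding entry of the vector is one
    // halfspace a*x <= b of the convex hull of the given points.
    std::cout << quickHull.getResultMatrix() << std::endl;
    std::cout << quickHull.getResultVector() << std::endl;
    return 0;
}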

307
src/storm/storage/geometry/nativepolytopeconversion/QuickHull.h

@ -1,165 +1,168 @@
#ifndef STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_QUICKHULL_H_
#define STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_QUICKHULL_H_
#include <set>
#include "storm/storage/BitVector.h"
#include "storm/adapters/EigenAdapter.h"
namespace storm {
namespace storage {
namespace geometry {
template< typename ValueType>
class QuickHull {
public:
typedef StormEigen::Matrix<ValueType, StormEigen::Dynamic, StormEigen::Dynamic> EigenMatrix;
typedef StormEigen::Matrix<ValueType, StormEigen::Dynamic, 1> EigenVector;
QuickHull() = default;
~QuickHull() = default;
/*
* Generates the halfspaces of the given set of Points by the QuickHull-algorithm
* If the given flag is true, this method will also compute
* * the minimal set of vertices which represent the given polytope (can be used to remove redundant vertices), and
* * for each hyperplane, the set of (non-redundant) vertices that lie on each hyperplane.
*
* Use the provided getter methods to retrieve the results
*
* @return true iff conversion was successful.
*/
void generateHalfspacesFromPoints(std::vector<EigenVector> const& points, bool generateRelevantVerticesAndVertexSets);
EigenMatrix& getResultMatrix();
EigenVector& getResultVector();
/*!
* Returns the set of vertices which are not redundant
* @note the returned vector is empty if the corresponding flag was false
*/
std::vector <EigenVector>& getRelevantVertices();
/*!
* Returns for each hyperplane the set of vertices that lie on that hyperplane.
* A vertex is given as an index in the relevantVertices vector.
* @note the returned vector is empty if the corresponding flag was false
*/
std::vector<std::vector<std::uint_fast64_t>>& getVertexSets();
private:
struct Facet {
EigenVector normal;
ValueType offset;
std::vector<uint_fast64_t> points;
std::vector<uint_fast64_t> neighbors;
// maxOutsidePointIndex and outsideSet will be set in Quickhull algorithm
std::vector<uint_fast64_t> outsideSet;
uint_fast64_t maxOutsidePointIndex;
};
template< typename ValueType>
class QuickHull {
public:
typedef StormEigen::Matrix<ValueType, StormEigen::Dynamic, StormEigen::Dynamic> EigenMatrix;
typedef StormEigen::Matrix<ValueType, StormEigen::Dynamic, 1> EigenVector;
QuickHull() = default;
~QuickHull() = default;
/*
* Generates the halfspaces of the given set of Points by the QuickHull-algorithm
* If the given flag is true, this method will also compute
* * the minimal set of vertices which represent the given polytope (can be used to remove redundant vertices), and
* * for each hyperplane, the set of (non-redundant) vertices that lie on each hyperplane.
*
* Use the provided getter methods to retrieve the results.
*/
void generateHalfspacesFromPoints(std::vector<EigenVector>& points, bool generateRelevantVerticesAndVertexSets);
EigenMatrix& getResultMatrix();
EigenVector& getResultVector();
/*!
* Returns the set of vertices which are not redundant
* @note the returned vector is empty if the corresponding flag was false
*/
std::vector <EigenVector>& getRelevantVertices();
/*!
* Returns for each hyperplane the set of vertices that lie on that hyperplane.
* A vertex is given as an index in the relevantVertices vector.
* @note the returned vector is empty if the corresponding flag was false
*/
std::vector<std::vector<std::uint_fast64_t>>& getVertexSets();
private:
struct Facet {
EigenVector normal;
ValueType offset;
std::vector<uint_fast64_t> points;
std::vector<uint_fast64_t> neighbors;
// maxOutsidePointIndex and outsideSet will be set in Quickhull algorithm
std::vector<uint_fast64_t> outsideSet;
uint_fast64_t maxOutsidePointIndex;
};
/*
* Properly handles the 1D case
*
*/
void handle1DPoints(std::vector<EigenVector>& points, bool generateRelevantVerticesAndVertexSets);
/*
* Returns true if the vertices with index of subset and item are affine independent
* Note that this filter also works for dimension()+1 many vertices
*/
static bool affineFilter(std::vector<uint_fast64_t> const& subset, uint_fast64_t const& item, std::vector<EigenVector> const& vertices);
/*
* Handles degenerate polytopes (affinely dependent points)
*
*/
void handleAffineDependentPoints(std::vector<EigenVector>& points, bool generateRelevantVerticesAndVertexSets);
/*!
* finds a set of vertices that correspond to a (hopefully) large V polytope.
*
* @param points The set of points from which vertices are picked. Note that the order of the points might be changed when calling this!!
* @param verticesOfInitialPolytope Will be set to the resulting vertices (represented by indices w.r.t. the given points)
* @param minMaxVertices after calling this, the first 'minMaxVertices' points will have an extreme value in at least one dimension
* @return true if the method was successful. False if the given points are affinely dependent, i.e., the polytope is degenerate.
*/
bool findInitialVertices(std::vector<EigenVector>& points, std::vector<uint_fast64_t>& verticesOfInitialPolytope, uint_fast64_t& minMaxVertices) const;
/*!
* Computes the initial facets out of the given dimension+1 initial vertices
*/
std::vector<Facet> computeInitialFacets(std::vector<EigenVector> const& points, std::vector<uint_fast64_t> const& verticesOfInitialPolytope, EigenVector const& insidePoint) const;
// Computes the normal vector and the offset of the given facet from the (dimension many) points specified in the facet.
// The insidePoint specifies the orientation of the facet.
void computeNormalAndOffsetOfFacet(std::vector<EigenVector> const& points, EigenVector const& insidePoint, Facet& facet) const;
/*
* Extends the given mesh using the QuickHull-algorithm
* For optimization reasons, a point that is inside the initial polytope but on none of its facets has to be provided.
* @return true iff all consideredPoints are now contained by the mesh
*/
void extendMesh(std::vector<EigenVector>& points,
storm::storage::BitVector& consideredPoints,
std::vector<Facet>& facets,
storm::storage::BitVector& currentFacets,
EigenVector& insidePoint) const;
/*!
* Uses the provided mesh to generate a HPolytope (in form of a matrix and a vector)
* If the given flag is true, this method will also compute
* * the minimal set of vertices which represent the given vPoly (can be used to remove redundant vertices), and
* * for each hyperplane, the set of (non-redundant) vertices that lie on each hyperplane.
*
*/
void getPolytopeFromMesh(std::vector<EigenVector> const& points, std::vector<Facet> const& facets, storm::storage::BitVector const& currentFacets, bool generateRelevantVerticesAndVertexSets);
/*
* Returns the set of facets visible from point starting with the facet with index startIndex and recursively testing all neighbors
*/
std::set<uint_fast64_t> getVisibleSet(std::vector<Facet> const& facets, uint_fast64_t const& startIndex, EigenVector const& point) const;
/*
* Sets neighborhood for all facets with index >= firstNewFacet in facets
*/
void setNeighborhoodOfNewFacets(std::vector<Facet>& facets, uint_fast64_t firstNewFacet, uint_fast64_t dimension) const;
/*
* replaces oldFacet by newFacet in the neighborhood of neighbor
*/
void replaceFacetNeighbor(std::vector<Facet>& facets, uint_fast64_t oldFacetIndex, uint_fast64_t newFacetIndex, uint_fast64_t neighborIndex) const;
/*
* computes the outside set of the given facet
*/
void computeOutsideSetOfFacet(Facet& facet, storm::storage::BitVector& currentOutsidePoints, std::vector<EigenVector> const& points) const;
/*
* returns common points of lhs and rhs
*/
std::vector<uint_fast64_t> getCommonPoints(Facet const& lhs, Facet const& rhs) const;
/*
* computes all neighbors that are not in the visibleSet
*/
std::set<uint_fast64_t> getInvisibleNeighbors( std::vector<Facet>& facets, std::set<uint_fast64_t> const& visibleSet) const;
EigenMatrix resultMatrix;
EigenVector resultVector;
std::vector <EigenVector> relevantVertices;
std::vector <std::vector<uint_fast64_t>> vertexSets;
/*
* Returns true if the vertices with index of subset and item are affine independent
* Note that this filter also works for dimension()+1 many vertices
*/
static bool affineFilter(std::vector<uint_fast64_t> const& subset, uint_fast64_t const& item, std::vector<EigenVector> const& vertices);
/*!
* finds a set of vertices that correspond to a (hopefully) large V polytope.
*
* @param points The set of points from which vertices are picked. Note that the order of the points might be changed when calling this!!
* @param verticesOfInitialPolytope Will be set to the resulting vertices (represented by indices w.r.t. the given points)
* @param minMaxVertices after calling this, the first 'minMaxVertices' points will have an extreme value in at least one dimension
* @return true if the method was successful. False if the given points are affinely dependent, i.e., the polytope is degenerate.
*/
bool findInitialVertices(std::vector<EigenVector>& points, std::vector<uint_fast64_t>& verticesOfInitialPolytope, uint_fast64_t& minMaxVertices) const;
/*!
* Computes the initial facets out of the given dimension+1 initial vertices
*/
std::vector<Facet> computeInitialFacets(std::vector<EigenVector> const& points, std::vector<uint_fast64_t> const& verticesOfInitialPolytope, EigenVector const& insidePoint) const;
// Computes the normal vector and the offset of the given facet from the (dimension many) points specified in the facet.
// The insidePoint specifies the orientation of the facet.
void computeNormalAndOffsetOfFacet(std::vector<EigenVector> const& points, EigenVector const& insidePoint, Facet& facet) const;
/*
* Extends the given mesh using the QuickHull-algorithm
* For optimization reasons, a point that is inside the initial polytope but on none of its facets has to be provided.
* @return true iff all consideredPoints are now contained by the mesh
*/
void extendMesh(std::vector<EigenVector>& points,
storm::storage::BitVector& consideredPoints,
std::vector<Facet<Number>>& facets,
storm::storage::BitVector& currentFacets,
vector_t<Number>& insidePoint,
uint_fast64_t& currentNumOfVertices) const;
/*!
* Uses the provided mesh to generate a HPolytope (in form of a matrix and a vector)
* If the given flag is true, this method will also compute
* * the minimal set of vertices which represent the given vPoly (can be used to remove redundant vertices), and
* * for each hyperplane, the set of (non-redundant) vertices that lie on each hyperplane.
*
*/
void getPolytopeFromMesh(std::vector<EigenVector> const& points, std::vector<Facet<Number>> const& facets, storm::storage::BitVector const& currentFacets, bool generateRelevantVerticesAndVertexSets);
/*
* Returns the set of facets visible from point starting with the facet with index startIndex and recursively testing all neighbors
*/
std::set<uint_fast64_t> getVisibleSet(std::vector<Facet<Number>> const& facets, uint_fast64_t const& startIndex, EigenVector const& point) const;
/*
* Sets neighborhood for all facets with index >= firstNewFacet in facets
*/
void setNeighborhoodOfNewFacets(std::vector<Facet<Number>>& facets, uint_fast64_t firstNewFacet, uint_fast64_t dimension) const;
/*
* replaces oldFacet by newFacet in the neighborhood of neighbor
*/
void replaceFacetNeighbor(std::vector<Facet<Number>>& facets, uint_fast64_t oldFacetIndex, uint_fast64_t newFacetIndex, uint_fast64_t neighborIndex) const;
/*
* computes the outside set of the given facet
*/
void computeOutsideSetOfFacet(Facet<Number>& facet, storm::storage::BitVector& currentOutsidePoints, std::vector<EigenVector> const& points) const;
/*
* returns common points of lhs and rhs
*/
std::vector<uint_fast64_t> getCommonPoints(Facet<Number> const& lhs, Facet<Number> const& rhs) const;
/*
* computes all neighbors that are not in the visibleSet
*/
std::set<uint_fast64_t> getInvisibleNeighbors( std::vector<Facet<Number>>& facets, std::set<uint_fast64_t> const& visibleSet) const;
bool facetHasNeighborWithIndex(Facet<Number> const& facet, uint_fast64_t searchIndex) const;
/*
* Enlarges the result such that all points that are still outside will be contained in the resulting polytope.
*/
void enlargeIncompleteResult(std::vector<Facet<Number>> const& facets, storm::storage::BitVector const& currentFacets, std::vector<EigenVector> const& points, bool generateRelevantVerticesAndVertexSets);
hypro::matrix_t<Number> resultMatrix;
hypro::vector_t<Number> resultVector;
std::vector <EigenVector> relevantVertices;
std::vector <std::vector<uint_fast64_t>> vertexSets;
};
};
}
}
}
#endif STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_QUICKHULL_H_
#endif /* STORM_STORAGE_GEOMETRY_NATIVEPOLYTOPECONVERSION_QUICKHULL_H_ */
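For intuition on the 1D special case declared above (handle1DPoints), a tiny worked example with concrete, purely illustrative numbers:

// Illustrative only: for the 1D input points {3, 7, 5}, handle1DPoints produces
//     resultMatrix = [ -1 ]        resultVector = [ -3 ]
//                    [  1 ]                       [  7 ]
// i.e. the constraints -x <= -3 and x <= 7 describing the interval [3, 7]; if requested,
// the relevant vertices are the points 3 and 7.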

5
src/storm/storage/geometry/nativepolytopeconversion/SubsetEnumerator.cpp

@ -1,6 +1,7 @@
#include "SubsetEnumerator.h"
#include "storm/adapters/CarlAdapter.h"
#include "storm/utility/eigen.h"
namespace storm {
@ -87,7 +88,9 @@ namespace storm {
return true;
}
template class SubsetEnumerator<StormEigen::Matrix<ValueType, StormEigen::Dynamic, 1>>;
template class SubsetEnumerator<std::nullptr_t>;
template class SubsetEnumerator<std::vector<StormEigen::Matrix<double, StormEigen::Dynamic, 1>>>;
template class SubsetEnumerator<std::vector<StormEigen::Matrix<storm::RationalNumber, StormEigen::Dynamic, 1>>>;
}
}
}
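The new explicit instantiations above match the enumerator's use in QuickHull.cpp. A rough usage sketch follows (hypothetical; the constructor argument order (numberOfItems, subsetSize, data, filter) and the method names are inferred solely from the calls visible in QuickHull.cpp):

#include "storm/storage/geometry/nativepolytopeconversion/SubsetEnumerator.h"
#include "storm/adapters/EigenAdapter.h"
#include <cstdint>
#include <vector>

typedef StormEigen::Matrix<double, StormEigen::Dynamic, 1> Vec;

// A filter with the same shape as QuickHull::affineFilter; here it accepts every candidate.
bool acceptAll(std::vector<uint_fast64_t> const& subset, uint_fast64_t const& item, std::vector<Vec> const& data) {
    return true;
}

void enumerateTriples(std::vector<Vec> const& points) {
    storm::storage::geometry::SubsetEnumerator<std::vector<Vec>> subsetEnum(points.size(), 3, points, acceptAll);
    if (subsetEnum.setToFirstSubset()) {
        do {
            std::vector<uint_fast64_t> const& subset = subsetEnum.getCurrentSubset();
            // ... work with the three chosen point indices in 'subset' ...
        } while (subsetEnum.incrementSubset());
    }
}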

6
src/storm/utility/solver.cpp

@ -10,6 +10,7 @@
#include "storm/solver/TopologicalMinMaxLinearEquationSolver.h"
#include "storm/solver/GurobiLpSolver.h"
#include "storm/solver/Z3LpSolver.h"
#include "storm/solver/GlpkLpSolver.h"
#include "storm/solver/Z3SmtSolver.h"
@ -54,6 +55,7 @@ namespace storm {
switch (t) {
case storm::solver::LpSolverType::Gurobi: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GurobiLpSolver(name));
case storm::solver::LpSolverType::Glpk: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::GlpkLpSolver(name));
case storm::solver::LpSolverType::Z3: return std::unique_ptr<storm::solver::LpSolver>(new storm::solver::Z3LpSolver(name));
}
return nullptr;
}
@ -69,6 +71,10 @@ namespace storm {
std::unique_ptr<storm::solver::LpSolver> GurobiLpSolverFactory::create(std::string const& name) const {
return LpSolverFactory::create(name, storm::solver::LpSolverTypeSelection::Gurobi);
}
std::unique_ptr<storm::solver::LpSolver> Z3LpSolverFactory::create(std::string const& name) const {
return LpSolverFactory::create(name, storm::solver::LpSolverTypeSelection::Z3);
}
std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name, storm::solver::LpSolverTypeSelection solvType) {
std::unique_ptr<storm::utility::solver::LpSolverFactory> factory(new LpSolverFactory());

5
src/storm/utility/solver.h

@ -105,6 +105,11 @@ namespace storm {
public:
virtual std::unique_ptr<storm::solver::LpSolver> create(std::string const& name) const override;
};
class Z3LpSolverFactory : public LpSolverFactory {
public:
virtual std::unique_ptr<storm::solver::LpSolver> create(std::string const& name) const override;
};
std::unique_ptr<storm::solver::LpSolver> getLpSolver(std::string const& name, storm::solver::LpSolverTypeSelection solvType = storm::solver::LpSolverTypeSelection::FROMSETTINGS) ;
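A minimal, hypothetical call site for the factory function declared above (not part of this commit):

#include "storm/utility/solver.h"

void example() {
    // Explicitly request the newly wired Z3 backend ...
    auto z3Lp = storm::utility::solver::getLpSolver("myLp", storm::solver::LpSolverTypeSelection::Z3);
    // ... or keep the solver choice configurable via the settings (the default).
    auto configuredLp = storm::utility::solver::getLpSolver("myLp");
}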
