#include "storm/modelchecker/csl/helper/SparseMarkovAutomatonCslHelper.h" #include "storm/modelchecker/prctl/helper/SparseMdpPrctlHelper.h" #include "storm/models/sparse/StandardRewardModel.h" #include "storm/storage/StronglyConnectedComponentDecomposition.h" #include "storm/storage/MaximalEndComponentDecomposition.h" #include "storm/settings/SettingsManager.h" #include "storm/settings/modules/GeneralSettings.h" #include "storm/settings/modules/MinMaxEquationSolverSettings.h" #include "storm/settings/modules/MarkovAutomatonSettings.h" #include "storm/utility/macros.h" #include "storm/utility/vector.h" #include "storm/utility/graph.h" #include "storm/storage/expressions/Variable.h" #include "storm/storage/expressions/Expression.h" #include "storm/storage/expressions/ExpressionManager.h" #include "storm/utility/numerical.h" #include "storm/solver/MinMaxLinearEquationSolver.h" #include "storm/solver/LpSolver.h" #include "storm/exceptions/InvalidStateException.h" #include "storm/exceptions/InvalidPropertyException.h" #include "storm/exceptions/InvalidOperationException.h" #include "storm/exceptions/UncheckedRequirementException.h" namespace storm { namespace modelchecker { namespace helper { template ::SupportsExponential, int>::type> void SparseMarkovAutomatonCslHelper::computeBoundedReachabilityProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix const& transitionMatrix, std::vector const& exitRates, storm::storage::BitVector const& goalStates, storm::storage::BitVector const& markovianNonGoalStates, storm::storage::BitVector const& probabilisticNonGoalStates, std::vector& markovianNonGoalValues, std::vector& probabilisticNonGoalValues, ValueType delta, uint64_t numberOfSteps, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory) { // Start by computing four sparse matrices: // * a matrix aMarkovian with all (discretized) transitions from Markovian non-goal states to all Markovian non-goal states. // * a matrix aMarkovianToProbabilistic with all (discretized) transitions from Markovian non-goal states to all probabilistic non-goal states. // * a matrix aProbabilistic with all (non-discretized) transitions from probabilistic non-goal states to other probabilistic non-goal states. // * a matrix aProbabilisticToMarkovian with all (non-discretized) transitions from probabilistic non-goal states to all Markovian non-goal states. typename storm::storage::SparseMatrix aMarkovian = transitionMatrix.getSubmatrix(true, markovianNonGoalStates, markovianNonGoalStates, true); typename storm::storage::SparseMatrix aMarkovianToProbabilistic = transitionMatrix.getSubmatrix(true, markovianNonGoalStates, probabilisticNonGoalStates); typename storm::storage::SparseMatrix aProbabilistic = transitionMatrix.getSubmatrix(true, probabilisticNonGoalStates, probabilisticNonGoalStates); typename storm::storage::SparseMatrix aProbabilisticToMarkovian = transitionMatrix.getSubmatrix(true, probabilisticNonGoalStates, markovianNonGoalStates); // The matrices with transitions from Markovian states need to be digitized. // Digitize aMarkovian. Based on whether the transition is a self-loop or not, we apply the two digitization rules. 
    uint64_t rowIndex = 0;
    for (auto state : markovianNonGoalStates) {
        for (auto& element : aMarkovian.getRow(rowIndex)) {
            ValueType eTerm = std::exp(-exitRates[state] * delta);
            if (element.getColumn() == rowIndex) {
                element.setValue((storm::utility::one<ValueType>() - eTerm) * element.getValue() + eTerm);
            } else {
                element.setValue((storm::utility::one<ValueType>() - eTerm) * element.getValue());
            }
        }
        ++rowIndex;
    }

    // Digitize aMarkovianToProbabilistic. As there are no self-loops in this case, we only need to apply the digitization formula for regular successors.
    rowIndex = 0;
    for (auto state : markovianNonGoalStates) {
        for (auto& element : aMarkovianToProbabilistic.getRow(rowIndex)) {
            element.setValue((1 - std::exp(-exitRates[state] * delta)) * element.getValue());
        }
        ++rowIndex;
    }

    // Initialize the two vectors that hold the variable one-step probabilities to all target states for probabilistic and Markovian (non-goal) states.
    std::vector<ValueType> bProbabilistic(aProbabilistic.getRowCount());
    std::vector<ValueType> bMarkovian(markovianNonGoalStates.getNumberOfSetBits());

    // Compute the two fixed right-hand side vectors, one for Markovian states and one for the probabilistic ones.
    std::vector<ValueType> bProbabilisticFixed = transitionMatrix.getConstrainedRowGroupSumVector(probabilisticNonGoalStates, goalStates);
    std::vector<ValueType> bMarkovianFixed;
    bMarkovianFixed.reserve(markovianNonGoalStates.getNumberOfSetBits());
    for (auto state : markovianNonGoalStates) {
        bMarkovianFixed.push_back(storm::utility::zero<ValueType>());

        for (auto& element : transitionMatrix.getRowGroup(state)) {
            if (goalStates.get(element.getColumn())) {
                bMarkovianFixed.back() += (1 - std::exp(-exitRates[state] * delta)) * element.getValue();
            }
        }
    }

    // Check for requirements of the solver.
    // The solution is unique as we assume non-zeno MAs.
    storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(true, dir);
    requirements.clearBounds();
    STORM_LOG_THROW(requirements.empty(), storm::exceptions::UncheckedRequirementException, "Cannot establish requirements for solver.");

    std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(aProbabilistic);
    solver->setHasUniqueSolution();
    solver->setBounds(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>());
    solver->setRequirementsChecked();
    solver->setCachingEnabled(true);

    // Perform the actual value iteration
    // * loop until the step bound has been reached
    // * in the loop:
    //   * perform value iteration using A_PSwG, v_PS and the vector b where b = (A * 1_G)|PS + A_PStoMS * v_MS
    //     and 1_G being the characteristic vector for all goal states.
    //   * perform one timed-step using v_MS := A_MSwG * v_MS + A_MStoPS * v_PS + (A * 1_G)|MS
    std::vector<ValueType> markovianNonGoalValuesSwap(markovianNonGoalValues);
    for (uint64_t currentStep = 0; currentStep < numberOfSteps; ++currentStep) {
        // Start by (re-)computing bProbabilistic = bProbabilisticFixed + aProbabilisticToMarkovian * vMarkovian.
        aProbabilisticToMarkovian.multiplyWithVector(markovianNonGoalValues, bProbabilistic);
        storm::utility::vector::addVectors(bProbabilistic, bProbabilisticFixed, bProbabilistic);

        // Now perform the inner value iteration for probabilistic states.
        solver->solveEquations(dir, probabilisticNonGoalValues, bProbabilistic);

        // (Re-)compute bMarkovian = bMarkovianFixed + aMarkovianToProbabilistic * vProbabilistic.
        aMarkovianToProbabilistic.multiplyWithVector(probabilisticNonGoalValues, bMarkovian);
        storm::utility::vector::addVectors(bMarkovian, bMarkovianFixed, bMarkovian);
        aMarkovian.multiplyWithVector(markovianNonGoalValues, markovianNonGoalValuesSwap);
        std::swap(markovianNonGoalValues, markovianNonGoalValuesSwap);
        storm::utility::vector::addVectors(markovianNonGoalValues, bMarkovian, markovianNonGoalValues);
    }

    // After the loop, perform one more step of the value iteration for PS states.
    aProbabilisticToMarkovian.multiplyWithVector(markovianNonGoalValues, bProbabilistic);
    storm::utility::vector::addVectors(bProbabilistic, bProbabilisticFixed, bProbabilistic);
    solver->solveEquations(dir, probabilisticNonGoalValues, bProbabilistic);
}

template<typename ValueType, typename std::enable_if<!storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
void SparseMarkovAutomatonCslHelper::computeBoundedReachabilityProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRates, storm::storage::BitVector const& goalStates, storm::storage::BitVector const& markovianNonGoalStates, storm::storage::BitVector const& probabilisticNonGoalStates, std::vector<ValueType>& markovianNonGoalValues, std::vector<ValueType>& probabilisticNonGoalValues, ValueType delta, uint64_t numberOfSteps, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    STORM_LOG_THROW(false, storm::exceptions::InvalidOperationException, "Computing bounded reachability probabilities is unsupported for this value type.");
}

template<typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
void SparseMarkovAutomatonCslHelper::printTransitions(std::vector<std::vector<ValueType>> relReachability, std::vector<ValueType> const& exitRateVector, storm::storage::SparseMatrix<ValueType> const& fullTransitionMatrix, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, storm::storage::BitVector const& cycleStates, storm::storage::BitVector const& cycleGoalStates, std::vector<std::vector<ValueType>>& vd, std::vector<std::vector<ValueType>>& vu, std::vector<std::vector<ValueType>>& wu) {
    // Debug output only.
    std::ofstream logfile("U+logfile.txt", std::ios::app);

    auto const& rowGroupIndices = fullTransitionMatrix.getRowGroupIndices();
    auto numberOfStates = fullTransitionMatrix.getRowGroupCount();

    logfile << "number of states = num of row group count " << numberOfStates << "\n";
    for (uint_fast64_t i = 0; i < fullTransitionMatrix.getRowGroupCount(); i++) {
        logfile << " from node " << i << " ";
        auto from = rowGroupIndices[i];
        auto to = rowGroupIndices[i + 1];
        for (auto j = from; j < to; j++) {
            for (auto& v : fullTransitionMatrix.getRow(j)) {
                if (markovianStates[i]) {
                    logfile << v.getValue() * exitRateVector[i] << " -> " << v.getColumn() << "\t";
                } else {
                    logfile << v.getValue() << " -> " << v.getColumn() << "\t";
                }
            }
            logfile << "\n";
        }
    }
    logfile << "\n";

    logfile << "probStates\tmarkovianStates\tgoalStates\tcycleStates\tcycleGoalStates\n";
    for (uint64_t i = 0; i < markovianStates.size(); i++) {
        logfile << (~markovianStates)[i] << "\t\t" << markovianStates[i] << "\t\t" << psiStates[i] << "\t\t" << cycleStates[i] << "\t\t" << cycleGoalStates[i] << "\n";
    }

    // Print the current vd/vu/wu approximations.
    for (uint64_t i = 0; i < vd.size(); i++) {
        for (uint64_t j = 0; j < vd[i].size(); j++) {
            logfile << vd[i][j] << "\t" << vu[i][j] << "\t" << wu[i][j] << "\n";
        }
    }
}

template<typename ValueType>
ValueType SparseMarkovAutomatonCslHelper::poisson(ValueType lambda, uint64_t i) {
    ValueType res = pow(lambda, i);
    ValueType fac = 1;
    for (uint64_t j = i; j > 0; j--) {
        fac = fac * j;
    }
    res = res / fac;
    res = res * exp(-lambda);
    return res;
}

template<typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
void SparseMarkovAutomatonCslHelper::calculateVu(std::vector<std::vector<ValueType>> const& relativeReachability, OptimizationDirection dir, uint64_t k, uint64_t node, ValueType lambda, std::vector<std::vector<ValueType>>& vu, std::vector<std::vector<ValueType>>& wu, storm::storage::SparseMatrix<ValueType> const& fullTransitionMatrix, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> const& solver) {
    if (vu[k][node] != -1) {
        return; // dynamic programming: avoid recomputing values.
    }
    uint64_t N = vu.size() - 1;
    auto rowGroupIndices = fullTransitionMatrix.getRowGroupIndices();

    ValueType res = 0;
    for (uint64_t i = k; i < N; i++) {
        if (wu[N - 1 - (i - k)][node] == -1) {
            calculateWu(relativeReachability, dir, (N - 1 - (i - k)), node, lambda, wu, fullTransitionMatrix, markovianStates, psiStates, solver);
        }
        res += poisson(lambda, i) * wu[N - 1 - (i - k)][node];
    }
    vu[k][node] = res;
}

template<typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
void SparseMarkovAutomatonCslHelper::calculateWu(std::vector<std::vector<ValueType>> const& relativeReachability, OptimizationDirection dir, uint64_t k, uint64_t node, ValueType lambda, std::vector<std::vector<ValueType>>& wu, storm::storage::SparseMatrix<ValueType> const& fullTransitionMatrix, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> const& solver) {
    if (wu[k][node] != -1) {
        return; // dynamic programming: avoid recomputing values.
    }
    std::ofstream logfile("U+logfile.txt", std::ios::app);
    auto probabilisticStates = ~markovianStates;

    uint64_t N = wu.size() - 1;
    auto const& rowGroupIndices = fullTransitionMatrix.getRowGroupIndices();

    ValueType res;
    if (k == N) {
        wu[k][node] = 0;
        return;
    }
    if (psiStates[node]) {
        wu[k][node] = 1;
        return;
    }

    if (markovianStates[node]) {
        res = 0;
        auto line = fullTransitionMatrix.getRow(rowGroupIndices[node]);
        for (auto& element : line) {
            uint64_t to = element.getColumn();
            if (wu[k + 1][to] == -1) {
                calculateWu(relativeReachability, dir, k + 1, to, lambda, wu, fullTransitionMatrix, markovianStates, psiStates, solver);
            }
            res += element.getValue() * wu[k + 1][to];
        }
        wu[k][node] = res;
    } else {
        // Probabilistic state: solve the embedded (untimed) MDP restricted to the probabilistic states.
        auto numberOfStates = relativeReachability[0].size();
        typename storm::storage::SparseMatrix<ValueType> probMatrix = fullTransitionMatrix.getSubmatrix(true, ~markovianStates, ~markovianStates, true);
        auto probSize = probMatrix.getRowGroupCount();
        std::vector<ValueType> b(probMatrix.getRowCount(), 0), x(probSize, 0);

        // Calculate b: for every choice of a probabilistic state, accumulate the probability of moving to a
        // Markovian successor weighted with that successor's wu value.
        uint64_t lineCounter = 0;
        for (uint64_t i = 0; i < numberOfStates; i++) {
            if (markovianStates[i]) {
                continue;
            }
            for (auto j = rowGroupIndices[i]; j < rowGroupIndices[i + 1]; j++) {
                res = 0;
                for (auto& element : fullTransitionMatrix.getRow(j)) {
                    uint64_t to = element.getColumn();
                    if (!markovianStates[to]) {
                        continue;
                    }
                    if (wu[k][to] == -1) {
                        calculateWu(relativeReachability, dir, k, to, lambda, wu, fullTransitionMatrix, markovianStates, psiStates, solver);
                    }
                    res += relativeReachability[j][to] * wu[k][to];
                }
                b[lineCounter] = res;
                lineCounter++;
            }
        }

        solver->solveEquations(dir, x, b);

        // Copy the solution for the probabilistic states back into wu.
        uint64_t position = 0;
        for (uint64_t i = 0; i < numberOfStates; i++) {
            if (probabilisticStates[i]) {
                wu[k][i] = x[position];
                position++;
            }
        }
    }
}

template<typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
void SparseMarkovAutomatonCslHelper::calculateVd(std::vector<std::vector<ValueType>> const& relativeReachability, OptimizationDirection dir, uint64_t k, uint64_t node, ValueType lambda, std::vector<std::vector<ValueType>>& vd, storm::storage::SparseMatrix<ValueType> const& fullTransitionMatrix, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> const& solver) {
    std::ofstream logfile("U+logfile.txt", std::ios::app);
    auto probabilisticStates = ~markovianStates;

    if (vd[k][node] != -1) {
        return; // dynamic programming: avoid recomputing values.
    }
    logfile << "calculating vd for k = " << k << " node " << node << " \t";

    uint64_t N = vd.size() - 1;
    auto const& rowGroupIndices = fullTransitionMatrix.getRowGroupIndices();

    ValueType res;
    if (k == N) {
        logfile << "k == N! res = 0\n";
res = 0\n"; vd[k][node]=0; return; } //goal state if (psiStates[node]){ res = storm::utility::zero(); for (uint64_t i = k ; i probMatrix = fullTransitionMatrix.getSubmatrix(true, ~markovianStates , ~markovianStates, true); auto probSize = probMatrix.getRowGroupCount(); std::vector b(probMatrix.getRowCount(), 0), x(probSize,0); //calculate b uint64_t lineCounter=0; for (int i =0; isolveEquations(dir, x, b); logfile << "vd goal vector x is \n"; for (int i =0 ; i::SupportsExponential, int>::type=0> uint64_t SparseMarkovAutomatonCslHelper::trajans(storm::storage::SparseMatrix const& transitionMatrix, uint64_t node, std::vector& disc, std::vector& finish, uint64_t* counter) { auto const& rowGroupIndice = transitionMatrix.getRowGroupIndices(); disc[node] = *counter; finish[node] = *counter; (*counter)+=1; auto from = rowGroupIndice[node]; auto to = rowGroupIndice[node+1]; for(uint64_t i =from; i::SupportsExponential, int>::type=0> storm::storage::BitVector SparseMarkovAutomatonCslHelper::identifyProbCyclesGoalStates(storm::storage::SparseMatrix const& transitionMatrix, storm::storage::BitVector const& cycleStates) { storm::storage::BitVector goalStates(cycleStates.size(), false); auto const& rowGroupIndices = transitionMatrix.getRowGroupIndices(); for (uint64_t i = 0 ; i < transitionMatrix.getRowGroupCount() ; i++){ if (!cycleStates[i]){ continue; } auto from = rowGroupIndices[i]; auto to = rowGroupIndices[i+1]; for (auto j = from ; j::SupportsExponential, int>::type=0> storm::storage::BitVector SparseMarkovAutomatonCslHelper::identifyProbCycles(storm::storage::SparseMatrix const& transitionMatrix, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates){ storm::storage::BitVector const& probabilisticStates = ~markovianStates; storm::storage::BitVector const& probabilisticNonGoalStates = ~markovianStates & ~psiStates; storm::storage::SparseMatrix const& probMatrix = transitionMatrix.getSubmatrix(true, probabilisticNonGoalStates, probabilisticNonGoalStates); uint64_t probSize = probMatrix.getRowGroupCount(); std::vector disc(probSize, 0), finish(probSize, 0); uint64_t counter =1; for (uint64_t i =0; i::SupportsExponential, int>::type=0> void SparseMarkovAutomatonCslHelper::deleteProbDiagonals(storm::storage::SparseMatrix& transitionMatrix, storm::storage::BitVector const& markovianStates){ auto const& rowGroupIndices = transitionMatrix.getRowGroupIndices(); for (uint64_t i =0; i::SupportsExponential, int>::type> std::vector SparseMarkovAutomatonCslHelper::unifPlus(OptimizationDirection dir, std::pair const& boundsPair, std::vector const& exitRateVector, storm::storage::SparseMatrix const& transitionMatrix, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory const& minMaxLinearEquationSolverFactory){ STORM_LOG_TRACE("Using UnifPlus to compute bounded until probabilities."); std::ofstream logfile("U+logfile.txt", std::ios::app); ValueType maxNorm = 0; //bitvectors to identify different kind of states storm::storage::BitVector allStates(markovianStates.size(), true); auto probabilisticStates = ~markovianStates; //vectors to save calculation std::vector> vd,vu,wu; //transition matrix with diagonal entries. 
    std::vector<ValueType> exitRate{exitRateVector};
    typename storm::storage::SparseMatrix<ValueType> fullTransitionMatrix = transitionMatrix.getSubmatrix(true, allStates, allStates, true);
    typename storm::storage::SparseMatrix<ValueType> probMatrix = fullTransitionMatrix.getSubmatrix(true, ~markovianStates, ~markovianStates, true);
    auto rowGroupIndices = fullTransitionMatrix.getRowGroupIndices();

    // (1) define horizon, epsilon, kappa, N, lambda
    uint64_t numberOfStates = fullTransitionMatrix.getRowGroupCount();
    double T = boundsPair.second;
    ValueType kappa = storm::utility::one<ValueType>() / 10; // would be better as an option parameter
    ValueType epsilon = storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision();
    ValueType lambda = exitRateVector[0];
    for (ValueType act : exitRateVector) {
        lambda = std::max(act, lambda);
    }
    uint64_t N;

    // Calculate the relative reachability vectors.
    std::vector<ValueType> in(numberOfStates, 0);
    std::vector<std::vector<ValueType>> relReachability(fullTransitionMatrix.getRowCount(), in);

    printTransitions(relReachability, exitRateVector, fullTransitionMatrix, markovianStates, psiStates, psiStates, psiStates, vd, vu, wu); // TODO: delete when development is finished

    // Calculate the relative reachability: for every choice, remember the probabilities of moving to Markovian successors.
    for (uint64_t i = 0; i < fullTransitionMatrix.getRowGroupCount(); i++) {
        auto from = rowGroupIndices[i];
        auto to = rowGroupIndices[i + 1];
        for (auto j = from; j < to; j++) {
            std::vector<ValueType>& act = relReachability[j];
            for (auto element : fullTransitionMatrix.getRow(j)) {
                if (markovianStates[element.getColumn()]) {
                    act[element.getColumn()] = element.getValue();
                }
            }
        }
    }

    // Create the equation solver for the probabilistic (sub-)MDP.
    storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(true, dir);
    requirements.clearBounds();
    STORM_LOG_THROW(requirements.empty(), storm::exceptions::UncheckedRequirementException, "Cannot establish requirements for solver.");

    std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(probMatrix);
    solver->setHasUniqueSolution();
    solver->setBounds(storm::utility::zero<ValueType>(), storm::utility::one<ValueType>());
    solver->setRequirementsChecked();
    solver->setCachingEnabled(true);

    printTransitions(relReachability, exitRateVector, fullTransitionMatrix, markovianStates, psiStates, psiStates, psiStates, vd, vu, wu); // TODO: delete when development is finished

    // While not close enough to the precision:
    do {
        // Reset the error norm for this refinement round.
        maxNorm = storm::utility::zero<ValueType>();

        // (2) update parameter
        N = ceil(lambda * T * exp(2) - log(kappa * epsilon));

        // (3) uniformize - just applied to Markovian states
        for (uint_fast64_t i = 0; i < fullTransitionMatrix.getRowGroupCount(); i++) {
            if (!markovianStates[i]) {
                continue;
            }
            uint64_t from = rowGroupIndices[i]; // Markovian state -> no nondeterminism -> only one row

            if (exitRate[i] == lambda) {
                continue; // already uniformized
            }

            auto line = fullTransitionMatrix.getRow(from);
            ValueType exitOld = exitRate[i];
            ValueType exitNew = lambda;
            for (auto& v : line) {
                if (v.getColumn() == i) { // diagonal element
                    // New self-loop rate: exitNew - exitOld + exitOld * P(i, i); dividing by exitNew yields the new probability.
                    ValueType newSelfLoop = exitNew - exitOld + exitOld * v.getValue();
                    ValueType newRate = newSelfLoop / exitNew;
                    v.setValue(newRate);
                } else { // modify probability
                    ValueType propOld = v.getValue();
                    ValueType propNew = propOld * exitOld / exitNew;
                    v.setValue(propNew);
                }
            }
            exitRate[i] = exitNew;
        }

        // (4) define vectors/matrices
        std::vector<ValueType> init(numberOfStates, -1);
        vd = std::vector<std::vector<ValueType>>(N + 1, init);
        vu = std::vector<std::vector<ValueType>>(N + 1, init);
        wu = std::vector<std::vector<ValueType>>(N + 1, init);

        // (5) calculate vectors and maxNorm
        for (uint64_t i = 0; i < numberOfStates; i++) {
            // Iterate k from N down to 0.
            for (uint64_t k = N + 1; k-- > 0;) {
                calculateVd(relReachability, dir, k, i, T * lambda, vd, fullTransitionMatrix, markovianStates, psiStates, solver);
                calculateWu(relReachability, dir, k, i, T * lambda, wu, fullTransitionMatrix, markovianStates, psiStates, solver);
                calculateVu(relReachability, dir, k, i, T * lambda, vu, wu, fullTransitionMatrix, markovianStates, psiStates, solver);
                // Also use this iteration to keep the maxNorm of vd and vu up to date, so the loop condition is easy to prove.
                ValueType diff = std::abs(vd[k][i] - vu[k][i]);
                maxNorm = std::max(maxNorm, diff);
            }
        }
        printTransitions(relReachability, exitRateVector, fullTransitionMatrix, markovianStates, psiStates, psiStates, psiStates, vd, vu, wu); // TODO: delete when development is finished

        // (6) double lambda
        lambda = 2 * lambda;

    } while (maxNorm > epsilon * (1 - kappa));

    return vd[0];
}

template<typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilitiesImca(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    STORM_LOG_TRACE("Using IMCA's technique to compute bounded until probabilities.");

    uint64_t numberOfStates = transitionMatrix.getRowGroupCount();

    // 'Unpack' the bounds to make them more easily accessible.
    double lowerBound = boundsPair.first;
    double upperBound = boundsPair.second;

    // (1) Compute the accuracy we need to achieve the required error bound.
    ValueType maxExitRate = 0;
    for (auto value : exitRateVector) {
        maxExitRate = std::max(maxExitRate, value);
    }
    ValueType delta = (2 * storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision()) / (upperBound * maxExitRate * maxExitRate);

    // (2) Compute the number of steps we need to make for the interval.
    uint64_t numberOfSteps = static_cast<uint64_t>(std::ceil((upperBound - lowerBound) / delta));
    STORM_LOG_INFO("Performing " << numberOfSteps << " iterations (delta=" << delta << ") for interval [" << lowerBound << ", " << upperBound << "]." << std::endl);

    // (3) Compute the non-goal states and initialize two vectors
    // * vProbabilistic holds the probability values of probabilistic non-goal states.
    // * vMarkovian holds the probability values of Markovian non-goal states.
    storm::storage::BitVector const& markovianNonGoalStates = markovianStates & ~psiStates;
    storm::storage::BitVector const& probabilisticNonGoalStates = ~markovianStates & ~psiStates;
    std::vector<ValueType> vProbabilistic(probabilisticNonGoalStates.getNumberOfSetBits());
    std::vector<ValueType> vMarkovian(markovianNonGoalStates.getNumberOfSetBits());

    computeBoundedReachabilityProbabilities(dir, transitionMatrix, exitRateVector, psiStates, markovianNonGoalStates, probabilisticNonGoalStates, vMarkovian, vProbabilistic, delta, numberOfSteps, minMaxLinearEquationSolverFactory);

    // (4) If the lower bound of the interval was non-zero, we need to take the current values as the starting values for a subsequent value iteration.
    if (lowerBound != storm::utility::zero<ValueType>()) {
        std::vector<ValueType> vAllProbabilistic((~markovianStates).getNumberOfSetBits());
        std::vector<ValueType> vAllMarkovian(markovianStates.getNumberOfSetBits());

        // Create the starting value vectors for the next value iteration based on the results of the previous one.
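        // Note: for storm::storage::BitVector, the expression a % b yields the projection of a onto the set positions
        // of b, i.e. bit i of the result is set iff the i-th set position of b is also set in a. Here it addresses the
        // goal and non-goal positions within the probabilistic respectively Markovian sub-vectors.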
        storm::utility::vector::setVectorValues(vAllProbabilistic, psiStates % ~markovianStates, storm::utility::one<ValueType>());
        storm::utility::vector::setVectorValues(vAllProbabilistic, ~psiStates % ~markovianStates, vProbabilistic);
        storm::utility::vector::setVectorValues(vAllMarkovian, psiStates % markovianStates, storm::utility::one<ValueType>());
        storm::utility::vector::setVectorValues(vAllMarkovian, ~psiStates % markovianStates, vMarkovian);

        // Compute the number of steps to reach the target interval.
        numberOfSteps = static_cast<uint64_t>(std::ceil(lowerBound / delta));
        STORM_LOG_INFO("Performing " << numberOfSteps << " iterations (delta=" << delta << ") for interval [0, " << lowerBound << "]." << std::endl);

        // Compute the bounded reachability for the interval [0, a].
        computeBoundedReachabilityProbabilities(dir, transitionMatrix, exitRateVector, storm::storage::BitVector(numberOfStates), markovianStates, ~markovianStates, vAllMarkovian, vAllProbabilistic, delta, numberOfSteps, minMaxLinearEquationSolverFactory);

        // Create the result vector out of vAllProbabilistic and vAllMarkovian and return it.
        std::vector<ValueType> result(numberOfStates, storm::utility::zero<ValueType>());
        storm::utility::vector::setVectorValues(result, ~markovianStates, vAllProbabilistic);
        storm::utility::vector::setVectorValues(result, markovianStates, vAllMarkovian);

        return result;
    } else {
        // Create the result vector out of 1_G, vProbabilistic and vMarkovian and return it.
        std::vector<ValueType> result(numberOfStates);
        storm::utility::vector::setVectorValues(result, psiStates, storm::utility::one<ValueType>());
        storm::utility::vector::setVectorValues(result, probabilisticNonGoalStates, vProbabilistic);
        storm::utility::vector::setVectorValues(result, markovianNonGoalStates, vMarkovian);
        return result;
    }
}

template<typename ValueType, typename std::enable_if<storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    auto const& markovAutomatonSettings = storm::settings::getModule<storm::settings::modules::MarkovAutomatonSettings>();
    if (markovAutomatonSettings.getTechnique() == storm::settings::modules::MarkovAutomatonSettings::BoundedReachabilityTechnique::Imca) {
        return computeBoundedUntilProbabilitiesImca(dir, transitionMatrix, exitRateVector, markovianStates, psiStates, boundsPair, minMaxLinearEquationSolverFactory);
    } else {
        STORM_LOG_ASSERT(markovAutomatonSettings.getTechnique() == storm::settings::modules::MarkovAutomatonSettings::BoundedReachabilityTechnique::UnifPlus, "Unknown solution technique.");
        return unifPlus(dir, boundsPair, exitRateVector, transitionMatrix, markovianStates, psiStates, minMaxLinearEquationSolverFactory);
    }
}

template<typename ValueType, typename std::enable_if<!storm::NumberTraits<ValueType>::SupportsExponential, int>::type>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    STORM_LOG_THROW(false, storm::exceptions::InvalidOperationException, "Computing bounded until probabilities is unsupported for this value type.");
}

template<typename ValueType>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    return std::move(storm::modelchecker::helper::SparseMdpPrctlHelper<ValueType>::computeUntilProbabilities(dir, transitionMatrix, backwardTransitions, phiStates, psiStates, qualitative, false, minMaxLinearEquationSolverFactory).values);
}

template<typename ValueType, typename RewardModelType>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeReachabilityRewards(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    // Get a reward model where the state rewards are scaled accordingly.
    std::vector<ValueType> stateRewardWeights(transitionMatrix.getRowGroupCount(), storm::utility::zero<ValueType>());
    for (auto const markovianState : markovianStates) {
        stateRewardWeights[markovianState] = storm::utility::one<ValueType>() / exitRateVector[markovianState];
    }
    std::vector<ValueType> totalRewardVector = rewardModel.getTotalActionRewardVector(transitionMatrix, stateRewardWeights);
    RewardModelType scaledRewardModel(boost::none, std::move(totalRewardVector));

    return SparseMdpPrctlHelper<ValueType>::computeReachabilityRewards(dir, transitionMatrix, backwardTransitions, scaledRewardModel, psiStates, false, false, minMaxLinearEquationSolverFactory).values;
}

template<typename ValueType>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    uint64_t numberOfStates = transitionMatrix.getRowGroupCount();

    // If there are no goal states, we avoid the computation and directly return zero.
    if (psiStates.empty()) {
        return std::vector<ValueType>(numberOfStates, storm::utility::zero<ValueType>());
    }

    // Likewise, if all bits are set, we can avoid the computation and return one for all states.
    if (psiStates.full()) {
        return std::vector<ValueType>(numberOfStates, storm::utility::one<ValueType>());
    }

    // Otherwise, reduce the long run average probabilities to long run average rewards.
    // Every Markovian goal state gets reward one.
    std::vector<ValueType> stateRewards(transitionMatrix.getRowGroupCount(), storm::utility::zero<ValueType>());
    storm::utility::vector::setVectorValues(stateRewards, markovianStates & psiStates, storm::utility::one<ValueType>());
    storm::models::sparse::StandardRewardModel<ValueType> rewardModel(std::move(stateRewards));

    return computeLongRunAverageRewards(dir, transitionMatrix, backwardTransitions, exitRateVector, markovianStates, rewardModel, minMaxLinearEquationSolverFactory);
}

template<typename ValueType, typename RewardModelType>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    uint64_t numberOfStates = transitionMatrix.getRowGroupCount();

    // Start by decomposing the Markov automaton into its MECs.
    storm::storage::MaximalEndComponentDecomposition<ValueType> mecDecomposition(transitionMatrix, backwardTransitions);

    // Get some data members for convenience.
    std::vector<uint64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();

    // Now start with computing the long-run average for all end components in isolation.
    std::vector<ValueType> lraValuesForEndComponents;

    // While doing so, we already gather some information for the following steps.
    std::vector<uint64_t> stateToMecIndexMap(numberOfStates);
    storm::storage::BitVector statesInMecs(numberOfStates);

    for (uint64_t currentMecIndex = 0; currentMecIndex < mecDecomposition.size(); ++currentMecIndex) {
        storm::storage::MaximalEndComponent const& mec = mecDecomposition[currentMecIndex];

        // Gather information for later use.
        for (auto const& stateChoicesPair : mec) {
            uint64_t state = stateChoicesPair.first;

            statesInMecs.set(state);
            stateToMecIndexMap[state] = currentMecIndex;
        }

        // Compute the LRA value for the current MEC.
        lraValuesForEndComponents.push_back(computeLraForMaximalEndComponent(dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec, minMaxLinearEquationSolverFactory));
    }

    // For fast transition rewriting, we build some auxiliary data structures.
    storm::storage::BitVector statesNotContainedInAnyMec = ~statesInMecs;
    uint64_t firstAuxiliaryStateIndex = statesNotContainedInAnyMec.getNumberOfSetBits();
    uint64_t lastStateNotInMecs = 0;
    uint64_t numberOfStatesNotInMecs = 0;
    std::vector<uint64_t> statesNotInMecsBeforeIndex;
    statesNotInMecsBeforeIndex.reserve(numberOfStates);
    for (auto state : statesNotContainedInAnyMec) {
        while (lastStateNotInMecs <= state) {
            statesNotInMecsBeforeIndex.push_back(numberOfStatesNotInMecs);
            ++lastStateNotInMecs;
        }
        ++numberOfStatesNotInMecs;
    }
    uint64_t numberOfSspStates = numberOfStatesNotInMecs + mecDecomposition.size();

    // Finally, we are ready to create the SSP matrix and the right-hand side of the SSP.
    std::vector<ValueType> b;
    typename storm::storage::SparseMatrixBuilder<ValueType> sspMatrixBuilder(0, numberOfSspStates, 0, false, true, numberOfSspStates);

    // If the source state is not contained in any MEC, we copy its choices (and perform the necessary modifications).
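    // Layout of the SSP: row groups 0 .. numberOfStatesNotInMecs-1 correspond (in order) to the states outside of all
    // MECs; row group firstAuxiliaryStateIndex + i corresponds to the i-th MEC. Every copied choice keeps its
    // probabilities to non-MEC states and accumulates the probability mass leading into MEC i on the column of the
    // i-th auxiliary state. Each auxiliary state additionally gets one extra choice whose right-hand side entry is
    // the LRA value of its MEC, which models staying inside the MEC forever.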
    uint64_t currentChoice = 0;
    for (auto state : statesNotContainedInAnyMec) {
        sspMatrixBuilder.newRowGroup(currentChoice);

        for (uint64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice, ++currentChoice) {
            std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
            b.push_back(storm::utility::zero<ValueType>());

            for (auto element : transitionMatrix.getRow(choice)) {
                if (statesNotContainedInAnyMec.get(element.getColumn())) {
                    // If the target state is not contained in an MEC, we can copy over the entry.
                    sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
                } else {
                    // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
                    // so that we are able to write the cumulative probability to the MEC into the matrix.
                    auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
                }
            }

            // Now insert all (cumulative) probability values that target an MEC.
            for (uint64_t mecIndex = 0; mecIndex < auxiliaryStateToProbabilityMap.size(); ++mecIndex) {
                if (auxiliaryStateToProbabilityMap[mecIndex] != 0) {
                    sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + mecIndex, auxiliaryStateToProbabilityMap[mecIndex]);
                }
            }
        }
    }

    // Now we are ready to construct the choices for the auxiliary states.
    for (uint64_t mecIndex = 0; mecIndex < mecDecomposition.size(); ++mecIndex) {
        storm::storage::MaximalEndComponent const& mec = mecDecomposition[mecIndex];
        sspMatrixBuilder.newRowGroup(currentChoice);

        for (auto const& stateChoicesPair : mec) {
            uint64_t state = stateChoicesPair.first;
            boost::container::flat_set<uint64_t> const& choicesInMec = stateChoicesPair.second;

            for (uint64_t choice = nondeterministicChoiceIndices[state]; choice < nondeterministicChoiceIndices[state + 1]; ++choice) {
                // If the choice is not contained in the MEC itself, we have to add a similar distribution to the auxiliary state.
                if (choicesInMec.find(choice) == choicesInMec.end()) {
                    std::vector<ValueType> auxiliaryStateToProbabilityMap(mecDecomposition.size());
                    b.push_back(storm::utility::zero<ValueType>());

                    for (auto element : transitionMatrix.getRow(choice)) {
                        if (statesNotContainedInAnyMec.get(element.getColumn())) {
                            // If the target state is not contained in an MEC, we can copy over the entry.
                            sspMatrixBuilder.addNextValue(currentChoice, statesNotInMecsBeforeIndex[element.getColumn()], element.getValue());
                        } else {
                            // If the target state is contained in MEC i, we need to add the probability to the corresponding field in the vector
                            // so that we are able to write the cumulative probability to the MEC into the matrix.
                            auxiliaryStateToProbabilityMap[stateToMecIndexMap[element.getColumn()]] += element.getValue();
                        }
                    }

                    // Now insert all (cumulative) probability values that target an MEC.
                    for (uint64_t targetMecIndex = 0; targetMecIndex < auxiliaryStateToProbabilityMap.size(); ++targetMecIndex) {
                        if (auxiliaryStateToProbabilityMap[targetMecIndex] != 0) {
                            sspMatrixBuilder.addNextValue(currentChoice, firstAuxiliaryStateIndex + targetMecIndex, auxiliaryStateToProbabilityMap[targetMecIndex]);
                        }
                    }

                    ++currentChoice;
                }
            }
        }

        // For each auxiliary state, there is the option to achieve the reward value of the LRA associated with the MEC.
        ++currentChoice;
        b.push_back(lraValuesForEndComponents[mecIndex]);
    }

    // Finalize the matrix and solve the corresponding system of equations.
    storm::storage::SparseMatrix<ValueType> sspMatrix = sspMatrixBuilder.build(currentChoice, numberOfSspStates, numberOfSspStates);

    std::vector<ValueType> x(numberOfSspStates);

    // Check for requirements of the solver.
    storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(true, dir);
    requirements.clearBounds();
    STORM_LOG_THROW(requirements.empty(), storm::exceptions::UncheckedRequirementException, "Cannot establish requirements for solver.");

    std::unique_ptr<storm::solver::MinMaxLinearEquationSolver<ValueType>> solver = minMaxLinearEquationSolverFactory.create(sspMatrix);
    solver->setHasUniqueSolution();
    solver->setLowerBound(storm::utility::zero<ValueType>());
    solver->setUpperBound(*std::max_element(lraValuesForEndComponents.begin(), lraValuesForEndComponents.end()));
    solver->setRequirementsChecked();
    solver->solveEquations(dir, x, b);

    // Prepare the result vector.
    std::vector<ValueType> result(numberOfStates);

    // Set the values for states not contained in MECs.
    storm::utility::vector::setVectorValues(result, statesNotContainedInAnyMec, x);

    // Set the values for all states in MECs.
    for (auto state : statesInMecs) {
        result[state] = x[firstAuxiliaryStateIndex + stateToMecIndexMap[state]];
    }

    return result;
}

template<typename ValueType>
std::vector<ValueType> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, storm::storage::SparseMatrix<ValueType> const& backwardTransitions, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    // Get a reward model representing expected sojourn times.
    std::vector<ValueType> rewardValues(transitionMatrix.getRowCount(), storm::utility::zero<ValueType>());
    for (auto const markovianState : markovianStates) {
        rewardValues[transitionMatrix.getRowGroupIndices()[markovianState]] = storm::utility::one<ValueType>() / exitRateVector[markovianState];
    }
    storm::models::sparse::StandardRewardModel<ValueType> rewardModel(boost::none, std::move(rewardValues));

    return SparseMdpPrctlHelper<ValueType>::computeReachabilityRewards(dir, transitionMatrix, backwardTransitions, rewardModel, psiStates, false, false, minMaxLinearEquationSolverFactory).values;
}

template<typename ValueType, typename RewardModelType>
ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    // If the MEC only consists of a single state, we compute the LRA value directly.
    if (++mec.begin() == mec.end()) {
        uint64_t state = mec.begin()->first;
        STORM_LOG_THROW(markovianStates.get(state), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");
        ValueType result = rewardModel.hasStateRewards() ? rewardModel.getStateReward(state) : storm::utility::zero<ValueType>();
        if (rewardModel.hasStateActionRewards() || rewardModel.hasTransitionRewards()) {
            STORM_LOG_ASSERT(mec.begin()->second.size() == 1, "Markovian state has nondeterministic behavior.");
            uint64_t choice = *mec.begin()->second.begin();
            result += exitRateVector[state] * rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero<ValueType>());
        }
        return result;
    }

    // Solve the MEC with the method specified in the settings.
    storm::solver::LraMethod method = storm::settings::getModule<storm::settings::modules::MinMaxEquationSolverSettings>().getLraMethod();
    if (method == storm::solver::LraMethod::LinearProgramming) {
        return computeLraForMaximalEndComponentLP(dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec);
    } else if (method == storm::solver::LraMethod::ValueIteration) {
        return computeLraForMaximalEndComponentVI(dir, transitionMatrix, exitRateVector, markovianStates, rewardModel, mec, minMaxLinearEquationSolverFactory);
    } else {
        STORM_LOG_THROW(false, storm::exceptions::InvalidSettingsException, "Unsupported technique.");
    }
}

template<typename ValueType, typename RewardModelType>
ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec) {
    std::unique_ptr<storm::utility::solver::LpSolverFactory<ValueType>> lpSolverFactory(new storm::utility::solver::LpSolverFactory<ValueType>());
    std::unique_ptr<storm::solver::LpSolver<ValueType>> solver = lpSolverFactory->create("LRA for MEC");
    solver->setOptimizationDirection(invert(dir));

    // First, we need to create the variables for the problem.
    std::map<uint64_t, storm::expressions::Variable> stateToVariableMap;
    for (auto const& stateChoicesPair : mec) {
        std::string variableName = "x" + std::to_string(stateChoicesPair.first);
        stateToVariableMap[stateChoicesPair.first] = solver->addUnboundedContinuousVariable(variableName);
    }
    storm::expressions::Variable k = solver->addUnboundedContinuousVariable("k", storm::utility::one<ValueType>());
    solver->update();

    // Now we encode the problem as constraints.
    std::vector<uint64_t> const& nondeterministicChoiceIndices = transitionMatrix.getRowGroupIndices();
    for (auto const& stateChoicesPair : mec) {
        uint64_t state = stateChoicesPair.first;

        // Now, based on the type of the state, create a suitable constraint.
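        // The constraints below encode the LRA linear program for this MEC: for a Markovian state s with exit rate E(s)
        // and its unique choice alpha,
        //     x_s - sum_{s'} P(s, s') * x_{s'} + (1/E(s)) * k   <=/>=   total reward of (s, alpha) with the state reward weighted by 1/E(s),
        // and for every choice alpha of a probabilistic state s,
        //     x_s - sum_{s'} P(s, alpha, s') * x_{s'}           <=/>=   total reward of (s, alpha),
        // where '<=' is used when minimizing and '>=' when maximizing; k is the variable holding the long-run average value.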
        if (markovianStates.get(state)) {
            STORM_LOG_ASSERT(stateChoicesPair.second.size() == 1, "Markovian state " << state << " is not deterministic: It has " << stateChoicesPair.second.size() << " choices.");
            uint64_t choice = *stateChoicesPair.second.begin();

            storm::expressions::Expression constraint = stateToVariableMap.at(state);

            for (auto element : transitionMatrix.getRow(nondeterministicChoiceIndices[state])) {
                constraint = constraint - stateToVariableMap.at(element.getColumn()) * solver->getManager().rational((element.getValue()));
            }

            constraint = constraint + solver->getManager().rational(storm::utility::one<ValueType>() / exitRateVector[state]) * k;
            storm::expressions::Expression rightHandSide = solver->getManager().rational(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, (ValueType) (storm::utility::one<ValueType>() / exitRateVector[state])));
            if (dir == OptimizationDirection::Minimize) {
                constraint = constraint <= rightHandSide;
            } else {
                constraint = constraint >= rightHandSide;
            }
            solver->addConstraint("state" + std::to_string(state), constraint);
        } else {
            // For probabilistic states, we want to add the constraint x_s <= sum P(s, a, s') * x_s' where a is the current action
            // and the sum ranges over all states s'.
            for (auto choice : stateChoicesPair.second) {
                storm::expressions::Expression constraint = stateToVariableMap.at(state);

                for (auto element : transitionMatrix.getRow(choice)) {
                    constraint = constraint - stateToVariableMap.at(element.getColumn()) * solver->getManager().rational(element.getValue());
                }

                storm::expressions::Expression rightHandSide = solver->getManager().rational(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero<ValueType>()));
                if (dir == OptimizationDirection::Minimize) {
                    constraint = constraint <= rightHandSide;
                } else {
                    constraint = constraint >= rightHandSide;
                }
                solver->addConstraint("state" + std::to_string(state), constraint);
            }
        }
    }

    solver->optimize();
    return solver->getContinuousValue(k);
}

template<typename ValueType, typename RewardModelType>
ValueType SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(OptimizationDirection dir, storm::storage::SparseMatrix<ValueType> const& transitionMatrix, std::vector<ValueType> const& exitRateVector, storm::storage::BitVector const& markovianStates, RewardModelType const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory<ValueType> const& minMaxLinearEquationSolverFactory) {
    // Initialize data about the MEC.
    storm::storage::BitVector mecStates(transitionMatrix.getRowGroupCount(), false);
    storm::storage::BitVector mecChoices(transitionMatrix.getRowCount(), false);
    for (auto const& stateChoicesPair : mec) {
        mecStates.set(stateChoicesPair.first);
        for (auto const& choice : stateChoicesPair.second) {
            mecChoices.set(choice);
        }
    }
    storm::storage::BitVector markovianMecStates = mecStates & markovianStates;
    storm::storage::BitVector probabilisticMecStates = mecStates & ~markovianStates;
    storm::storage::BitVector probabilisticMecChoices = transitionMatrix.getRowFilter(probabilisticMecStates) & mecChoices;
    STORM_LOG_THROW(!markovianMecStates.empty(), storm::exceptions::InvalidOperationException, "Markov Automaton has Zeno behavior. Computation of Long Run Average values not supported.");

    // Get the uniformization rate.
    ValueType uniformizationRate = storm::utility::vector::max_if(exitRateVector, markovianMecStates);
    // To ensure that the model is aperiodic, we need to make sure that every Markovian state gets a self loop.
    // Hence, we increase the uniformization rate a little.
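    // Uniformization with rate R >= E(s) rescales every transition of a Markovian state s by the factor E(s)/R and puts
    // the remaining probability mass 1 - (E(s)/R) * (1 - P(s, s)) on the self-loop; probabilistic states are left
    // untouched. Choosing R strictly larger than the maximal exit rate guarantees a positive self-loop for every
    // Markovian state and hence an aperiodic uniformized chain.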
    uniformizationRate += storm::utility::one<ValueType>(); // Todo: try other values such as *= 1.01

    // Get the transitions of the submodel, that is
    // * a matrix aMarkovian with all (uniformized) transitions from Markovian mec states to all Markovian mec states.
    // * a matrix aMarkovianToProbabilistic with all (uniformized) transitions from Markovian mec states to all probabilistic mec states.
    // * a matrix aProbabilistic with all transitions from probabilistic mec states to other probabilistic mec states.
    // * a matrix aProbabilisticToMarkovian with all transitions from probabilistic mec states to all Markovian mec states.
    typename storm::storage::SparseMatrix<ValueType> aMarkovian = transitionMatrix.getSubmatrix(true, markovianMecStates, markovianMecStates, true);
    typename storm::storage::SparseMatrix<ValueType> aMarkovianToProbabilistic = transitionMatrix.getSubmatrix(true, markovianMecStates, probabilisticMecStates);
    typename storm::storage::SparseMatrix<ValueType> aProbabilistic = transitionMatrix.getSubmatrix(false, probabilisticMecChoices, probabilisticMecStates);
    typename storm::storage::SparseMatrix<ValueType> aProbabilisticToMarkovian = transitionMatrix.getSubmatrix(false, probabilisticMecChoices, markovianMecStates);

    // The matrices with transitions from Markovian states need to be uniformized.
    uint64_t subState = 0;
    for (auto state : markovianMecStates) {
        ValueType uniformizationFactor = exitRateVector[state] / uniformizationRate;
        for (auto& entry : aMarkovianToProbabilistic.getRow(subState)) {
            entry.setValue(entry.getValue() * uniformizationFactor);
        }
        for (auto& entry : aMarkovian.getRow(subState)) {
            if (entry.getColumn() == subState) {
                entry.setValue(storm::utility::one<ValueType>() - uniformizationFactor * (storm::utility::one<ValueType>() - entry.getValue()));
            } else {
                entry.setValue(entry.getValue() * uniformizationFactor);
            }
        }
        ++subState;
    }

    // Compute the rewards obtained in a single uniformization step.
    std::vector<ValueType> markovianChoiceRewards;
    markovianChoiceRewards.reserve(aMarkovian.getRowCount());
    for (auto const& state : markovianMecStates) {
        ValueType stateRewardScalingFactor = storm::utility::one<ValueType>() / uniformizationRate;
        ValueType actionRewardScalingFactor = exitRateVector[state] / uniformizationRate;
        assert(transitionMatrix.getRowGroupSize(state) == 1);
        uint64_t choice = transitionMatrix.getRowGroupIndices()[state];
        markovianChoiceRewards.push_back(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, stateRewardScalingFactor, actionRewardScalingFactor));
    }

    std::vector<ValueType> probabilisticChoiceRewards;
    probabilisticChoiceRewards.reserve(aProbabilistic.getRowCount());
    for (auto const& state : probabilisticMecStates) {
        uint64_t groupStart = transitionMatrix.getRowGroupIndices()[state];
        uint64_t groupEnd = transitionMatrix.getRowGroupIndices()[state + 1];
        for (uint64_t choice = probabilisticMecChoices.getNextSetIndex(groupStart); choice < groupEnd; choice = probabilisticMecChoices.getNextSetIndex(choice + 1)) {
            probabilisticChoiceRewards.push_back(rewardModel.getTotalStateActionReward(state, choice, transitionMatrix, storm::utility::zero<ValueType>()));
        }
    }

    // Start the iterations.
    ValueType precision = storm::utility::convertNumber<ValueType>(storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision()) / uniformizationRate;
    std::vector<ValueType> v(aMarkovian.getRowCount(), storm::utility::zero<ValueType>());
    std::vector<ValueType> w = v;
    std::vector<ValueType> x(aProbabilistic.getRowGroupCount(), storm::utility::zero<ValueType>());
    std::vector<ValueType> b = probabilisticChoiceRewards;
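    // The loop below performs a relative value iteration on the uniformized model: in each round, the expected total
    // rewards x of the probabilistic states are computed for the current Markovian values w (via the MinMax solver),
    // then every Markovian value v is updated by one uniformized step. Iteration stops once the span between the
    // largest and smallest componentwise change of v is below the precision; the long-run average is then the gain per
    // uniformized step, v.front(), rescaled with the uniformization rate.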
    // Check for requirements of the solver.
    // The solution is unique as we assume non-zeno MAs.
    storm::solver::MinMaxLinearEquationSolverRequirements requirements = minMaxLinearEquationSolverFactory.getRequirements(true, dir);
    requirements.clearLowerBounds();
    STORM_LOG_THROW(requirements.empty(), storm::exceptions::UncheckedRequirementException, "Cannot establish requirements for solver.");

    auto solver = minMaxLinearEquationSolverFactory.create(std::move(aProbabilistic));
    solver->setLowerBound(storm::utility::zero<ValueType>());
    solver->setHasUniqueSolution(true);
    solver->setRequirementsChecked(true);
    solver->setCachingEnabled(true);

    while (true) {
        // Compute the expected total rewards for the probabilistic states.
        solver->solveEquations(dir, x, b);

        // Now compute the values for the Markovian states. We also keep track of the maximal and minimal difference between two values (for convergence checking).
        auto vIt = v.begin();
        uint64_t row = 0;
        ValueType newValue = markovianChoiceRewards[row] + aMarkovianToProbabilistic.multiplyRowWithVector(row, x) + aMarkovian.multiplyRowWithVector(row, w);
        ValueType maxDiff = newValue - *vIt;
        ValueType minDiff = maxDiff;
        *vIt = newValue;
        for (++vIt, ++row; row < aMarkovian.getRowCount(); ++vIt, ++row) {
            newValue = markovianChoiceRewards[row] + aMarkovianToProbabilistic.multiplyRowWithVector(row, x) + aMarkovian.multiplyRowWithVector(row, w);
            ValueType diff = newValue - *vIt;
            maxDiff = std::max(maxDiff, diff);
            minDiff = std::min(minDiff, diff);
            *vIt = newValue;
        }

        // Check for convergence.
        if (maxDiff - minDiff < precision) {
            break;
        }

        // Update the rhs of the MinMax equation system.
        ValueType referenceValue = v.front();
        storm::utility::vector::applyPointwise(v, w, [&referenceValue] (ValueType const& v_i) -> ValueType { return v_i - referenceValue; });
        aProbabilisticToMarkovian.multiplyWithVector(w, b);
        storm::utility::vector::addVectors(b, probabilisticChoiceRewards, b);
    }

    return v.front() * uniformizationRate;
}

template std::vector<double> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);

template std::vector<double> SparseMarkovAutomatonCslHelper::computeUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);

template std::vector<double> SparseMarkovAutomatonCslHelper::computeReachabilityRewards(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);

template std::vector<double> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);
template std::vector<double> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);

template std::vector<double> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, storm::storage::SparseMatrix<double> const& backwardTransitions, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);

template void SparseMarkovAutomatonCslHelper::computeBoundedReachabilityProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRates, storm::storage::BitVector const& goalStates, storm::storage::BitVector const& markovianNonGoalStates, storm::storage::BitVector const& probabilisticNonGoalStates, std::vector<double>& markovianNonGoalValues, std::vector<double>& probabilisticNonGoalValues, double delta, uint64_t numberOfSteps, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);

template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);

template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec);

template double SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(OptimizationDirection dir, storm::storage::SparseMatrix<double> const& transitionMatrix, std::vector<double> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<double> const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory<double> const& minMaxLinearEquationSolverFactory);

template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeBoundedUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, std::pair<double, double> const& boundsPair, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);
template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeUntilProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, storm::storage::BitVector const& phiStates, storm::storage::BitVector const& psiStates, bool qualitative, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);

template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeReachabilityRewards(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);

template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeLongRunAverageProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);

template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeLongRunAverageRewards(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);

template std::vector<storm::RationalNumber> SparseMarkovAutomatonCslHelper::computeReachabilityTimes(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, storm::storage::SparseMatrix<storm::RationalNumber> const& backwardTransitions, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::storage::BitVector const& psiStates, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);

template void SparseMarkovAutomatonCslHelper::computeBoundedReachabilityProbabilities(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRates, storm::storage::BitVector const& goalStates, storm::storage::BitVector const& markovianNonGoalStates, storm::storage::BitVector const& probabilisticNonGoalStates, std::vector<storm::RationalNumber>& markovianNonGoalValues, std::vector<storm::RationalNumber>& probabilisticNonGoalValues, storm::RationalNumber delta, uint64_t numberOfSteps, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);

template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponent(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);

template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentLP(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec);
template storm::RationalNumber SparseMarkovAutomatonCslHelper::computeLraForMaximalEndComponentVI(OptimizationDirection dir, storm::storage::SparseMatrix<storm::RationalNumber> const& transitionMatrix, std::vector<storm::RationalNumber> const& exitRateVector, storm::storage::BitVector const& markovianStates, storm::models::sparse::StandardRewardModel<storm::RationalNumber> const& rewardModel, storm::storage::MaximalEndComponent const& mec, storm::solver::MinMaxLinearEquationSolverFactory<storm::RationalNumber> const& minMaxLinearEquationSolverFactory);

}  // namespace helper
}  // namespace modelchecker
}  // namespace storm