diff --git a/CHANGELOG.md b/CHANGELOG.md index 43b7c547a..bec6b578c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,12 @@ The releases of major and minor versions contain an overview of changes since th Version 1.6.x ------------- +## Version 1.6.3 (20xx/xx) +- Simulator supports exact arithmetic +- `storm-pomdp`: States can be labelled with values for observable predicates +- `storm-pomdp`: (Only API) Track state estimates +- `storm-pomdp`: (Only API) Reduce computation of state estimates to computation on unrolled MDP + ## Version 1.6.2 (2020/09) - Prism program simplification improved. - Revamped implementation of long-run-average algorithms, including scheduler export for LRA properties on Markov automata. diff --git a/src/storm-pomdp/generator/NondeterministicBeliefTracker.cpp b/src/storm-pomdp/generator/NondeterministicBeliefTracker.cpp index a2d47e721..c02074a6b 100644 --- a/src/storm-pomdp/generator/NondeterministicBeliefTracker.cpp +++ b/src/storm-pomdp/generator/NondeterministicBeliefTracker.cpp @@ -153,48 +153,12 @@ namespace storm { //return std::equal(lhs.belief.begin(), lhs.belief.end(), rhs.belief.begin()); } - template - SparseBeliefState SparseBeliefState::update(uint64_t action, uint32_t observation) const { - std::map newBelief; - ValueType sum = storm::utility::zero(); - for (auto const& beliefentry : belief) { - assert(manager->getPomdp().getNumberOfChoices(beliefentry.first) > action); - auto row = manager->getPomdp().getNondeterministicChoiceIndices()[beliefentry.first] + action; - for (auto const& transition : manager->getPomdp().getTransitionMatrix().getRow(row)) { - if (observation != manager->getPomdp().getObservation(transition.getColumn())) { - continue; - } - - if (newBelief.count(transition.getColumn()) == 0) { - newBelief[transition.getColumn()] = transition.getValue() * beliefentry.second; - } else { - newBelief[transition.getColumn()] += transition.getValue() * beliefentry.second; - } - sum += transition.getValue() * 
beliefentry.second; - } - } - std::size_t newHash = 0; - ValueType risk = storm::utility::zero(); - for(auto& entry : newBelief) { - assert(!storm::utility::isZero(sum)); - entry.second /= sum; - //boost::hash_combine(newHash, std::hash()(entry.second)); - boost::hash_combine(newHash, entry.first); - risk += entry.second * manager->getRisk(entry.first); - } - return SparseBeliefState(manager, newBelief, newHash, risk, id); - } template void SparseBeliefState::update(uint32_t newObservation, std::unordered_set>& previousBeliefs) const { updateHelper({{}}, {storm::utility::zero()}, belief.begin(), newObservation, previousBeliefs); } - template - Eigen::Matrix SparseBeliefState::toEigenVector(storm::storage::BitVector const& support) const { - assert(false); - } - template uint64_t SparseBeliefState::getSupportSize() const { return manager->getNumberOfStates(); @@ -385,11 +349,6 @@ namespace storm { return risk; } - template - Eigen::Matrix ObservationDenseBeliefState::toEigenVector(storm::storage::BitVector const& support) const { - return storm::adapters::EigenAdapter::toEigenVector(storm::utility::vector::filterVector(belief, support)); - } - template uint64_t ObservationDenseBeliefState::getSupportSize() const { return belief.size(); @@ -398,8 +357,6 @@ namespace storm { template void ObservationDenseBeliefState::setSupport(storm::storage::BitVector& support) const { storm::utility::vector::setNonzeroIndices(belief, support); - std::cout << "Belief is " << storm::utility::vector::toString(belief) << std::endl; - std::cout << "Support is now " << support << std::endl; } @@ -450,7 +407,6 @@ namespace storm { bool NondeterministicBeliefTracker::track(uint64_t newObservation) { STORM_LOG_THROW(!beliefs.empty(), storm::exceptions::InvalidOperationException, "Cannot track without a belief (need to reset)."); std::unordered_set newBeliefs; - //for (uint64_t action = 0; action < manager->getActionsForObservation(lastObservation); ++action) { storm::utility::Stopwatch 
trackTimer(true); for (auto const& belief : beliefs) { belief.update(newObservation, newBeliefs); @@ -458,7 +414,6 @@ namespace storm { return false; } } - //} beliefs = newBeliefs; lastObservation = newObservation; return !beliefs.empty(); diff --git a/src/storm-pomdp/generator/NondeterministicBeliefTracker.h b/src/storm-pomdp/generator/NondeterministicBeliefTracker.h index 632c8f1aa..ad5b87143 100644 --- a/src/storm-pomdp/generator/NondeterministicBeliefTracker.h +++ b/src/storm-pomdp/generator/NondeterministicBeliefTracker.h @@ -1,9 +1,12 @@ #pragma once #include "storm/models/sparse/Pomdp.h" -#include "storm/adapters/EigenAdapter.h" namespace storm { namespace generator { + /** + * This class keeps track of common information of a set of beliefs. + * It also keeps a reference to the POMDP. The manager is referenced by all beliefs. + */ template class BeliefStateManager { public: @@ -28,22 +31,41 @@ namespace storm { std::vector> statePerObservationAndOffset; }; + template class SparseBeliefState; template bool operator==(SparseBeliefState const& lhs, SparseBeliefState const& rhs); + + /** + * SparseBeliefState stores beliefs in a sparse format. 
+ */ template class SparseBeliefState { public: SparseBeliefState(std::shared_ptr> const& manager, uint64_t state); - SparseBeliefState update(uint64_t action, uint32_t observation) const; + /** + * Update the belief using the new observation + * @param newObservation + * @param previousBeliefs put the new belief in this set + */ void update(uint32_t newObservation, std::unordered_set& previousBeliefs) const; std::size_t hash() const noexcept; + /** + * Get the estimate to be in the given state + * @param state + * @return + */ ValueType get(uint64_t state) const; + /** + * Get the weighted risk + * @return + */ ValueType getRisk() const; + + // Various getters std::string toString() const; bool isValid() const; - Eigen::Matrix toEigenVector(storm::storage::BitVector const& support) const; uint64_t getSupportSize() const; void setSupport(storm::storage::BitVector&) const; std::map const& getBeliefMap() const; @@ -61,7 +83,9 @@ namespace storm { uint64_t prevId; }; - + /** + * ObservationDenseBeliefState stores beliefs in a dense format (per observation). + */ template class ObservationDenseBeliefState; template @@ -76,7 +100,6 @@ namespace storm { ValueType get(uint64_t state) const; ValueType getRisk() const; std::string toString() const; - Eigen::Matrix toEigenVector(storm::storage::BitVector const& support) const; uint64_t getSupportSize() const; void setSupport(storm::storage::BitVector&) const; friend bool operator==<>(ObservationDenseBeliefState const& lhs, ObservationDenseBeliefState const& rhs); @@ -93,10 +116,15 @@ namespace storm { uint64_t prevId; }; - + /** + * This tracker implements state estimation for POMDPs. + * This corresponds to forward filtering in Junges, Torfah, Seshia. 
+ * + * @tparam ValueType How are probabilities stored + * @tparam BeliefState What format to use for beliefs + */ template class NondeterministicBeliefTracker { - public: struct Options { uint64_t trackTimeOut = 0; @@ -104,15 +132,59 @@ namespace storm { ValueType wiggle; // tolerance, anything above 0 means that we are incomplete. }; NondeterministicBeliefTracker(storm::models::sparse::Pomdp const& pomdp, typename NondeterministicBeliefTracker::Options options = Options()); + /** + * Start with a new trace. + * @param observation The initial observation to start with. + * @return + */ bool reset(uint32_t observation); + /** + * Extend the observed trace with the new observation + * @param newObservation + * @return + */ bool track(uint64_t newObservation); + /** + * Provides access to the current beliefs. + * @return + */ std::unordered_set const& getCurrentBeliefs() const; + /** + * What was the last observation that we made? + * @return + */ uint32_t getCurrentObservation() const; + /** + * How many beliefs are we currently tracking? + * @return + */ uint64_t getNumberOfBeliefs() const; + /** + * What is the (worst-case/best-case) risk over all beliefs + * @param max Should we take the max or the min? + * @return + */ ValueType getCurrentRisk(bool max=true); + /** + * Sets the state-risk to use for all beliefs. + * @param risk + */ void setRisk(std::vector const& risk); + /** + * What is the dimension of the current set of beliefs, i.e., + * what is the number of states we could possibly be in? + * @return + */ uint64_t getCurrentDimension() const; + /** + * Apply reductions to the belief state + * @return + */ uint64_t reduce(); + /** + * Did we time out during the computation? 
+ * @return + */ bool hasTimedOut() const; private: diff --git a/src/storm-pomdp/transformer/ObservationTraceUnfolder.h b/src/storm-pomdp/transformer/ObservationTraceUnfolder.h index 55036739b..68a4f73c5 100644 --- a/src/storm-pomdp/transformer/ObservationTraceUnfolder.h +++ b/src/storm-pomdp/transformer/ObservationTraceUnfolder.h @@ -2,13 +2,38 @@ namespace storm { namespace pomdp { + /** + * Observation-trace unrolling to allow model checking for monitoring. + * This approach is outlined in Junges, Torfah, Seshia -- Runtime Monitoring for Markov Decision Processes + * @tparam ValueType ValueType for probabilities + */ template class ObservationTraceUnfolder { public: + /** + * Initialize + * @param model the MDP with state-based observations + * @param risk the state risk + * @param exprManager an Expression Manager + */ ObservationTraceUnfolder(storm::models::sparse::Pomdp const& model, std::vector const& risk, std::shared_ptr& exprManager); + /** + * Transform in one shot + * @param observations + * @return + */ std::shared_ptr> transform(std::vector const& observations); + /** + * Transform incrementally + * @param observation + * @return + */ std::shared_ptr> extend(uint32_t observation); + /** + * When using the incremental approach, reset the observations made so far. + * @param observation The new initial observation + */ void reset(uint32_t observation); private: storm::models::sparse::Pomdp const& model;