From ad73e61f12c190c611dc432f1931193dfd1b5e80 Mon Sep 17 00:00:00 2001
From: TimQu <tim.quatmann@web.de>
Date: Sun, 29 May 2016 16:06:14 +0200
Subject: [PATCH] improvements for preprocessing

Former-commit-id: 8a43c80178bc7ceac75b8d6dfe4bd56929bf9e6e
---
 .../multi-objective/mdp/simple/simple.pctl    |   5 +-
 .../SparseMdpMultiObjectiveModelChecker.cpp   |   7 +-
 ...seMdpMultiObjectivePreprocessingHelper.cpp | 260 +++++++++---------
 ...arseMdpMultiObjectivePreprocessingHelper.h |  45 +--
 ...rseMultiObjectiveModelCheckerInformation.h |  89 +++---
 5 files changed, 206 insertions(+), 200 deletions(-)

diff --git a/examples/multi-objective/mdp/simple/simple.pctl b/examples/multi-objective/mdp/simple/simple.pctl
index 1300542f9..61e4e1a64 100644
--- a/examples/multi-objective/mdp/simple/simple.pctl
+++ b/examples/multi-objective/mdp/simple/simple.pctl
@@ -1,3 +1,2 @@
-Pmax=? [ F "a" ] ;
-Pmax=? [ F "b" ] ;
-multi(P>0.4 [ F "a"], P>0.3 [ F "b"] )
\ No newline at end of file
+multi(P>0.4 [ F "a"], P>0.3 [ F "b"] )
+//multi(Pmin=? [ F<=10 "a"], R<0.3 [ F "b" | "a"] )
\ No newline at end of file
diff --git a/src/modelchecker/multiobjective/SparseMdpMultiObjectiveModelChecker.cpp b/src/modelchecker/multiobjective/SparseMdpMultiObjectiveModelChecker.cpp
index 7d94481f8..5cd49308e 100644
--- a/src/modelchecker/multiobjective/SparseMdpMultiObjectiveModelChecker.cpp
+++ b/src/modelchecker/multiobjective/SparseMdpMultiObjectiveModelChecker.cpp
@@ -35,7 +35,10 @@ namespace storm {
         std::unique_ptr<CheckResult> SparseMdpMultiObjectiveModelChecker<SparseMdpModelType>::checkMultiObjectiveFormula(CheckTask<storm::logic::MultiObjectiveFormula> const& checkTask) {
             
             helper::SparseMultiObjectiveModelCheckerInformation<SparseMdpModelType> info = helper::SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(checkTask.getFormula(), this->getModel());
-          
+
+            std::cout << std::endl;
+            info.printInformationToStream(std::cout);
+
             return std::unique_ptr<CheckResult>(new ExplicitQualitativeCheckResult());
         }
         
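
The dump that checkMultiObjectiveFormula now emits comes from printInformationToStream (added to SparseMultiObjectiveModelCheckerInformation further below), which lines its fields up with <iomanip>. A minimal standalone sketch of that formatting pattern follows; the ObjectiveSummary struct and its values are illustrative stand-ins, not Storm types:

    // Standalone sketch of the std::setw-based alignment used by
    // printInformationToStream; the two objectives mirror simple.pctl above.
    #include <iomanip>
    #include <iostream>
    #include <string>

    struct ObjectiveSummary {
        std::string formula;
        bool minimizes;
        double threshold;
    };

    int main() {
        ObjectiveSummary objectives[] = {{"P>0.4 [ F \"a\" ]", false, 0.4},
                                         {"P>0.3 [ F \"b\" ]", false, 0.3}};
        for (auto const& obj : objectives) {
            std::cout << std::setw(30) << obj.formula
                      << (obj.minimizes ? " (minimizes, " : " (maximizes, ")
                      << "threshold: " << std::setw(5) << obj.threshold << ")\n";
        }
        return 0;
    }
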
diff --git a/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectivePreprocessingHelper.cpp b/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectivePreprocessingHelper.cpp
index 32701b032..935e4ecba 100644
--- a/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectivePreprocessingHelper.cpp
+++ b/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectivePreprocessingHelper.cpp
@@ -15,7 +15,7 @@
 #include "src/utility/graph.h"
 
 #include "src/exceptions/InvalidPropertyException.h"
-#include "src/exceptions/UnexpectedException.h"
+#include "src/exceptions/NotImplementedException.h"
 
 namespace storm {
     namespace modelchecker {
@@ -24,240 +24,231 @@ namespace storm {
             template<typename SparseMdpModelType>
             SparseMultiObjectiveModelCheckerInformation<SparseMdpModelType> SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::MultiObjectiveFormula const& originalFormula, SparseMdpModelType originalModel) {
                 Information info(std::move(originalModel));
-                
                 //Initialize the state mapping.
-                info.setNewToOldStateIndexMapping(storm::utility::vector::buildVectorForRange(0, info.getModel().getNumberOfStates()));
-                
+                info.newToOldStateIndexMapping = storm::utility::vector::buildVectorForRange(0, info.model.getNumberOfStates());
                 //Gather information regarding the individual objectives
-                if(!gatherObjectiveInformation(originalFormula, info)){
-                    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not gather information for objectives " << originalFormula << ".");
-                }
-                
-                // Find out whether negative rewards should be considered.
-                if(!setWhetherNegativeRewardsAreConsidered(info)){
-                    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not find out whether to consider negative rewards " << originalFormula << ".");
+                for(auto const& subFormula : originalFormula.getSubFormulas()){
+                    addObjective(subFormula, info);
                 }
-                
+                // Find out whether negated rewards should be considered.
+                setWhetherNegatedRewardsAreConsidered(info);
                 //Invoke preprocessing on objectives
-                bool success = false;
-                for (auto& obj : info.getObjectives()){
+                for (auto& obj : info.objectives){
                     STORM_LOG_DEBUG("Preprocessing objective " << *obj.originalFormula << ".");
                     if(obj.originalFormula->isProbabilityOperatorFormula()){
-                        success = preprocess(obj.originalFormula->asProbabilityOperatorFormula(), info, obj);
+                        preprocessFormula(obj.originalFormula->asProbabilityOperatorFormula(), info, obj);
                     } else if(obj.originalFormula->isRewardOperatorFormula()){
-                        success = preprocess(obj.originalFormula->asRewardOperatorFormula(), info, obj);
+                        preprocessFormula(obj.originalFormula->asRewardOperatorFormula(), info, obj);
+                    } else {
+                        STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Could not preprocess the subformula " << *obj.originalFormula << " of " << originalFormula << " because it is not supported.");
                     }
                 }
-                STORM_LOG_THROW(success, storm::exceptions::InvalidPropertyException, "Could not preprocess for the formula " << originalFormula << ".");
+                // We can now remove all original reward models to save some memory.
+                std::set<std::string> origRewardModels = originalFormula.getReferencedRewardModels();
+                for (auto const& rewModel : origRewardModels){
+                    info.model.removeRewardModel(rewModel);
+                }
                 return info;
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::gatherObjectiveInformation(storm::logic::MultiObjectiveFormula const& formula, Information& info) {
-                for(auto const& subF : formula.getSubFormulas()){
-                    if(!subF->isOperatorFormula()){
-                        STORM_LOG_ERROR("Expected an OperatorFormula as subformula of " << formula << " but got " << *subF);
-                        return false;
-                    }
-                    storm::logic::OperatorFormula const& f = subF->asOperatorFormula();
-                    typename Information::ObjectiveInformation objective;
-                    
-                    objective.originalFormula = subF;
-                    
-                    if(f.hasBound()){
-                        objective.threshold = f.getBound().threshold;
-                        // Note that we minimize if the comparison type is an upper bound since we are interested in the EXISTENCE of a scheduler...
-                        objective.originalFormulaMinimizes = !storm::logic::isLowerBound(f.getBound().comparisonType);
-                    } else if (f.hasOptimalityType()){
-                        objective.originalFormulaMinimizes = storm::solver::minimize(f.getOptimalityType());
-                    } else {
-                        STORM_LOG_ERROR("Current objective" << f << "does not specify whether to minimize or maximize");
-                    }
-                    
-                    objective.rewardModelName = "objective" + std::to_string(info.getObjectives().size());
-                    // Make sure the new reward model gets a unique name
-                    while(info.getModel().hasRewardModel(objective.rewardModelName)){
-                        objective.rewardModelName = "_" + objective.rewardModelName;
-                    }
-                    
-                    if(!setStepBoundOfObjective(objective)) {
-                        return false;
-                    }
-                    
-                    info.getObjectives().push_back(std::move(objective));
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::addObjective(std::shared_ptr<storm::logic::Formula const> const& formula, Information& info) {
+                STORM_LOG_THROW(formula->isOperatorFormula(), storm::exceptions::InvalidPropertyException, "Expected an OperatorFormula as subformula of a multi-objective query but got " << *formula);
+                
+                typename Information::ObjectiveInformation objective;
+                objective.originalFormula = formula;
+                
+                storm::logic::OperatorFormula const& opFormula = formula->asOperatorFormula();
+                if(opFormula.hasBound()){
+                    objective.threshold = opFormula.getBound().threshold;
+                    // Note that we minimize if the comparison type is an upper bound since we are interested in the EXISTENCE of a scheduler.
+                    objective.originalFormulaMinimizes = !storm::logic::isLowerBound(opFormula.getBound().comparisonType);
+                } else if (opFormula.hasOptimalityType()){
+                    objective.originalFormulaMinimizes = storm::solver::minimize(opFormula.getOptimalityType());
+                } else {
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Current objective " << opFormula << " does not specify whether to minimize or maximize");
+                }
+                objective.rewardModelName = "objective" + std::to_string(info.objectives.size());
+                // Make sure the new reward model gets a unique name
+                while(info.model.hasRewardModel(objective.rewardModelName)){
+                    objective.rewardModelName = "_" + objective.rewardModelName;
                 }
-                return true;
+                setStepBoundOfObjective(objective);
+                info.objectives.push_back(std::move(objective));
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::setStepBoundOfObjective(typename Information::ObjectiveInformation& objective){
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::setStepBoundOfObjective(typename Information::ObjectiveInformation& objective){
                 if(objective.originalFormula->isProbabilityOperatorFormula()){
                     storm::logic::Formula const& f = objective.originalFormula->asProbabilityOperatorFormula().getSubformula();
                     if(f.isBoundedUntilFormula()){
-                        if(f.asBoundedUntilFormula().hasDiscreteTimeBound()){
-                            objective.stepBound = f.asBoundedUntilFormula().getDiscreteTimeBound();
-                        } else {
-                            STORM_LOG_ERROR("Expected a discrete time bound at boundedUntilFormula but got" << f << ".");
-                            return false;
-                        }
+                        STORM_LOG_THROW(f.asBoundedUntilFormula().hasDiscreteTimeBound(), storm::exceptions::InvalidPropertyException, "Expected a boundedUntilFormula with a discrete time bound but got " << f << ".");
+                        objective.stepBound = f.asBoundedUntilFormula().getDiscreteTimeBound();
                     }
                 } else if(objective.originalFormula->isRewardOperatorFormula()){
                     storm::logic::Formula const& f = objective.originalFormula->asRewardOperatorFormula();
                     if(f.isCumulativeRewardFormula()){
-                        if(f.asCumulativeRewardFormula().hasDiscreteTimeBound()){
-                            objective.stepBound = f.asCumulativeRewardFormula().getDiscreteTimeBound();
-                        } else {
-                            STORM_LOG_ERROR("Expected a discrete time bound at cumulativeRewardFormula but got" << f << ".");
-                            return false;
-                        }
+                        STORM_LOG_THROW(f.asCumulativeRewardFormula().hasDiscreteTimeBound(), storm::exceptions::InvalidPropertyException, "Expected a cumulativeRewardFormula with a discrete time bound but got " << f << ".");
+                        objective.stepBound = f.asCumulativeRewardFormula().getDiscreteTimeBound();
                     }
                 } else {
-                    STORM_LOG_ERROR("Expected a Probability or Reward OperatorFormula but got " << *objective.originalFormula << ".");
-                    return false;
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "Expected a Probability or Reward OperatorFormula but got " << *objective.originalFormula << ".");
                 }
-                return true;
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::setWhetherNegativeRewardsAreConsidered(Information& info) {
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::setWhetherNegatedRewardsAreConsidered(Information& info) {
-                // Negative Rewards are considered  whenever there is an unbounded reward objective that requires to minimize.
+                // Negated rewards are considered whenever there is an unbounded reward objective that requires minimization.
                 // If there is no unbounded reward objective, we make a majority vote.
                 uint_fast64_t numOfMinimizingObjectives = 0;
-                for(auto const& obj : info.getObjectives()){
+                for(auto const& obj : info.objectives){
                     if(obj.originalFormula->isRewardOperatorFormula() && !obj.stepBound){
-                        info.setNegativeRewardsConsidered(obj.originalFormulaMinimizes);
-                        return true;
+                        info.negatedRewardsConsidered = obj.originalFormulaMinimizes;
+                        return;
                     }
                     numOfMinimizingObjectives += obj.originalFormulaMinimizes ? 1 : 0;
                 }
                 // Reaching this point means that we make a majority vote
-                info.setNegativeRewardsConsidered(numOfMinimizingObjectives*2 > info.getObjectives().size());
-                return true;
+                info.negatedRewardsConsidered = (numOfMinimizingObjectives*2) > info.objectives.size();
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::ProbabilityOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::ProbabilityOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
                 // Check if we need to complement the property, e.g., P<0.3 [ F "a" ] ---> P >=0.7 [ G !"a" ]
-                // This is the case if the formula requires to minimize and positive rewards are considered or vice versa
+                // This is the case if the formula requires minimization while non-negated rewards are considered, or vice versa.
-                if(info.areNegativeRewardsConsidered() != currentObjective.originalFormulaMinimizes){
-                    STORM_LOG_ERROR("Inverting of properties not yet supported");
+                if(info.negatedRewardsConsidered != currentObjective.originalFormulaMinimizes){
+                    STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Inverting properties is not yet supported.");
                     //TODO
-                    return false;
                 }
                 
-                bool success = false;
+                if(info.negatedRewardsConsidered && currentObjective.threshold){
+                    *(currentObjective.threshold) *= -storm::utility::one<ValueType>();
+                }
+                
                 // Invoke preprocessing for subformula
                 if(formula.getSubformula().isUntilFormula()){
-                    success = preprocess(formula.getSubformula().asUntilFormula(), info, currentObjective);
+                    preprocessFormula(formula.getSubformula().asUntilFormula(), info, currentObjective);
                 } else if(formula.getSubformula().isBoundedUntilFormula()){
-                    success = preprocess(formula.getSubformula().asBoundedUntilFormula(), info, currentObjective);
+                    preprocessFormula(formula.getSubformula().asBoundedUntilFormula(), info, currentObjective);
                 } else if(formula.getSubformula().isGloballyFormula()){
-                    success = preprocess(formula.getSubformula().asGloballyFormula(), info, currentObjective);
+                    preprocessFormula(formula.getSubformula().asGloballyFormula(), info, currentObjective);
                 } else if(formula.getSubformula().isEventuallyFormula()){
-                    success = preprocess(formula.getSubformula().asEventuallyFormula(), info, currentObjective);
+                    preprocessFormula(formula.getSubformula().asEventuallyFormula(), info, currentObjective);
+                } else {
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
                 }
-                STORM_LOG_ERROR_COND(success, "No implementation for the subformula of " << formula << ".");
-                
-                return success;
             }
 
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::RewardOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::RewardOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
-                // Make sure that our decision for negative rewards is consistent with the formula
+                // Make sure that our decision for negated rewards is consistent with the formula.
-                if(info.areNegativeRewardsConsidered() != currentObjective.originalFormulaMinimizes){
-                    STORM_LOG_ERROR("Decided to consider " << (info.areNegativeRewardsConsidered() ? "negative" : "non-negative") << "rewards, but the formula " << formula << (currentObjective.originalFormulaMinimizes ? " minimizes" : "maximizes") << ". Reward objectives should either all minimize or all maximize.");
-                    return false;
-                }
+                STORM_LOG_THROW(info.negatedRewardsConsidered == currentObjective.originalFormulaMinimizes, storm::exceptions::InvalidPropertyException, "Decided to consider " << (info.negatedRewardsConsidered ? "negated" : "non-negated") << " rewards, but the formula " << formula << (currentObjective.originalFormulaMinimizes ? " minimizes" : " maximizes") << ". Reward objectives should either all minimize or all maximize.");
                 
                 // Check if the reward model is uniquely specified
-                if((formula.hasRewardModelName() && info.getModel().hasRewardModel(formula.getRewardModelName()))
-                        || info.getModel().hasUniqueRewardModel()){
-                    STORM_LOG_ERROR("The reward model is not unique and the formula " << formula << " does not specify a reward model.");
-                    return false;
+                STORM_LOG_THROW((formula.hasRewardModelName() && info.model.hasRewardModel(formula.getRewardModelName()))
+                                || info.model.hasUniqueRewardModel(), storm::exceptions::InvalidPropertyException, "The reward model is not unique and the formula " << formula << " does not specify a reward model.");
+                
+                if(info.negatedRewardsConsidered && currentObjective.threshold){
+                    *(currentObjective.threshold) *= -storm::utility::one<ValueType>();
                 }
                 
-                bool success = false;
                 // Invoke preprocessing for subformula
                 if(formula.getSubformula().isEventuallyFormula()){
-                    success = preprocess(formula.getSubformula().asEventuallyFormula(), info, currentObjective, formula.getOptionalRewardModelName());
+                    preprocessFormula(formula.getSubformula().asEventuallyFormula(), info, currentObjective, formula.getOptionalRewardModelName());
                 } else if(formula.getSubformula().isCumulativeRewardFormula()) {
-                    success = preprocess(formula.getSubformula().asCumulativeRewardFormula(), info, currentObjective, formula.getOptionalRewardModelName());
+                    preprocessFormula(formula.getSubformula().asCumulativeRewardFormula(), info, currentObjective, formula.getOptionalRewardModelName());
+                } else {
+                    STORM_LOG_THROW(false, storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " is not supported.");
                 }
-                STORM_LOG_ERROR_COND(success, "No implementation for the subformula of " << formula << ".");
-                
-                return success;
-                
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::UntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
-                bool success = false;
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::UntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
                 CheckTask<storm::logic::Formula> phiTask(formula.getLeftSubformula());
                 CheckTask<storm::logic::Formula> psiTask(formula.getRightSubformula());
-                storm::modelchecker::SparsePropositionalModelChecker<SparseMdpModelType> mc(info.getModel());
-                success = mc.canHandle(phiTask) && mc.canHandle(psiTask);
-                STORM_LOG_ERROR_COND(success, "The subformulas of " << formula << " should be propositional.");
+                storm::modelchecker::SparsePropositionalModelChecker<SparseMdpModelType> mc(info.model);
+                STORM_LOG_THROW(mc.canHandle(phiTask) && mc.canHandle(psiTask), storm::exceptions::InvalidPropertyException, "The subformulas of " << formula << " should be propositional.");
                 storm::storage::BitVector phiStates = mc.check(phiTask)->asExplicitQualitativeCheckResult().getTruthValuesVector();
                 storm::storage::BitVector psiStates = mc.check(psiTask)->asExplicitQualitativeCheckResult().getTruthValuesVector();
                 
-                auto duplicatorResult = storm::transformer::StateDuplicator<SparseMdpModelType>::transform(info.getModel(), ~phiStates | psiStates);
+                auto duplicatorResult = storm::transformer::StateDuplicator<SparseMdpModelType>::transform(info.model, ~phiStates | psiStates);
+                info.model = std::move(*duplicatorResult.model);
-                // duplicatorResult.newToOldStateIndexMapping now reffers to the indices of the model we had before preprocessing this formula.
+                // duplicatorResult.newToOldStateIndexMapping now refers to the indices of the model we had before preprocessing this formula.
                 // This might not be the actual original model.
                 for(auto & originalModelStateIndex : duplicatorResult.newToOldStateIndexMapping){
-                    originalModelStateIndex = info.getNewToOldStateIndexMapping()[originalModelStateIndex];
+                    originalModelStateIndex = info.newToOldStateIndexMapping[originalModelStateIndex];
                 }
-                info.setNewToOldStateIndexMapping(duplicatorResult.newToOldStateIndexMapping);
+                info.newToOldStateIndexMapping = std::move(duplicatorResult.newToOldStateIndexMapping);
                 
                 // build stateAction reward vector that gives (one*transitionProbability) reward whenever a transition leads from the firstCopy to a psiState
-                storm::storage::BitVector newPsiStates(duplicatorResult.model->getNumberOfStates(), false);
+                storm::storage::BitVector newPsiStates(info.model.getNumberOfStates(), false);
                 for(auto const& oldPsiState : psiStates){
                     //note that psiStates are always located in the second copy
                     newPsiStates.set(duplicatorResult.secondCopyOldToNewStateIndexMapping[oldPsiState], true);
                 }
-                std::vector<ValueType> objectiveRewards = duplicatorResult.model->getTransitionMatrix().getConstrainedRowGroupSumVector(duplicatorResult.firstCopy, newPsiStates);
-                if(info.areNegativeRewardsConsidered()){
+                std::vector<ValueType> objectiveRewards(info.model.getTransitionMatrix().getRowCount(), storm::utility::zero<ValueType>());
+                for (auto const& firstCopyState : duplicatorResult.firstCopy) {
+                    for (uint_fast64_t row = info.model.getTransitionMatrix().getRowGroupIndices()[firstCopyState]; row < info.model.getTransitionMatrix().getRowGroupIndices()[firstCopyState + 1]; ++row) {
+                        objectiveRewards[row] = info.model.getTransitionMatrix().getConstrainedRowSum(row, newPsiStates);
+                    }
+                }
+                if(info.negatedRewardsConsidered){
                     storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one<ValueType>());
                 }
-
-                duplicatorResult.model->getRewardModels().insert(std::make_pair(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards)));
-                info.setModel(std::move(*duplicatorResult.model));
-                
-               return success;
+                info.model.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards));
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::BoundedUntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
-                    return preprocess(storm::logic::UntilFormula(formula.getLeftSubformula().asSharedPointer(), formula.getRightSubformula().asSharedPointer()), info, currentObjective);
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::BoundedUntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
+                    preprocessFormula(storm::logic::UntilFormula(formula.getLeftSubformula().asSharedPointer(), formula.getRightSubformula().asSharedPointer()), info, currentObjective);
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::GloballyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::GloballyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective) {
                 //TODO
-                STORM_LOG_ERROR("Globally not yet implemented");
-                return false;
+                STORM_LOG_THROW(false, storm::exceptions::NotImplementedException, "Globally not yet implemented");
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::EventuallyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName) {
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::EventuallyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName) {
                 if(formula.isReachabilityProbabilityFormula()){
-                    return preprocess(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), formula.getSubformula().asSharedPointer()), info, currentObjective);
+                    preprocessFormula(storm::logic::UntilFormula(storm::logic::Formula::getTrueFormula(), formula.getSubformula().asSharedPointer()), info, currentObjective);
+                    return;
                 }
-                if (!formula.isReachabilityRewardFormula()){
-                    STORM_LOG_ERROR("The formula " << formula << " neither considers reachability Probabilities nor reachability rewards");
-                    return false;
+                STORM_LOG_THROW(formula.isReachabilityRewardFormula(), storm::exceptions::InvalidPropertyException, "The formula " << formula << " neither considers reachability probabilities nor reachability rewards.");
+                
+                CheckTask<storm::logic::Formula> targetTask(formula.getSubformula());
+                storm::modelchecker::SparsePropositionalModelChecker<SparseMdpModelType> mc(info.model);
+                STORM_LOG_THROW(mc.canHandle(targetTask), storm::exceptions::InvalidPropertyException, "The subformula of " << formula << " should be propositional.");
+                storm::storage::BitVector targetStates = mc.check(targetTask)->asExplicitQualitativeCheckResult().getTruthValuesVector();
+                
+                STORM_LOG_WARN("There is no check for reward finiteness yet"); //TODO
+                auto duplicatorResult = storm::transformer::StateDuplicator<SparseMdpModelType>::transform(info.model, targetStates);
+                info.model = std::move(*duplicatorResult.model);
+                // duplicatorResult.newToOldStateIndexMapping now refers to the indices of the model we had before preprocessing this formula.
+                // This might not be the actual original model.
+                for(auto & originalModelStateIndex : duplicatorResult.newToOldStateIndexMapping){
+                    originalModelStateIndex = info.newToOldStateIndexMapping[originalModelStateIndex];
                 }
-                //TODO
-                STORM_LOG_ERROR("Rewards not yet implemented");
-                return false;
+                info.newToOldStateIndexMapping = std::move(duplicatorResult.newToOldStateIndexMapping);
+                
+                // Add a reward model that copies the original rewards but gives zero reward to the states of the second copy.
+                std::vector<ValueType> objectiveRewards = info.model.getRewardModel(optionalRewardModelName ? optionalRewardModelName.get() : "").getTotalRewardVector(info.model.getTransitionMatrix());
+                storm::utility::vector::setVectorValues(objectiveRewards, duplicatorResult.secondCopy, storm::utility::zero<ValueType>());
+                if(info.negatedRewardsConsidered){
+                    storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one<ValueType>());
+                }
+                info.model.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards));
             }
             
             template<typename SparseMdpModelType>
-            bool SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocess(storm::logic::CumulativeRewardFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName) {
-                //TODO
-                STORM_LOG_ERROR("Rewards not yet implemented");
-                return false;
+            void SparseMdpMultiObjectivePreprocessingHelper<SparseMdpModelType>::preprocessFormula(storm::logic::CumulativeRewardFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName) {
+                std::vector<ValueType> objectiveRewards = info.model.getRewardModel(optionalRewardModelName ? optionalRewardModelName.get() : "").getTotalRewardVector(info.model.getTransitionMatrix());
+                if(info.negatedRewardsConsidered){
+                    storm::utility::vector::scaleVectorInPlace(objectiveRewards, -storm::utility::one<ValueType>());
+                }
+                info.model.addRewardModel(currentObjective.rewardModelName, RewardModelType(boost::none, objectiveRewards));
             }
-
             
             template class SparseMdpMultiObjectivePreprocessingHelper<storm::models::sparse::Mdp<double>>;
 
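
In the reworked until-formula handling above, the objective rewards are assembled per choice: each row of a first-copy state earns the probability mass it moves into psi-states. The self-contained sketch below reproduces that arithmetic on a plain row-grouped sparse matrix; the layout and names are assumptions for illustration, not Storm's SparseMatrix API:

    // One reward entry per row (choice): the constrained row sum over psi-states,
    // taken only for rows that belong to first-copy states.
    #include <cstdint>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main() {
        // Two states with one choice each: row 0 stays in state 0 w.p. 0.3 and
        // moves to state 1 w.p. 0.7; row 1 loops on state 1.
        std::vector<uint_fast64_t> rowGroupIndices = {0, 1, 2}; // state -> first row
        std::vector<std::vector<std::pair<uint_fast64_t, double>>> rows = {
            {{0, 0.3}, {1, 0.7}}, {{1, 1.0}}};
        std::vector<bool> isPsiState = {false, true};  // psi-states (second copy)
        std::vector<bool> inFirstCopy = {true, false};

        std::vector<double> objectiveRewards(rows.size(), 0.0);
        for (uint_fast64_t state = 0; state + 1 < rowGroupIndices.size(); ++state) {
            if (!inFirstCopy[state]) continue;
            for (uint_fast64_t row = rowGroupIndices[state]; row < rowGroupIndices[state + 1]; ++row) {
                for (auto const& entry : rows[row]) {
                    if (isPsiState[entry.first]) {
                        objectiveRewards[row] += entry.second; // constrained row sum
                    }
                }
            }
        }
        std::cout << objectiveRewards[0] << std::endl; // prints 0.7
        return 0;
    }
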
diff --git a/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectivePreprocessingHelper.h b/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectivePreprocessingHelper.h
index a09939ee6..5adf1bda9 100644
--- a/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectivePreprocessingHelper.h
+++ b/src/modelchecker/multiobjective/helper/SparseMdpMultiObjectivePreprocessingHelper.h
@@ -3,7 +3,6 @@
 
 #include "src/logic/Formulas.h"
 #include "src/modelchecker/multiobjective/helper/SparseMultiObjectiveModelCheckerInformation.h"
-#include "src/storage/BitVector.h"
 
 namespace storm {
     namespace modelchecker {
@@ -30,28 +29,38 @@ namespace storm {
                 
             private:
                 
-                static bool gatherObjectiveInformation(storm::logic::MultiObjectiveFormula const& formula, Information& info);
-                static bool setStepBoundOfObjective(typename Information::ObjectiveInformation& currentObjective);
-                static bool setWhetherNegativeRewardsAreConsidered(Information& info);
+                /*!
+                 * Inserts the information regarding the given formula (i.e. objective) into info.objectives
+                 *
+                 * @param formula OperatorFormula representing the objective
+                 * @param info the information collected so far
+                 */
+                static void addObjective(std::shared_ptr<storm::logic::Formula const> const& formula, Information& info);
                 
                 /*!
-                 * Apply the neccessary preprocessing for the given formula.
-                 * @param formula the current (sub)formula
-                 * @param info the current state of the preprocessing.
-                 * @return true iff there was no error
+                 * Sets the step bound for the given objective
                  */
-                // State formulas (will transform the formula and the reward model)
-                static bool preprocess(storm::logic::ProbabilityOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
-                static bool preprocess(storm::logic::RewardOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
+                static void setStepBoundOfObjective(typename Information::ObjectiveInformation& currentObjective);
                 
-                // Path formulas (will transform the model)
-                static bool preprocess(storm::logic::UntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
-                static bool preprocess(storm::logic::BoundedUntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
-                static bool preprocess(storm::logic::GloballyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
-                static bool preprocess(storm::logic::EventuallyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName = boost::none);
-                static bool preprocess(storm::logic::CumulativeRewardFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName = boost::none);
+                /*!
+                 * Sets whether we should consider negated rewards
+                 */
+                static void setWhetherNegatedRewardsAreConsidered(Information& info);
                 
-                static storm::storage::BitVector checkPropositionalFormula(storm::logic::Formula propFormula, SparseMdpModelType const& model);
+                /*!
+                 * Apply the necessary preprocessing for the given formula.
+                 * @param formula the current (sub)formula
+                 * @param info the information collected so far
+                 * @param currentObjective the currently considered objective. The given formula should be a (sub)formula of this objective
+                 * @param optionalRewardModelName the reward model name that is considered for the formula (if available)
+                 */
+                static void preprocessFormula(storm::logic::ProbabilityOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
+                static void preprocessFormula(storm::logic::RewardOperatorFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
+                static void preprocessFormula(storm::logic::UntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
+                static void preprocessFormula(storm::logic::BoundedUntilFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
+                static void preprocessFormula(storm::logic::GloballyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective);
+                static void preprocessFormula(storm::logic::EventuallyFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName = boost::none);
+                static void preprocessFormula(storm::logic::CumulativeRewardFormula const& formula, Information& info, typename Information::ObjectiveInformation& currentObjective, boost::optional<std::string> const& optionalRewardModelName = boost::none);
                 
             };
             
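
The header now documents setWhetherNegatedRewardsAreConsidered; its decision rule is compact enough to restate as a self-contained function. The Obj struct below is a hypothetical stand-in for ObjectiveInformation, not Storm's type:

    // An unbounded (step-bound-free) reward objective fixes the choice;
    // otherwise the minimizing objectives take a majority vote.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Obj {
        bool isUnboundedRewardObjective; // reward operator formula without step bound
        bool minimizes;
    };

    bool considerNegatedRewards(std::vector<Obj> const& objectives) {
        uint_fast64_t numMinimizing = 0;
        for (auto const& obj : objectives) {
            if (obj.isUnboundedRewardObjective) {
                return obj.minimizes;
            }
            numMinimizing += obj.minimizes ? 1 : 0;
        }
        return 2 * numMinimizing > objectives.size();
    }

    int main() {
        // Three bounded objectives, two of which minimize: majority says negate.
        std::cout << std::boolalpha
                  << considerNegatedRewards({{false, true}, {false, false}, {false, true}})
                  << std::endl; // prints true
        return 0;
    }
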
diff --git a/src/modelchecker/multiobjective/helper/SparseMultiObjectiveModelCheckerInformation.h b/src/modelchecker/multiobjective/helper/SparseMultiObjectiveModelCheckerInformation.h
index bc6be45b1..3ef2a3a6d 100644
--- a/src/modelchecker/multiobjective/helper/SparseMultiObjectiveModelCheckerInformation.h
+++ b/src/modelchecker/multiobjective/helper/SparseMultiObjectiveModelCheckerInformation.h
@@ -3,6 +3,7 @@
 
 #include <vector>
 #include <memory>
+#include <iomanip>
 
 #include "src/logic/Formulas.h"
 
@@ -13,8 +14,7 @@ namespace storm {
         namespace helper {
             
             template <class SparseModelType>
-            class SparseMultiObjectiveModelCheckerInformation {
-            public :
+            struct SparseMultiObjectiveModelCheckerInformation {
                 
                 typedef typename SparseModelType::ValueType ValueType;
                 typedef typename SparseModelType::RewardModelType RewardModelType;
@@ -25,6 +25,27 @@ namespace storm {
                     boost::optional<double> threshold;
                     std::string rewardModelName;
                     boost::optional<uint_fast64_t> stepBound;
+                    
+                    void printInformationToStream(std::ostream& out) const {
+                        out << std::setw(30) << originalFormula->toString();
+                        out << " \t(";
+                        out << (originalFormulaMinimizes ? "minimizes, \t" : "maximizes, \t");
+                        out << "intern threshold:";
+                        if(threshold){
+                            out << std::setw(5) << (*threshold) << ",";
+                        } else {
+                            out << " none,";
+                        }
+                        out << " \t";
+                        out << "intern reward model: " << std::setw(10) << rewardModelName << ", \t";
+                        out << "step bound:";
+                        if(stepBound) {
+                            out << std::setw(5) << (*stepBound);
+                        } else {
+                            out << " none";
+                        }
+                        out << ")" << std::endl;
+                    }
                 };
                 
                 
@@ -36,53 +57,33 @@ namespace storm {
                     //Intentionally left empty
                 }
                 
-                void setModel(SparseModelType&& newModel){
-                    model = newModel;
-                }
-                
-                void setModel(SparseModelType const& newModel){
-                    model = newModel;
-                }
-                
-                SparseModelType const& getModel() const {
-                    return model;
-                }
-                
-                void setNewToOldStateIndexMapping(std::vector<uint_fast64_t> const& newMapping){
-                    newToOldStateIndexMapping = newMapping;
-                }
-                
-                void setNewToOldStateIndexMapping(std::vector<uint_fast64_t>&& newMapping){
-                    newToOldStateIndexMapping = newMapping;
-                }
-                
-                std::vector<uint_fast64_t>const& getNewToOldStateIndexMapping() const{
-                    return newToOldStateIndexMapping;
-                }
-                
-                bool areNegativeRewardsConsidered() {
-                    return negativeRewardsConsidered;
-                }
-                
-                void setNegativeRewardsConsidered(bool value){
-                    negativeRewardsConsidered = value;
-                }
-                
-                std::vector<ObjectiveInformation>& getObjectives() {
-                    return objectives;
-                }
-                std::vector<ObjectiveInformation>const& getObjectives() const {
-                    return objectives;
-                }
-                
-                
-            private:
                 SparseModelType model;
                 std::vector<uint_fast64_t> newToOldStateIndexMapping;
-                bool negativeRewardsConsidered;
+                bool negatedRewardsConsidered;
                 
                 std::vector<ObjectiveInformation> objectives;
                 
+                void printInformationToStream(std::ostream& out) {
+                    out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+                    out << "                                                   Multi-objective Model Checker Information                                           " << std::endl;
+                    out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+                    out << std::endl;
+                    out << "Objectives:" << std::endl;
+                    out << "--------------------------------------------------------------" << std::endl;
+                    for (auto const& obj : objectives) {
+                        obj.printInformationToStream(out);
+                    }
+                    out << "--------------------------------------------------------------" << std::endl;
+                    out << std::endl;
+                    out << "Preprocessed Model Information:" << std::endl;
+                    model.printModelInformationToStream(out);
+                    out << std::endl;
+                    if(negatedRewardsConsidered){
+                        out << "The rewards in the preprocessed model are negated" << std::endl;
+                        out << std::endl;
+                    }
+                    out << "---------------------------------------------------------------------------------------------------------------------------------------" << std::endl;
+                }
                 
                 
             };
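
One preprocessing detail that is easy to miss: when negatedRewardsConsidered holds, preprocessFormula flips each objective's threshold to -threshold so that it refers to the negated reward values. A minimal sketch of that flip, using std::optional in place of boost::optional:

    #include <iostream>
    #include <optional>

    int main() {
        std::optional<double> threshold = 0.3; // e.g. taken from P>0.3 [ F "b" ]
        bool negatedRewardsConsidered = true;  // as decided by the majority vote
        if (negatedRewardsConsidered && threshold) {
            *threshold *= -1.0;                // threshold now matches negated rewards
        }
        std::cout << *threshold << std::endl;  // prints -0.3
        return 0;
    }
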