From 4456069e81e995d844bb7e447832f5f258488da5 Mon Sep 17 00:00:00 2001
From: Matthias Volk
Date: Thu, 12 Oct 2017 18:19:52 +0200
Subject: [PATCH] Fixed typo

---
 src/storm/models/sparse/MarkovAutomaton.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/storm/models/sparse/MarkovAutomaton.h b/src/storm/models/sparse/MarkovAutomaton.h
index 249766b62..89c73f83c 100644
--- a/src/storm/models/sparse/MarkovAutomaton.h
+++ b/src/storm/models/sparse/MarkovAutomaton.h
@@ -22,7 +22,7 @@ namespace storm {
  * For hybrid states (i.e., states with Markovian and probabilistic transitions), it is assumed that the first
  * choice corresponds to the markovian transitions.
  *
- * @param transitionMatrix The matrix representing the transitions in the model in terms of rates (markocian choices) and probabilities (probabilistic choices).
+ * @param transitionMatrix The matrix representing the transitions in the model in terms of rates (Markovian choices) and probabilities (probabilistic choices).
  * @param stateLabeling The labeling of the states.
  * @param markovianStates A bit vector indicating the Markovian states of the automaton (i.e., states with at least one markovian transition).
  * @param rewardModels A mapping of reward model names to reward models.
@@ -38,7 +38,7 @@ namespace storm {
  * For hybrid states (i.e., states with Markovian and probabilistic transitions), it is assumed that the first
  * choice corresponds to the markovian transitions.
  *
- * @param transitionMatrix The matrix representing the transitions in the model in terms of rates (markocian choices) and probabilities (probabilistic choices).
+ * @param transitionMatrix The matrix representing the transitions in the model in terms of rates (Markovian choices) and probabilities (probabilistic choices).
  * @param stateLabeling The labeling of the states.
  * @param markovianStates A bit vector indicating the Markovian states of the automaton (i.e., states with at least one markovian transition).
  * @param rewardModels A mapping of reward model names to reward models.