diff --git a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
index 41b768058..90d556133 100644
--- a/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
+++ b/src/storm/modelchecker/csl/helper/SparseCtmcCslHelper.cpp
@@ -632,19 +632,21 @@ namespace storm {
             }
 
             // Use Fox-Glynn to get the truncation points and the weights.
-            std::tuple<uint_fast64_t, uint_fast64_t, ValueType, std::vector<ValueType>> foxGlynnResult = storm::utility::numerical::getFoxGlynnCutoff(lambda, 1e+300, storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision() / 8.0);
-            STORM_LOG_DEBUG("Fox-Glynn cutoff points: left=" << std::get<0>(foxGlynnResult) << ", right=" << std::get<1>(foxGlynnResult));
+//            std::tuple<uint_fast64_t, uint_fast64_t, ValueType, std::vector<ValueType>> foxGlynnResult = storm::utility::numerical::getFoxGlynnCutoff(lambda, 1e+300, storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision() / 8.0);
+
+            storm::utility::numerical::FoxGlynnResult<ValueType> foxGlynnResult = storm::utility::numerical::foxGlynn(lambda, storm::settings::getModule<storm::settings::modules::GeneralSettings>().getPrecision() / 8.0);
+            STORM_LOG_DEBUG("Fox-Glynn cutoff points: left=" << foxGlynnResult.left << ", right=" << foxGlynnResult.right);
 
             // Scale the weights so they add up to one.
-            for (auto& element : std::get<3>(foxGlynnResult)) {
-                element /= std::get<2>(foxGlynnResult);
+            for (auto& element : foxGlynnResult.weights) {
+                element /= foxGlynnResult.totalWeight;
             }
 
             // If the cumulative reward is to be computed, we need to adjust the weights.
             if (useMixedPoissonProbabilities) {
                 ValueType sum = storm::utility::zero<ValueType>();
-                for (auto& element : std::get<3>(foxGlynnResult)) {
+                for (auto& element : foxGlynnResult.weights) {
                     sum += element;
                     element = (1 - sum) / uniformizationRate;
                 }
 
@@ -654,10 +656,10 @@ namespace storm {
 
             // Initialize result.
             std::vector<ValueType> result;
-            uint_fast64_t startingIteration = std::get<0>(foxGlynnResult);
+            uint_fast64_t startingIteration = foxGlynnResult.left;
             if (startingIteration == 0) {
                 result = values;
-                storm::utility::vector::scaleVectorInPlace(result, std::get<3>(foxGlynnResult)[0]);
+                storm::utility::vector::scaleVectorInPlace(result, foxGlynnResult.weights.front());
                 ++startingIteration;
             } else {
                 if (useMixedPoissonProbabilities) {
@@ -672,9 +674,9 @@ namespace storm {
 
             std::unique_ptr<storm::solver::LinearEquationSolver<ValueType>> solver = linearEquationSolverFactory.create(env, std::move(uniformizedMatrix), storm::solver::LinearEquationSolverTask::Multiply);
             solver->setCachingEnabled(true);
 
-            if (!useMixedPoissonProbabilities && std::get<0>(foxGlynnResult) > 1) {
+            if (!useMixedPoissonProbabilities && foxGlynnResult.left > 1) {
                 // Perform the matrix-vector multiplications (without adding).
-                solver->repeatedMultiply(values, addVector, std::get<0>(foxGlynnResult) - 1);
+                solver->repeatedMultiply(values, addVector, foxGlynnResult.left - 1);
             } else if (useMixedPoissonProbabilities) {
                 std::function<ValueType(ValueType const&, ValueType const&)> addAndScale = [&uniformizationRate] (ValueType const& a, ValueType const& b) { return a + b / uniformizationRate; };
@@ -689,10 +691,10 @@ namespace storm {
             // multiplication, scale and add the result.
             ValueType weight = 0;
             std::function<ValueType(ValueType const&, ValueType const&)> addAndScale = [&weight] (ValueType const& a, ValueType const& b) { return a + weight * b; };
-            for (uint_fast64_t index = startingIteration; index <= std::get<1>(foxGlynnResult); ++index) {
+            for (uint_fast64_t index = startingIteration; index <= foxGlynnResult.right; ++index) {
                 solver->repeatedMultiply(values, addVector, 1);
 
-                weight = std::get<3>(foxGlynnResult)[index - std::get<0>(foxGlynnResult)];
+                weight = foxGlynnResult.weights[index - foxGlynnResult.left];
                 storm::utility::vector::applyPointwise(result, values, result, addAndScale);
             }
 
diff --git a/src/storm/utility/numerical.cpp b/src/storm/utility/numerical.cpp
new file mode 100644
index 000000000..cec474ff7
--- /dev/null
+++ b/src/storm/utility/numerical.cpp
@@ -0,0 +1,275 @@
+#include "storm/utility/numerical.h"
+
+#include <cmath>
+#include <boost/math/constants/constants.hpp>
+
+#include "storm/utility/macros.h"
+#include "storm/utility/constants.h"
+#include "storm/exceptions/InvalidArgumentException.h"
+#include "storm/exceptions/PrecisionExceededException.h"
+
+namespace storm {
+    namespace utility {
+        namespace numerical {
+
+            template<typename ValueType>
+            FoxGlynnResult<ValueType>::FoxGlynnResult() : left(0), right(0), totalWeight(storm::utility::zero<ValueType>()) {
+                // Intentionally left empty.
+            }
+
+            /*!
+             * The following implementation of Fox and Glynn's algorithm is taken from David Jansen's patched version
+             * in MRMC, which is based on his paper:
+             *
+             * https://pms.cs.ru.nl/iris-diglib/src/getContent.php?id=2011-Jansen-UnderstandingFoxGlynn
+             *
+             * We have only adapted the code to match more of C++'s and our coding guidelines.
+             */
+
+            template<typename ValueType>
+            FoxGlynnResult<ValueType> foxGlynnFinder(ValueType lambda, ValueType epsilon) {
+                ValueType tau = std::numeric_limits<ValueType>::min();
+                ValueType omega = std::numeric_limits<ValueType>::max();
+                ValueType const sqrt_2_pi = boost::math::constants::root_two_pi<ValueType>();
+                ValueType const log10_e = std::log10(boost::math::constants::e<ValueType>());
+
+                uint64_t m = static_cast<uint64_t>(lambda);
+
+                int64_t left = 0;
+                int64_t right = 0;
+
+                // tau is only used in underflow checks, which we are going to do in the logarithm domain.
+                tau = log(tau);
+
+                // In error bound comparisons, we always compare with epsilon*sqrt_2_pi.
+                epsilon *= sqrt_2_pi;
+
+                // Compute left truncation point.
+                if (m < 25) {
+                    // For lambda below 25 the exponential can be smaller than tau. If that is the case we expect
+                    // underflows and warn the user.
+                    if (-lambda <= tau) {
+                        STORM_LOG_WARN("Fox-Glynn: 0 < lambda < 25, underflow near Poi(" << lambda << ", 0) = " << std::exp(-lambda) << ". The results are unreliable.");
+                    }
+
+                    // Zero is used as left truncation point for lambda <= 25.
+                    left = 0;
+                } else {
+                    // Compute the left truncation point for lambda >= 25 (for lambda < 25 we use zero as left truncation point).
+
+                    ValueType const bl = (1 + 1 / lambda) * std::exp((1/lambda) * 0.125);
+                    ValueType const sqrt_lambda = std::sqrt(lambda);
+                    int64_t k;
+
+                    // Start looking for the left truncation point:
+                    // * start search at k=4 (taken from original Fox-Glynn paper)
+                    // * increase the left truncation point until we fulfil the error condition
+
+                    for (k = 4;; ++k) {
+                        ValueType max_err;
+
+                        left = m - static_cast<int64_t>(std::ceil(k*sqrt_lambda + 0.5));
+
+                        // For small lambda the above calculation can yield negative truncation points, crop them here.
+                        if (left <= 0) {
+                            left = 0;
+                            break;
+                        }
+
+                        // Note that Propositions 2-4 in Fox--Glynn mix up notation: they write Phi where they mean
+                        // 1 - Phi. (In Corollaries 1 and 2, phi is used correctly again.)
+                        max_err = bl * exp(-0.5 * (k*k)) / k;
+                        if (max_err * 2 <= epsilon) {
+                            // If the error on the left hand side is smaller, we can be more lenient on the right hand
+                            // side. To this end, we now set epsilon to the part of the error that has not yet been eaten
+                            // up by the left-hand truncation.
+                            epsilon -= max_err;
+                            break;
+                        }
+                    }
+
+                    // Finally the left truncation point is found.
+                }
+
+                // Compute right truncation point.
+                {
+                    ValueType lambda_max;
+                    int64_t m_max, k;
+
+                    // According to Fox-Glynn, if lambda < 400 we should take lambda = 400, otherwise use the original
+                    // value. This is for computing the right truncation point.
+                    if (m < 400) {
+                        lambda_max = 400;
+                        m_max = 400;
+                        epsilon *= 0.662608824988162441697980;
+                        /* i.e. al = (1+1/400) * exp(1/16) * sqrt_2; epsilon /= al; */
+                    } else {
+                        lambda_max = lambda;
+                        m_max = m;
+                        epsilon *= (1 - 1 / (lambda + 1)) * 0.664265347050632847802225;
+                        /* i.e. al = (1+1/lambda) * exp(1/16) * sqrt_2; epsilon /= al; */
+                    }
+
+                    // Find right truncation point.
+
+                    // This loop is a modification to the original Fox-Glynn paper.
+                    // The search for the right truncation point is only terminated by the error condition and not by
+                    // the stop index from the FG paper. This can yield more accurate results if necessary.
+                    for (k = 4;; ++k) {
+                        // dkl_inv is between 1 - 1e-33 and 1 if lambda_max >= 400 and k >= 4; this will always be
+                        // rounded to 1.0. We therefore leave the factor out.
+                        // double dkl_inv=1 - exp(-266/401.0 * (k*sqrt(2*lambda_max) + 1.5));
+
+                        // actually: "k * (dkl_inv*epsilon/al) >= exp(-0.5 * k^2)", but epsilon has been changed appropriately.
+                        if (k * epsilon >= exp(-0.5*(k*k))) {
+                            break;
+                        }
+                    }
+                    right = m_max + static_cast<int64_t>(std::ceil(k * std::sqrt(2 * lambda_max) + 0.5));
+                    if (right > m_max + static_cast<int64_t>(std::ceil((lambda_max + 1) * 0.5))) {
+                        STORM_LOG_WARN("Fox-Glynn: right = " << right << " >> lambda = " << lambda_max << ", cannot bound the right tail. The results are unreliable.");
+                    }
+                }
+
+                // Time to set the initial value for weights.
+                FoxGlynnResult<ValueType> fgresult;
+                fgresult.left = static_cast<uint64_t>(left);
+                fgresult.right = static_cast<uint64_t>(right);
+                fgresult.weights.resize(fgresult.right - fgresult.left + 1);
+
+                fgresult.weights[m - left] = omega / (1.0e+10 * (right - left));
+
+                if (m >= 25) {
+                    // Perform underflow check.
+                    ValueType result, log_c_m_inf;
+                    int64_t i;
+
+                    // we are going to compare with tau - log(w[m]).
+                    tau -= std::log(fgresult.weights[m - left]);
+
+                    // We take the c_m_inf = 0.14627 / sqrt( m ), as for lambda >= 25
+                    // c_m = 1 / ( sqrt( 2.0 * pi * m ) ) * exp( m - lambda - 1 / ( 12.0 * m ) ) => c_m_inf.
+                    // Note that m-lambda is in the interval (-1,0], and -1/(12*m) is in [-1/(12*25),0).
+                    // So, exp(m-lambda - 1/(12*m)) is in (exp(-1-1/(12*25)),exp(0)).
+                    // Therefore, we can improve the lower bound on c_m to exp(-1-1/(12*25)) / sqrt(2*pi) = ~0.14627.
+                    // Its logarithm is -1 - 1/(12*25) - log(2*pi) * 0.5 = ~ -1.922272 (rounded towards -infinity).
+                    log_c_m_inf = -1.922272 - log((double) m) * 0.5;
+
+                    // We use FG's Proposition 6 directly (and not Corollary 4 i and ii), as k_prime may be too large
+                    // if pFG->left == 0.
+                    i = m - left;
+
+                    // Equivalent to 2*i <= m, equivalent to i <= lambda/2.
+                    if (i <= left) {
+                        // Use Proposition 6 (i). Note that Fox--Glynn are off by one in the proof of this proposition;
+                        // they sum up to i-1, but should have summed up to i. */
+                        result = log_c_m_inf
+                            - i * (i+1) * (0.5 + (2*i+1)/(6*lambda)) / lambda;
+                    } else {
+                        // Use Corollary 4 (iii). Note that k_prime <= sqrt(m+1)/m is a misprint for k_prime <= m/sqrt(m+1),
+                        // which is equivalent to left >= 0, which holds trivially.
+                        result = -lambda;
+                        if (left != 0) {
+                            // Also use Proposition 6 (ii).
+                            double result_1 = log_c_m_inf + i * log(1 - i/(double) (m+1));
+
+                            // Take the maximum.
+                            if (result_1 > result) {
+                                result = result_1;
+                            }
+                        }
+                    }
+                    if (result <= tau) {
+                        int64_t const log10_result = static_cast<int64_t>(std::floor(result * log10_e));
+                        STORM_LOG_WARN("Fox-Glynn: lambda >= 25, underflow near Poi(" << lambda << "," << left << ") <= " << std::exp(result - log10_result/log10_e) << log10_result << ". The results are unreliable.");
+                    }
+
+                    // We still have to perform an underflow check for the right truncation point when lambda >= 400.
+                    if (m >= 400) {
+                        // Use Proposition 5 of Fox--Glynn.
+                        i = right - m;
+                        result = log_c_m_inf - i * (i + 1) / (2 * lambda);
+                        if (result <= tau) {
+                            int64_t const log10_result = static_cast<int64_t>(std::floor(result * log10_e));
+                            STORM_LOG_WARN("Fox-Glynn: lambda >= 25, underflow near Poi(" << lambda << "," << right << ") <= " << std::exp(result - log10_result/log10_e) << log10_result << ". The results are unreliable.");
+                        }
+                    }
+                }
+
+                return fgresult;
+            }
+
+            template<typename ValueType>
+            FoxGlynnResult<ValueType> foxGlynnWeighter(ValueType lambda, ValueType epsilon) {
+                ValueType tau = std::numeric_limits<ValueType>::min();
+
+                // The magic m point.
+                uint64_t m = static_cast<uint64_t>(lambda);
+                int64_t j, t;
+
+                FoxGlynnResult<ValueType> result = foxGlynnFinder(lambda, epsilon);
+
+                // Fill the left side of the array.
+                for (j = m - result.left; j > 0; --j) {
+                    result.weights[j - 1] = (j + result.left) / lambda * result.weights[j];
+                }
+
+                t = result.right - result.left;
+
+                // Fill the right side of the array, have two cases lambda < 400 & lambda >= 400.
+                if (m < 400) {
+                    // Perform the underflow check, according to Fox-Glynn.
+                    STORM_LOG_THROW(result.right <= 600, storm::exceptions::PrecisionExceededException, "Fox-Glynn: " << result.right << " > 600, underflow is possible.");
+
+                    // Compute weights.
+                    for (j = m - result.left; j < t; ++j) {
+                        ValueType q = lambda / (j + 1 + result.left);
+                        if (result.weights[j] > tau / q) {
+                            result.weights[j + 1] = q * result.weights[j];
+                        } else {
+                            t = j;
+                            result.right = j + result.left;
+
+                            // It's time to compute W.
+                            break;
+                        }
+                    }
+                } else {
+                    // Compute weights.
+                    for (j = m - result.left; j < t; ++j) {
+                        result.weights[j + 1] = lambda / (j + 1 + result.left) * result.weights[j];
+                    }
+                }
+
+                // It is time to compute the normalization weight W.
+                result.totalWeight = storm::utility::zero<ValueType>();
+                j = 0;
+
+                // t was set above.
+                while(j < t) {
+                    if (result.weights[j] <= result.weights[t]) {
+                        result.totalWeight += result.weights[j];
+                        j++;
+                    } else {
+                        result.totalWeight += result.weights[t];
+                        t--;
+                    }
+                }
+                result.totalWeight += result.weights[j];
+
+                STORM_LOG_TRACE("Fox-Glynn: ltp = " << result.left << ", rtp = " << result.right << ", w = " << result.totalWeight << ".");
+
+                return result;
+            }
+
+            template<typename ValueType>
+            FoxGlynnResult<ValueType> foxGlynn(ValueType lambda, ValueType epsilon) {
+                STORM_LOG_THROW(lambda > 0, storm::exceptions::InvalidArgumentException, "Fox-Glynn requires positive lambda.");
+                return foxGlynnWeighter(lambda, epsilon);
+            }
+
+            template FoxGlynnResult<double> foxGlynn(double lambda, double epsilon);
+
+        }
+    }
+}
diff --git a/src/storm/utility/numerical.h b/src/storm/utility/numerical.h
index bf9a533b4..7bd18cebe 100644
--- a/src/storm/utility/numerical.h
+++ b/src/storm/utility/numerical.h
@@ -1,131 +1,23 @@
 #include <vector>
-#include <tuple>
-#include <cmath>
-
-#include "storm/utility/macros.h"
-#include "storm/utility/constants.h"
-#include "storm/exceptions/InvalidArgumentException.h"
-#include "storm/exceptions/OutOfRangeException.h"
+#include <cstdint>
 
 namespace storm {
     namespace utility {
        namespace numerical {
+
+            template<typename ValueType>
+            struct FoxGlynnResult {
+                FoxGlynnResult();
+
+                uint64_t left;
+                uint64_t right;
+                ValueType totalWeight;
+                std::vector<ValueType> weights;
+            };
+
             template<typename ValueType>
-            std::tuple<uint_fast64_t, uint_fast64_t, ValueType, std::vector<ValueType>> getFoxGlynnCutoff(ValueType lambda, ValueType overflow, ValueType accuracy) {
-                STORM_LOG_THROW(lambda != storm::utility::zero<ValueType>(), storm::exceptions::InvalidArgumentException, "Error in Fox-Glynn algorithm: lambda must not be zero.");
+            FoxGlynnResult<ValueType> foxGlynn(ValueType lambda, ValueType epsilon);
 
-                // This code is a modified version of the one in PRISM. According to their implementation, for lambda
-                // smaller than 400, we compute the result using the naive method.
-                if (lambda < 400) {
-                    ValueType eToPowerMinusLambda = std::exp(-lambda);
-                    ValueType targetValue = (1 - accuracy) / eToPowerMinusLambda;
-                    std::vector<ValueType> weights;
-
-                    ValueType exactlyKEvents = 1;
-                    ValueType atMostKEvents = exactlyKEvents;
-                    weights.push_back(exactlyKEvents * eToPowerMinusLambda);
-
-                    uint_fast64_t k = 1;
-                    do {
-                        exactlyKEvents *= lambda / k;
-                        atMostKEvents += exactlyKEvents;
-                        weights.push_back(exactlyKEvents * eToPowerMinusLambda);
-                        ++k;
-                    } while (atMostKEvents < targetValue);
-
-                    return std::make_tuple(0, k - 1, 1.0, weights);
-                } else {
-                    STORM_LOG_THROW(accuracy >= 1e-10, storm::exceptions::InvalidArgumentException, "Error in Fox-Glynn algorithm: the accuracy must not be below 1e-10.");
-
-                    // Factor from Fox&Glynn's paper. The paper does not explain where it comes from.
-                    ValueType factor = 1e+10;
-
-                    // Now start the Finder algorithm to find the truncation points.
-                    ValueType m = std::floor(lambda);
-                    uint_fast64_t leftTruncationPoint = 0, rightTruncationPoint = 0;
-                    {
-                        // Factors used by the corollaries explained in Fox & Glynns paper.
-                        // Square root of pi.
-                        ValueType sqrtpi = 1.77245385090551602729;
-
-                        // Square root of 2.
-                        ValueType sqrt2 = 1.41421356237309504880;
-
-                        // Set up a_\lambda, b_\lambda, and the square root of lambda.
-                        ValueType aLambda = 0, bLambda = 0, sqrtLambda = 0;
-                        if (m < 400) {
-                            sqrtLambda = std::sqrt(400.0);
-                            aLambda = (1.0 + 1.0 / 400.0) * std::exp(0.0625) * sqrt2;
-                            bLambda = (1.0 + 1.0 / 400.0) * std::exp(0.125 / 400.0);
-                        } else {
-                            sqrtLambda = std::sqrt(lambda);
-                            aLambda = (1.0 + 1.0 / lambda) * std::exp(0.0625) * sqrt2;
-                            bLambda = (1.0 + 1.0 / lambda) * std::exp(0.125 / lambda);
-                        }
-
-                        // Use Corollary 1 from the paper to find the right truncation point.
-                        uint_fast64_t k = 4;
-
-                        ValueType dkl = 1.0 / (1 - std::exp(-(2.0 / 9.0) * (k * sqrt2 * sqrtLambda + 1.5)));
-
-                        // According to David Jansen the upper bound can be ignored to achieve more accurate results.
-                        // Right hand side of the equation in Corollary 1.
-                        while ((accuracy / 2.0) < (aLambda * dkl * std::exp(-(k*k / 2.0)) / (k * sqrt2 * sqrtpi))) {
-                            ++k;
-
-                            // d(k,Lambda) from the paper.
-                            dkl = 1.0 / (1 - std::exp(-(2.0 / 9.0)*(k * sqrt2 * sqrtLambda + 1.5)));
-                        }
-
-                        // Left hand side of the equation in Corollary 1.
-                        rightTruncationPoint = static_cast<uint_fast64_t>(std::ceil((m + k * sqrt2 * sqrtLambda + 1.5)));
-
-                        // Use Corollary 2 to find left truncation point.
-                        k = 4;
-
-                        // Right hand side of the equation in Corollary 2.
-                        while ((accuracy / 2.0) < ((bLambda * std::exp(-(k*k / 2.0))) / (k * sqrt2 * sqrtpi))) {
-                            ++k;
-                        }
-
-                        // Left hand side of the equation in Corollary 2.
-                        leftTruncationPoint = static_cast<uint_fast64_t>(std::max(std::trunc(m - k * sqrtLambda - 1.5), storm::utility::zero<ValueType>()));
-
-                        // Check for underflow.
-                        STORM_LOG_THROW(std::trunc((m - k * sqrtLambda - 1.5)) >= 0, storm::exceptions::OutOfRangeException, "Error in Fox-Glynn algorithm: Underflow of left truncation point.");
-                    }
-
-                    std::vector<ValueType> weights(rightTruncationPoint - leftTruncationPoint + 1);
-                    weights[m - leftTruncationPoint] = overflow / (factor * (rightTruncationPoint - leftTruncationPoint));
-                    for (uint_fast64_t j = m; j > leftTruncationPoint; --j) {
-                        weights[j - 1 - leftTruncationPoint] = (j / lambda) * weights[j - leftTruncationPoint];
-                    }
-
-                    for (uint_fast64_t j = m; j < rightTruncationPoint; ++j) {
-                        weights[j + 1 - leftTruncationPoint] = (lambda / (j + 1)) * weights[j - leftTruncationPoint];
-                    }
-
-
-                    // Compute the total weight and start with smallest to avoid roundoff errors.
-                    ValueType totalWeight = storm::utility::zero<ValueType>();
-
-                    uint_fast64_t s = leftTruncationPoint;
-                    uint_fast64_t t = rightTruncationPoint;
-                    while (s < t) {
-                        if (weights[s - leftTruncationPoint] <= weights[t - leftTruncationPoint]) {
-                            totalWeight += weights[s - leftTruncationPoint];
-                            ++s;
-                        } else {
-                            totalWeight += weights[t - leftTruncationPoint];
-                            --t;
-                        }
-                    }
-
-                    totalWeight += weights[s - leftTruncationPoint];
-
-                    return std::make_tuple(leftTruncationPoint, rightTruncationPoint, totalWeight, weights);
-                }
-            }
         }
     }
 }
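Note (not part of the patch): the sketch below illustrates how a call site migrates from the removed tuple-based getFoxGlynnCutoff to the new struct-based foxGlynn, including the weight normalization that SparseCtmcCslHelper performs in the first hunk above. The lambda and epsilon values are arbitrary example inputs, and linking against the Storm library is assumed.

```cpp
#include <iostream>

#include "storm/utility/numerical.h"

int main() {
    double const lambda = 100.0;        // e.g. uniformization rate times the time bound
    double const epsilon = 1e-6 / 8.0;  // error budget handed to Fox-Glynn

    // New interface: named fields instead of std::get<0..3> on a tuple.
    storm::utility::numerical::FoxGlynnResult<double> fg = storm::utility::numerical::foxGlynn(lambda, epsilon);

    // Scale the weights so they add up to one (same post-processing as in the helper).
    for (auto& w : fg.weights) {
        w /= fg.totalWeight;
    }

    std::cout << "left=" << fg.left << ", right=" << fg.right << ", #weights=" << fg.weights.size() << std::endl;
    return 0;
}
```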
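For intuition about what the result contains: up to the common scale factor totalWeight, weights[k - left] approximates the Poisson probability exp(-lambda) * lambda^k / k!, which is what the removed naive branch (lambda < 400) computed directly. The following standalone sketch of that naive computation uses only the standard library and arbitrary example values; it breaks down numerically for large lambda (exp(-lambda) underflows), which is exactly the case the scaled Fox-Glynn weights are designed to handle.

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    double const lambda = 100.0;   // example value; the naive method fails for large lambda
    double const accuracy = 1e-6;

    double const expMinusLambda = std::exp(-lambda);
    double term = 1.0;             // lambda^k / k! for k = 0
    double cumulative = term;      // running sum of lambda^k / k!
    std::vector<double> weights{term * expMinusLambda};

    // Accumulate Poisson terms until 1 - accuracy of the probability mass is covered,
    // mirroring the removed lambda < 400 branch of getFoxGlynnCutoff.
    uint64_t k = 1;
    while (cumulative < (1 - accuracy) / expMinusLambda) {
        term *= lambda / k;
        cumulative += term;
        weights.push_back(term * expMinusLambda);
        ++k;
    }

    // Here left = 0, right = k - 1, and the weights already sum to (almost) one.
    std::cout << "right truncation point: " << (k - 1) << ", #weights: " << weights.size() << std::endl;
    return 0;
}
```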