From 8b4393d4d30f3cdbb570f50988946b222c2c61b0 Mon Sep 17 00:00:00 2001
From: Daniel Weindl
Date: Mon, 21 Oct 2024 12:28:10 +0200
Subject: [PATCH] clang-format

---
 .../exampleSteadystateScaledTest.h | 202 +++--
 examples/parpeamici/steadystate/main.cpp | 45 +-
 .../steadystate/main_multicondition.cpp | 79 +-
 .../parpeamici/steadystate/main_simulator.cpp | 35 +-
 .../steadyStateMultiConditionDataprovider.cpp | 18 +-
 .../steadyStateMultiConditionDataprovider.h | 19 +-
 .../steadystate/steadystateProblem.cpp | 94 +-
 .../steadystate/steadystateProblem.h | 17 +-
 .../steadystateProblemParallel.cpp | 75 +-
 .../steadystate/steadystateProblemParallel.h | 41 +-
 examples/parpeloadbalancer/main.cpp | 25 +-
 include/parpeamici/amiciMisc.h | 14 +-
 include/parpeamici/amiciSimulationRunner.h | 113 ++-
 include/parpeamici/hierarchicalOptimization.h | 252 +++---
 ...lOptimizationAnalyticalParameterProvider.h | 46 +-
 .../parpeamici/multiConditionDataProvider.h | 222 +++--
 include/parpeamici/multiConditionProblem.h | 223 ++---
 include/parpeamici/optimizationApplication.h | 44 +-
 include/parpeamici/simulationResultWriter.h | 56 +-
 include/parpeamici/standaloneSimulator.h | 98 +--
 include/parpecommon/costFunction.h | 57 +-
 include/parpecommon/functions.h | 130 ++-
 include/parpecommon/hdf5Misc.h | 253 +++---
 include/parpecommon/logging.h | 42 +-
 include/parpecommon/misc.h | 121 ++-
 include/parpecommon/model.h | 92 +-
 include/parpecommon/parpeException.h | 4 +-
 .../parpeloadbalancer/loadBalancerMaster.h | 46 +-
 .../parpeloadbalancer/loadBalancerWorker.h | 9 +-
 .../localOptimizationCeres.h | 5 +-
 .../parpeoptimization/localOptimizationDlib.h | 4 +-
 .../localOptimizationFides.h | 7 +-
 .../parpeoptimization/localOptimizationFsqp.h | 10 +-
 .../localOptimizationIpopt.h | 3 +-
 .../localOptimizationIpoptTNLP.h | 153 ++--
 .../localOptimizationToms611.h | 3 +-
 .../parpeoptimization/minibatchOptimization.h | 750 +++++++++-------
 .../multiStartOptimization.h | 11 +-
 .../parpeoptimization/optimizationOptions.h | 50 +-
 .../parpeoptimization/optimizationProblem.h | 117 +--
 .../optimizationResultWriter.h | 61 +-
 include/parpeoptimization/optimizer.h | 3 +-
 src/parpeamici/amiciMisc.cpp | 21 +-
 src/parpeamici/amiciSimulationRunner.cpp | 135 ++-
 src/parpeamici/hierarchicalOptimization.cpp | 808 +++++++++---------
 ...ptimizationAnalyticalParameterProvider.cpp | 63 +-
 src/parpeamici/multiConditionDataProvider.cpp | 532 +++++-------
 src/parpeamici/multiConditionProblem.cpp | 779 +++++++++--------
 src/parpeamici/optimizationApplication.cpp | 159 ++--
 src/parpeamici/simulationResultWriter.cpp | 177 ++--
 src/parpeamici/standaloneSimulator.cpp | 602 ++++++-------
 src/parpecommon/costFunction.cpp | 6 +-
 src/parpecommon/functions.cpp | 5 +-
 src/parpecommon/hdf5Misc.cpp | 603 +++++++------
 src/parpecommon/logging.cpp | 102 +--
 src/parpecommon/misc.cpp | 132 ++-
 src/parpecommon/model.cpp | 64 +-
 src/parpecommon/parpeException.cpp | 8 +-
 src/parpeloadbalancer/loadBalancerMaster.cpp | 129 +--
 src/parpeloadbalancer/loadBalancerWorker.cpp | 27 +-
 .../localOptimizationCeres.cpp | 190 ++--
 .../localOptimizationDlib.cpp | 44 +-
 .../localOptimizationFides.cpp | 178 ++--
 .../localOptimizationFsqp.cpp | 302 ++++---
 .../localOptimizationIpopt.cpp | 93 +-
 .../localOptimizationIpoptTNLP.cpp | 206 +++--
 .../localOptimizationToms611.cpp | 177 ++--
 .../minibatchOptimization.cpp | 276 +++---
 .../multiStartOptimization.cpp | 124 +--
 src/parpeoptimization/optimizationOptions.cpp | 207 ++---
 src/parpeoptimization/optimizationProblem.cpp | 349 +++++--
.../optimizationResultWriter.cpp | 199 +++-- templates/main.cpp | 70 +- templates/main_debug.cpp | 37 +- templates/main_nominal.cpp | 44 +- templates/main_simulate.cpp | 42 +- .../parpeamici/amiciSimulationRunnerTest.cpp | 10 +- .../hierarchicalOptimizationTest.cpp | 545 ++++++------ .../parpeamici/multiConditionProblemTest.cpp | 1 - .../parpeamici/simulationResultWriterTest.cpp | 72 +- tests/parpecommon/commonTests.cpp | 180 ++-- tests/parpecommon/hdf5MiscTests.cpp | 82 +- tests/parpecommon/testingMisc.cpp | 99 ++- tests/parpecommon/testingMisc.h | 33 +- .../loadBalancerMasterTest.cpp | 77 +- .../localOptimizationCeresTest.cpp | 31 +- .../localOptimizationFidesTest.cpp | 29 +- .../localOptimizationFsqpTest.cpp | 36 +- .../localOptimizationIpoptTest.cpp | 49 +- .../localOptimizationToms611Test.cpp | 95 +- tests/parpeoptimization/main.cpp | 8 +- .../minibatchOptimizationTest.cpp | 134 +-- .../multiStartOptimizationTest.cpp | 2 +- .../optimizationOptionsTest.cpp | 52 +- .../optimizationProblemTest.cpp | 112 ++- .../optimizationResultWriterTest.cpp | 16 +- .../quadraticTestProblem.cpp | 106 +-- .../parpeoptimization/quadraticTestProblem.h | 178 ++-- 98 files changed, 6717 insertions(+), 5769 deletions(-) mode change 100755 => 100644 include/parpeoptimization/minibatchOptimization.h mode change 100755 => 100644 src/parpeoptimization/minibatchOptimization.cpp diff --git a/examples/parpeamici/steadystate/exampleSteadystateScaledTest.h b/examples/parpeamici/steadystate/exampleSteadystateScaledTest.h index 73e9f9a0e..3660c64bc 100644 --- a/examples/parpeamici/steadystate/exampleSteadystateScaledTest.h +++ b/examples/parpeamici/steadystate/exampleSteadystateScaledTest.h @@ -1,15 +1,15 @@ #include -#include -#include -#include #include +#include +#include +#include #include "../../../tests/parpecommon/testingMisc.h" #ifdef PARPE_ENABLE_IPOPT -#include #include +#include #include "wrapfunctions.h" @@ -19,8 +19,7 @@ class steadystateProblemTests : public ::testing::Test { -protected: - + protected: /* const std::vector t { 1.0e8 }; const std::vector k { 0.1, 0.4, 0.7, 1.0 }; @@ -29,23 +28,26 @@ class steadystateProblemTests : public ::testing::Test { 0.437977375496898, 0.033333333333333}; */ - const int scalingParameterIdx = 5; - const int offsetParameterIdx = 6; - const int scaledObservableIdx = 3; - const int offsettedObservableIdx = 4; - const std::vector t { 1.0e8 }; + int const scalingParameterIdx = 5; + int const offsetParameterIdx = 6; + int const scaledObservableIdx = 3; + int const offsettedObservableIdx = 4; + std::vector const t{1.0e8}; // const std::vector k { }; - //const std::vector p { 1.0, 0.5, 0.4, 2.0, 0.1, 0.2, 0.2, 0.2, 2.0, 0.2, 3.0, 0.2, 0.2 }; - const std::vector x0 { 0.1, 0.4, 0.7 }; - const std::vector xSteadystateExp {0.456644592142075, - 0.437977375496898, - 0.033333333333333}; - const std::vector yExp {0.456644592142075, - 0.437977375496898, - 0.033333333333333, - 2.0 * 0.456644592142075, - 3.0 + 0.437977375496898, - 0.456644592142075}; + // const std::vector p { 1.0, 0.5, 0.4, 2.0, 0.1, 0.2, 0.2, + // 0.2, 2.0, 0.2, 3.0, 0.2, 0.2 }; + std::vector const x0{0.1, 0.4, 0.7}; + std::vector const xSteadystateExp{ + 0.456644592142075, + 0.437977375496898, + 0.033333333333333}; + std::vector const yExp{ + 0.456644592142075, + 0.437977375496898, + 0.033333333333333, + 2.0 * 0.456644592142075, + 3.0 + 0.437977375496898, + 0.456644592142075}; }; TEST_F(steadystateProblemTests, testSteadystate) { @@ -61,13 +63,15 @@ TEST_F(steadystateProblemTests, testSteadystate) { auto rdata = 
amici::runAmiciSimulation(*solver, nullptr, *model); // verify steadystate concentrations - parpe::checkEqualArray(xSteadystateExp.data(), - rdata->x.data(), - static_cast(xSteadystateExp.size()), - 1e-5, 1e-5); + parpe::checkEqualArray( + xSteadystateExp.data(), + rdata->x.data(), + static_cast(xSteadystateExp.size()), + 1e-5, + 1e-5); // verify likelihood for matching measurement / simulation - amici::ExpData edata {*model}; + amici::ExpData edata{*model}; edata.setObservedData(yExp); edata.setObservedDataStdDev(std::vector(yExp.size(), 1.0)); rdata = amici::runAmiciSimulation(*solver, &edata, *model); @@ -75,8 +79,10 @@ TEST_F(steadystateProblemTests, testSteadystate) { EXPECT_EQ(rdata->status, amici::AMICI_SUCCESS); EXPECT_NEAR(1e-5, rdata->chi2, 1e-5); - EXPECT_NEAR(parpe::getLogLikelihoodOffset(edata.nt() * edata.nytrue()), - rdata->llh, 1e-5); + EXPECT_NEAR( + parpe::getLogLikelihoodOffset(edata.nt() * edata.nytrue()), + rdata->llh, + 1e-5); } TEST_F(steadystateProblemTests, testSteadystateMultiCond) { @@ -90,57 +96,62 @@ TEST_F(steadystateProblemTests, testSteadystateMultiCond) { model->setTimepoints(t); model->setInitialStates(x0); - //model->setParameters(p); + // model->setParameters(p); - parpe::MultiConditionDataProviderDefault dp(std::move(model), - modelNonOwning->getSolver()); + parpe::MultiConditionDataProviderDefault dp( + std::move(model), modelNonOwning->getSolver()); dp.edata_.emplace_back(amici::ExpData(*modelNonOwning)); dp.edata_[0].fixedParameters = modelNonOwning->getFixedParameters(); dp.edata_[0].setObservedData(yExp); dp.edata_[0].setObservedDataStdDev(std::vector(yExp.size(), 1.0)); - //parpe::AmiciSummedGradientFunction(&dp, nullptr); + // parpe::AmiciSummedGradientFunction(&dp, nullptr); parpe::MultiConditionProblem problem(&dp); double cost; problem.cost_fun_->evaluate(p, cost, gsl::span()); - EXPECT_NEAR(-parpe::getLogLikelihoodOffset( - dp.edata_[0].getObservedData().size()), cost, 1e-5); + EXPECT_NEAR( + -parpe::getLogLikelihoodOffset(dp.edata_[0].getObservedData().size()), + cost, + 1e-5); } - TEST_F(steadystateProblemTests, testSteadystateHierarchical) { // introduce scaling parameters auto model = amici::generic_model::getModel(); - //model->setFixedParameters(k); + // model->setFixedParameters(k); model->setInitialStates(x0); - //model->setParameters(p); + // model->setParameters(p); model->setTimepoints(t); auto modelNonOwning = model.get(); - const double scalingExp = 2.0; // scaling parameter - const double offsetExp = 2.0; // offset parameter - const std::vector pReduced { 1.0, 0.5, 0.4, 2.0, 0.1, /*2.0, 3.0,*/ 0.2, 4.0 }; + double const scalingExp = 2.0; // scaling parameter + double const offsetExp = 2.0; // offset parameter + std::vector const pReduced{ + 1.0, 0.5, 0.4, 2.0, 0.1, /*2.0, 3.0,*/ 0.2, 4.0}; auto yScaledExp = yExp; yScaledExp[scaledObservableIdx] = scalingExp * yExp[0]; yScaledExp[offsettedObservableIdx] = offsetExp + yExp[1]; - parpe::MultiConditionDataProviderDefault dp(std::move(model), modelNonOwning->getSolver()); + parpe::MultiConditionDataProviderDefault dp( + std::move(model), modelNonOwning->getSolver()); // x0? 
dp.edata_.emplace_back(amici::ExpData(*modelNonOwning)); dp.edata_[0].fixedParameters = modelNonOwning->getFixedParameters(); dp.edata_[0].setObservedData(yScaledExp); dp.edata_[0].setObservedDataStdDev(std::vector(yExp.size(), 1.0)); - //parpe::MultiConditionProblem problem(&dp); + // parpe::MultiConditionProblem problem(&dp); - auto scalings = std::make_unique(); + auto scalings = + std::make_unique(); scalings->conditionsForParameter.push_back({0}); scalings->optimizationParameterIndices.push_back(scalingParameterIdx); // x[scalingIdx][conditionIdx] -> std::vector of observableIndicies scalings->mapping.resize(1); scalings->mapping[0][0] = {scaledObservableIdx}; - auto offsets = std::make_unique(); + auto offsets = + std::make_unique(); offsets->conditionsForParameter.push_back({0}); offsets->optimizationParameterIndices.push_back(offsetParameterIdx); // x[scalingIdx][conditionIdx] -> std::vector of observableIndicies @@ -149,39 +160,40 @@ TEST_F(steadystateProblemTests, testSteadystateHierarchical) { auto sigmas = std::make_unique(); - auto gradFun = std::make_unique(&dp, nullptr, nullptr); - parpe::HierarchicalOptimizationWrapper hier(gradFun.get(), - std::move(scalings), - std::move(offsets), - std::move(sigmas), - dp.getNumberOfSimulationConditions(), - modelNonOwning->nytrue, - parpe::ErrorModel::normal); + auto gradFun = std::make_unique( + &dp, nullptr, nullptr); + parpe::HierarchicalOptimizationWrapper hier( + gradFun.get(), + std::move(scalings), + std::move(offsets), + std::move(sigmas), + dp.getNumberOfSimulationConditions(), + modelNonOwning->nytrue, + parpe::ErrorModel::normal); // TODO: need to adapt to changed model -// double cost; -// hier.evaluate(pReduced, cost, gsl::span(), nullptr, nullptr); -// EXPECT_NEAR(-parpe::getLogLikelihoodOffset( -// dp.edata[0].getObservedData().size()), cost, 1e-5); - -// const std::vector pFull { 1.0, 0.5, 0.4, 2.0, -// 0.1, scalingExp, offsetExp, 0.2, 4.0 }; -// hier.fun->evaluate(pFull, {0}, cost, gsl::span(), nullptr, nullptr); -// EXPECT_NEAR(-parpe::getLogLikelihoodOffset( -// dp.edata[0].getObservedData().size()), cost, 1e-5); + // double cost; + // hier.evaluate(pReduced, cost, gsl::span(), nullptr, nullptr); + // EXPECT_NEAR(-parpe::getLogLikelihoodOffset( + // dp.edata[0].getObservedData().size()), cost, 1e-5); + + // const std::vector pFull { 1.0, 0.5, 0.4, 2.0, + // 0.1, scalingExp, offsetExp, 0.2, 4.0 + // }; + // hier.fun->evaluate(pFull, {0}, cost, gsl::span(), nullptr, + // nullptr); EXPECT_NEAR(-parpe::getLogLikelihoodOffset( + // dp.edata[0].getObservedData().size()), cost, 1e-5); } - - -//TEST(steadystateProblemTests, testOptimizationHierarchical) { -// /* setup model & solver */ -// // introduce scaling parameters -// auto model = getModel(); -// //model->setFixedParameters(k); -// //model->setInitialStates(x0); -// //model->setParameters(p); -// model->setTimepoints(t); -// model->requireSensitivitiesForAllParameters(); -// auto modelNonOwning = model.get(); +// TEST(steadystateProblemTests, testOptimizationHierarchical) { +// /* setup model & solver */ +// // introduce scaling parameters +// auto model = getModel(); +// //model->setFixedParameters(k); +// //model->setInitialStates(x0); +// //model->setParameters(p); +// model->setTimepoints(t); +// model->requireSensitivitiesForAllParameters(); +// auto modelNonOwning = model.get(); // auto solver = model->getSolver(); // solver->setSensitivityMethod(amici::SensitivityMethod::adjoint); @@ -189,12 +201,14 @@ TEST_F(steadystateProblemTests, testSteadystateHierarchical) 
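// Note on the llh checks in the tests above: parpe::getLogLikelihoodOffset(n)
// is compared against rdata->llh when the measurements equal the simulated
// outputs and all sigmas are 1. For normally distributed noise,
//   llh = sum_i [ -0.5 * log(2*pi*sigma_i^2) - 0.5 * ((y_i - yhat_i)/sigma_i)^2 ],
// so in that case only the constant term -n/2 * log(2*pi) remains. The sketch
// below illustrates this identity; that getLogLikelihoodOffset returns exactly
// this constant is an assumption for illustration, not part of this patch.
#include <cmath>
#include <cstddef>
#include <vector>

double gaussianLlhOffset(int nDataPoints) {
    constexpr double pi = 3.14159265358979323846;
    return -0.5 * nDataPoints * std::log(2.0 * pi);
}

double gaussianLlh(
    std::vector<double> const& measured,
    std::vector<double> const& simulated,
    double sigma) {
    constexpr double pi = 3.14159265358979323846;
    double llh = 0.0;
    for (std::size_t i = 0; i < measured.size(); ++i) {
        double const res = (measured[i] - simulated[i]) / sigma;
        llh += -0.5 * std::log(2.0 * pi * sigma * sigma) - 0.5 * res * res;
    }
    // equals gaussianLlhOffset(measured.size()) for measured == simulated, sigma == 1
    return llh;
}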
{ // /* generate scaled data */ // const double scalingExp = 2.0; // scaling parameter // const double offsetExp = 2.0; // offset parameter -// const std::vector pReduced { 1.0, 0.5, 0.4, /*2.0, 0.1,*/ 2.0, 3.0, 0.2, 4.0 }; +// const std::vector pReduced { 1.0, 0.5, 0.4, /*2.0, +// 0.1,*/ 2.0, 3.0, 0.2, 4.0 }; // auto yScaledExp = yExp; // yScaledExp[scaledObservableIdx] = scalingExp * yExp[0]; // yScaledExp[offsettedObservableIdx] = offsetExp + yExp[1]; -// parpe::MultiConditionDataProviderDefault dp(std::move(model), std::move(solver)); +// parpe::MultiConditionDataProviderDefault dp(std::move(model), +// std::move(solver)); // // x0? // dp.edata.push_back(amici::ExpData(*modelNonOwning)); // dp.edata[0].fixedParameters = modelNonOwning->getFixedParameters(); @@ -204,7 +218,8 @@ TEST_F(steadystateProblemTests, testSteadystateHierarchical) { // /* setup hierarchical optimization */ // // one scaling parameter -// auto scalings = std::make_unique(); +// auto scalings = +// std::make_unique(); // scalings->conditionsForParameter.push_back({0}); // scalings->optimizationParameterIndices.push_back(scalingParameterIdx); // // x[scalingIdx][conditionIdx] -> std::vector of observableIndicies @@ -212,18 +227,21 @@ TEST_F(steadystateProblemTests, testSteadystateHierarchical) { // scalings->mapping[0][0] = {scaledObservableIdx}; // // analytical offset parameter -// auto offsets = std::make_unique(); +// auto offsets = +// std::make_unique(); // offsets->conditionsForParameter.push_back({0}); // offsets->optimizationParameterIndices.push_back(offsetParameterIdx); // // x[scalingIdx][conditionIdx] -> std::vector of observableIndicies // offsets->mapping.resize(1); // offsets->mapping[0][0] = {offsettedObservableIdx}; -// auto sigmas = std::make_unique(); +// auto sigmas = +// std::make_unique(); // // create wrapper -// auto gradFun = std::make_unique(&dp, nullptr, nullptr); -// auto hier = std::make_unique(std::move(gradFun), +// auto gradFun = std::make_unique(&dp, +// nullptr, nullptr); auto hier = +// std::make_unique(std::move(gradFun), // std::move(scalings), // std::move(offsets), // std::move(sigmas), @@ -234,14 +252,19 @@ TEST_F(steadystateProblemTests, testSteadystateHierarchical) { // // evaluate and ensure scaling factor is computed so that y_mes = y_sim // double cost; // hier->evaluate(pReduced, cost, gsl::span(), nullptr, nullptr); -// DOUBLES_EQUAL(-parpe::getLogLikelihoodOffset(dp.edata[0].getObservedData().size()), cost, 1e-5); +// DOUBLES_EQUAL(-parpe::getLogLikelihoodOffset(dp.edata[0].getObservedData().size()), +// cost, 1e-5); // const std::vector pFull { 1.0, 0.5, 0.4, 2.0, -// 0.1, scalingExp, offsetExp, 1.0, 0.2, 4.0 }; -// hier->fun->evaluate(pFull, {0}, cost, gsl::span(), nullptr, nullptr); -// DOUBLES_EQUAL(-parpe::getLogLikelihoodOffset(dp.edata[0].getObservedData().size()), cost, 1e-5); - -// parpe::OptimizationProblemImpl problem(std::move(hier), std::make_unique()); +// 0.1, scalingExp, offsetExp, 1.0, +// 0.2, 4.0 }; +// hier->fun->evaluate(pFull, {0}, cost, gsl::span(), nullptr, +// nullptr); +// DOUBLES_EQUAL(-parpe::getLogLikelihoodOffset(dp.edata[0].getObservedData().size()), +// cost, 1e-5); + +// parpe::OptimizationProblemImpl problem(std::move(hier), +// std::make_unique()); // // std::vector startingPoint = pReduced; // // for(auto& pp : startingPoint) // // pp += 1; @@ -256,7 +279,8 @@ TEST_F(steadystateProblemTests, testSteadystateHierarchical) { // //auto result = optimizer.optimize(&problem); // // check status, cost, parameter // //CHECK_EQUAL(1, 
std::get<0>(result)); -// //DOUBLES_EQUAL(-parpe::getLogLikelihoodOffset(dp.edata[0].my.size()), std::get<1>(result), 1e-5); +// //DOUBLES_EQUAL(-parpe::getLogLikelihoodOffset(dp.edata[0].my.size()), +// std::get<1>(result), 1e-5); // //DOUBLES_EQUAL(-1.0, std::get<2>(result).at(0), 1e-8); // //std::cout<(result); diff --git a/examples/parpeamici/steadystate/main.cpp b/examples/parpeamici/steadystate/main.cpp index e3f3f8993..e2c13dd1b 100644 --- a/examples/parpeamici/steadystate/main.cpp +++ b/examples/parpeamici/steadystate/main.cpp @@ -2,27 +2,30 @@ #include -#include -#include #include +#include +#include /** * @file * - * This is an example for parameter estimation for the Steadystate ODE example model included in AMICI. - * It demonstrates how to use IpOpt or CERES to solve a ODE-constrained optimization problem for which - * the ODE system has been implemented in AMICI. - * For cases where the ODE has to be evaluated several times per objective function evaluation - * see examples example_steadystate_parallel and example_steadystate_multicondition. + * This is an example for parameter estimation for the Steadystate ODE example + * model included in AMICI. It demonstrates how to use IpOpt or CERES to solve a + * ODE-constrained optimization problem for which the ODE system has been + * implemented in AMICI. For cases where the ODE has to be evaluated several + * times per objective function evaluation see examples + * example_steadystate_parallel and example_steadystate_multicondition. */ -int main(int argc, char **argv) { - if(argc != 2) { - std::cerr<<"Error: wrong number of arguments. Exactly one argument for data file expected."; +int main(int argc, char** argv) { + if (argc != 2) { + std::cerr << "Error: wrong number of arguments. Exactly one argument " + "for data file expected."; return EXIT_FAILURE; } - std::string dataFileName = argv[1]; // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) + std::string dataFileName = + argv[1]; // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic) ExampleSteadystateProblem problem(dataFileName); parpe::OptimizationOptions options = problem.getOptimizationOptions(); @@ -43,7 +46,6 @@ int main(int argc, char **argv) { status += parpe::getLocalOptimum(&problem); - #ifdef PARPE_DLIB_ENABLED printf("#########\n"); printf("# Dlib #\n"); @@ -69,17 +71,16 @@ int main(int argc, char **argv) { return status; } -//TODO - +// TODO -//void ExampleSteadystateProblem::logOptimizerFinished( -// double optimalCost, const double *optimalParameters, double masterTime, -// int exitStatus) { -// printf("Minimal cost: %f\n", optimalCost); -// printf("Optimal parameters : "); -// printArray(optimalParameters, model->np); -// printf("\n"); -// printf("True parameters were: "); +// void ExampleSteadystateProblem::logOptimizerFinished( +// double optimalCost, const double *optimalParameters, double masterTime, +// int exitStatus) { +// printf("Minimal cost: %f\n", optimalCost); +// printf("Optimal parameters : "); +// printArray(optimalParameters, model->np); +// printf("\n"); +// printf("True parameters were: "); // hsize_t length; // double *ptrue; diff --git a/examples/parpeamici/steadystate/main_multicondition.cpp b/examples/parpeamici/steadystate/main_multicondition.cpp index e1d317708..ee317d274 100644 --- a/examples/parpeamici/steadystate/main_multicondition.cpp +++ b/examples/parpeamici/steadystate/main_multicondition.cpp @@ -5,12 +5,12 @@ #include #endif -#include -#include +#include #include #include -#include #include +#include +#include 
#include #include @@ -29,49 +29,48 @@ */ /** - * @brief The SteadystateApplication class subclasses parpe::OptimizationApplication - * which provides a frame for a standalone program to solve a multi-start local optimization - * problem. + * @brief The SteadystateApplication class subclasses + * parpe::OptimizationApplication which provides a frame for a standalone + * program to solve a multi-start local optimization problem. */ class SteadystateApplication : public parpe::OptimizationApplication { -public: + public: using OptimizationApplication::OptimizationApplication; ~SteadystateApplication() override = default; - void initProblem(std::string const& inFileArgument, - std::string const& outFileArgument) override - { + void initProblem( + std::string const& inFileArgument, + std::string const& outFileArgument) override { - // The same file should only be opened/created once, an then only be reopened + // The same file should only be opened/created once, an then only be + // reopened h5File = parpe::hdf5CreateFile(outFileArgument, true); logParPEVersion(h5File); dataProvider = std::make_unique( - amici::generic_model::getModel(), inFileArgument); + amici::generic_model::getModel(), inFileArgument); // read options from file - auto optimizationOptions = parpe::OptimizationOptions::fromHDF5( - dataProvider->getHdf5File()); + auto optimizationOptions = + parpe::OptimizationOptions::fromHDF5(dataProvider->getHdf5File()); - // Create one instance for the problem, one for the application for clear ownership + // Create one instance for the problem, one for the application for + // clear ownership auto multiCondProb = new parpe::MultiConditionProblem( - dataProvider.get(), - &loadBalancer, - std::make_unique(), - // TODO remove this resultwriter - std::make_unique( - h5File, - std::string("/multistarts/")) - ); + dataProvider.get(), + &loadBalancer, + std::make_unique(), + // TODO remove this resultwriter + std::make_unique( + h5File, std::string("/multistarts/"))); // If hierarchical optimization was requested, wrap the original problem - if(optimizationOptions->hierarchicalOptimization) { + if (optimizationOptions->hierarchicalOptimization) { problem.reset(new parpe::HierarchicalOptimizationProblemWrapper( - std::unique_ptr(multiCondProb), - dataProvider.get()) - ); + std::unique_ptr(multiCondProb), + dataProvider.get())); } else { problem.reset(multiCondProb); } @@ -79,17 +78,16 @@ class SteadystateApplication : public parpe::OptimizationApplication { problem->setOptimizationOptions(*optimizationOptions); // On master, copy input data to result file - if(parpe::getMpiRank() < 1) + if (parpe::getMpiRank() < 1) dataProvider->copyInputData(h5File); // TODO: we can set the correct start? auto ms = new parpe::MultiConditionProblemMultiStartOptimizationProblem( - dataProvider.get(), - problem->getOptimizationOptions(), - multiCondProb->getResultWriter(), - &loadBalancer, - std::make_unique() - ); + dataProvider.get(), + problem->getOptimizationOptions(), + multiCondProb->getResultWriter(), + &loadBalancer, + std::make_unique()); multiStartOptimizationProblem.reset(ms); } @@ -97,12 +95,12 @@ class SteadystateApplication : public parpe::OptimizationApplication { }; /** - * @brief The SteadystateLocalOptimizationApplication class overrides the multi-start optimization - * in the base class and performs only a single optimization run. This is mostly for debugging. 
+ * @brief The SteadystateLocalOptimizationApplication class overrides the + * multi-start optimization in the base class and performs only a single + * optimization run. This is mostly for debugging. */ class SteadystateLocalOptimizationApplication : public SteadystateApplication { -public: - + public: using SteadystateApplication::SteadystateApplication; void runMaster() override { @@ -111,8 +109,7 @@ class SteadystateLocalOptimizationApplication : public SteadystateApplication { } }; - -int main(int argc, char **argv) { +int main(int argc, char** argv) { int status = EXIT_SUCCESS; // SteadystateLocalOptimizationApplication app(argc, argv); @@ -121,5 +118,3 @@ int main(int argc, char **argv) { return status; } - - diff --git a/examples/parpeamici/steadystate/main_simulator.cpp b/examples/parpeamici/steadystate/main_simulator.cpp index b59714a6d..73120e81a 100644 --- a/examples/parpeamici/steadystate/main_simulator.cpp +++ b/examples/parpeamici/steadystate/main_simulator.cpp @@ -18,10 +18,9 @@ namespace amici::generic_model { std::unique_ptr getModel(); } - void printUsage() { - std::cerr<<"Error: wrong number of arguments.\n"; - std::cerr<<"Usage: ... CONDITION_FILE_NAME CONDITION_FILE_PATH " + std::cerr << "Error: wrong number of arguments.\n"; + std::cerr << "Usage: ... CONDITION_FILE_NAME CONDITION_FILE_PATH " "PARAMETER_FILE_NAME PARAMETER_FILE_PATH " "OUTFILENAME OUTFILEPATH " "--at-optimum|--along-trajectory|--nominal " @@ -29,31 +28,31 @@ void printUsage() { // |--parameter-matrix=PATH-UNSUPPORTED } -int main(int argc, char **argv) { +int main(int argc, char** argv) { int status = EXIT_SUCCESS; - if(argc != 10) { + if (argc != 10) { printUsage(); return EXIT_FAILURE; } bool computeInner; - if(std::string(argv[argc -1]) == "--compute-inner") { + if (std::string(argv[argc - 1]) == "--compute-inner") { computeInner = true; - } else if(std::string(argv[argc -1]) == "--nocompute-inner") { + } else if (std::string(argv[argc - 1]) == "--nocompute-inner") { computeInner = false; } else { printUsage(); return EXIT_FAILURE; } - if(std::string(argv[argc -2]) == "--mpi") { + if (std::string(argv[argc - 2]) == "--mpi") { #ifdef PARPE_ENABLE_MPI MPI_Init(&argc, &argv); #else throw std::runtime_error("parPE was built without MPI support."); #endif - } else if(std::string(argv[argc -2]) == "--nompi") { + } else if (std::string(argv[argc - 2]) == "--nompi") { ; } else { printUsage(); @@ -70,14 +69,18 @@ int main(int argc, char **argv) { std::string simulationMode = argv[7]; SteadyStateMultiConditionDataProvider dp( - amici::generic_model::getModel(), - conditionFileName, - conditionFilePath); + amici::generic_model::getModel(), conditionFileName, conditionFilePath); - status = parpe::runSimulator(dp, simulationMode, - conditionFileName, conditionFilePath, - parameterFileName, parameterFilePath, - resultFileName, resultPath, computeInner); + status = parpe::runSimulator( + dp, + simulationMode, + conditionFileName, + conditionFilePath, + parameterFileName, + parameterFilePath, + resultFileName, + resultPath, + computeInner); parpe::finalizeMpiIfNeeded(); diff --git a/examples/parpeamici/steadystate/steadyStateMultiConditionDataprovider.cpp b/examples/parpeamici/steadystate/steadyStateMultiConditionDataprovider.cpp index e8fcd64cd..8d17b63af 100644 --- a/examples/parpeamici/steadystate/steadyStateMultiConditionDataprovider.cpp +++ b/examples/parpeamici/steadystate/steadyStateMultiConditionDataprovider.cpp @@ -6,29 +6,25 @@ #include 
SteadyStateMultiConditionDataProvider::SteadyStateMultiConditionDataProvider( - std::unique_ptr model, - std::string const& hdf5Filename, - std::string const& rootPath) - : MultiConditionDataProviderHDF5(std::move(model), hdf5Filename, rootPath) -{ + std::unique_ptr model, + std::string const& hdf5Filename, + std::string const& rootPath) + : MultiConditionDataProviderHDF5(std::move(model), hdf5Filename, rootPath) { solver_ = MultiConditionDataProviderHDF5::getModel()->getSolver(); setupModelAndSolver(); } - -std::unique_ptr SteadyStateMultiConditionDataProvider::getSolver() const -{ +std::unique_ptr +SteadyStateMultiConditionDataProvider::getSolver() const { return std::unique_ptr(solver_->clone()); } - void SteadyStateMultiConditionDataProvider::setupModelAndSolver() const { // calculate sensitivities for all parameters - //model.requireSensitivitiesForAllParameters(); + // model.requireSensitivitiesForAllParameters(); solver_->setSensitivityOrder(amici::SensitivityOrder::first); // solver_->setSensitivityMethod(amici::SensitivityMethod::adjoint); // solver_->setMaxSteps(10000); // solver_->setNewtonMaxSteps(40); } - diff --git a/examples/parpeamici/steadystate/steadyStateMultiConditionDataprovider.h b/examples/parpeamici/steadystate/steadyStateMultiConditionDataprovider.h index 698e54eff..f1e27099e 100644 --- a/examples/parpeamici/steadystate/steadyStateMultiConditionDataprovider.h +++ b/examples/parpeamici/steadystate/steadyStateMultiConditionDataprovider.h @@ -1,37 +1,38 @@ #ifndef STEADYSTATEMULTICONDITIONPROBLEM_H #define STEADYSTATEMULTICONDITIONPROBLEM_H +#include "steadystateProblem.h" #include #include -#include "steadystateProblem.h" -#include #include +#include -#include #include +#include /** * @brief The SteadyStateMultiConditionDataProvider class provides the interface - * to a HDF5 data file. Some non-default paths within the hdf5 file are set here. + * to a HDF5 data file. Some non-default paths within the hdf5 file are set + * here. 
*/ class SteadyStateMultiConditionDataProvider : public parpe::MultiConditionDataProviderHDF5 { public: - SteadyStateMultiConditionDataProvider(std::unique_ptr model, - const std::string &hdf5Filename, - const std::string &rootPath = ""); + SteadyStateMultiConditionDataProvider( + std::unique_ptr model, + std::string const& hdf5Filename, + std::string const& rootPath = ""); std::unique_ptr getSolver() const override; ~SteadyStateMultiConditionDataProvider() override = default; -private: + private: void setupModelAndSolver() const; std::unique_ptr solver_; - }; #endif // STEADYSTATEMULTICONDITIONPROBLEM_H diff --git a/examples/parpeamici/steadystate/steadystateProblem.cpp b/examples/parpeamici/steadystate/steadystateProblem.cpp index 144d1e0e4..125ecf4bf 100644 --- a/examples/parpeamici/steadystate/steadystateProblem.cpp +++ b/examples/parpeamici/steadystate/steadystateProblem.cpp @@ -1,8 +1,8 @@ #include "steadystateProblem.h" #include -#include #include +#include #include "wrapfunctions.h" @@ -12,8 +12,8 @@ #include -ExampleSteadystateProblem::ExampleSteadystateProblem(const std::string &dataFileName) -{ +ExampleSteadystateProblem::ExampleSteadystateProblem( + std::string const& dataFileName) { [[maybe_unused]] auto lock = parpe::hdf5MutexGetLock(); file.openFile(dataFileName, H5F_ACC_RDONLY); @@ -23,25 +23,23 @@ ExampleSteadystateProblem::ExampleSteadystateProblem(const std::string &dataFile optimizationOptions.maxOptimizerIterations = 100; OptimizationProblem::setOptimizationOptions(optimizationOptions); - cost_fun_ = std::make_unique(file.getId()); + cost_fun_ = + std::make_unique(file.getId()); } -void ExampleSteadystateProblem::fillInitialParameters(gsl::span buffer) const -{ +void ExampleSteadystateProblem::fillInitialParameters( + gsl::span buffer) const { std::fill(buffer.begin(), buffer.end(), 0.0); - } -void ExampleSteadystateProblem::fillParametersMin(gsl::span buffer) const -{ +void ExampleSteadystateProblem::fillParametersMin( + gsl::span buffer) const { std::fill(buffer.begin(), buffer.end(), -5.0); - } -void ExampleSteadystateProblem::fillParametersMax(gsl::span buffer) const -{ +void ExampleSteadystateProblem::fillParametersMax( + gsl::span buffer) const { std::fill(buffer.begin(), buffer.end(), 5.0); - } void ExampleSteadystateGradientFunction::requireSensitivities( @@ -58,7 +56,8 @@ void ExampleSteadystateGradientFunction::requireSensitivities( void ExampleSteadystateGradientFunction::setupUserData(int conditionIdx) { hsize_t m = 0, n = 0; [[maybe_unused]] auto lock = parpe::hdf5MutexGetLock(); - model->setTimepoints(amici::hdf5::getDoubleDataset2D(fileId, "/parameters/t", m, n)); + model->setTimepoints( + amici::hdf5::getDoubleDataset2D(fileId, "/parameters/t", m, n)); // set model constants readFixedParameters(conditionIdx); @@ -74,44 +73,59 @@ void ExampleSteadystateGradientFunction::setupExpData(int conditionIdx) { readMeasurement(conditionIdx); } -std::vector ExampleSteadystateGradientFunction::getParameterIds() const -{ +std::vector +ExampleSteadystateGradientFunction::getParameterIds() const { return parpe::hdf5Read1dStringDataset(fileId, "/parameters/parameterNames"); } - -void ExampleSteadystateGradientFunction::readFixedParameters(int conditionIdx) const { +void ExampleSteadystateGradientFunction::readFixedParameters( + int conditionIdx) const { std::vector k(model->nk()); - parpe::hdf5Read2DDoubleHyperslab(fileId, "/fixedParameters/k", k.size(), - 1, 0, conditionIdx, k); + parpe::hdf5Read2DDoubleHyperslab( + fileId, "/fixedParameters/k", k.size(), 1, 0, 
conditionIdx, k); model->setFixedParameters(k); } -void ExampleSteadystateGradientFunction::readMeasurement(int conditionIdx) const { - edata->setObservedData( - parpe::hdf5Get3DDoubleHyperslab(fileId, "/measurements/y", - 1, edata->nt(), edata->nytrue(), - conditionIdx, 0, 0)); - edata->setObservedDataStdDev( - parpe::hdf5Get3DDoubleHyperslab(fileId, "/measurements/ysigma", - 1, edata->nt(), edata->nytrue(), - conditionIdx, 0, 0)); +void ExampleSteadystateGradientFunction::readMeasurement( + int conditionIdx) const { + edata->setObservedData(parpe::hdf5Get3DDoubleHyperslab( + fileId, + "/measurements/y", + 1, + edata->nt(), + edata->nytrue(), + conditionIdx, + 0, + 0)); + edata->setObservedDataStdDev(parpe::hdf5Get3DDoubleHyperslab( + fileId, + "/measurements/ysigma", + 1, + edata->nt(), + edata->nytrue(), + conditionIdx, + 0, + 0)); } -ExampleSteadystateGradientFunction::ExampleSteadystateGradientFunction(hid_t fileId) - : fileId(fileId), model(amici::generic_model::getModel()), solver(model->getSolver()) -{ +ExampleSteadystateGradientFunction::ExampleSteadystateGradientFunction( + hid_t fileId) + : fileId(fileId) + , model(amici::generic_model::getModel()) + , solver(model->getSolver()) { setupUserData(0); setupExpData(0); } parpe::FunctionEvaluationStatus ExampleSteadystateGradientFunction::evaluate( - gsl::span parameters, double &fval, - gsl::span gradient, parpe::Logger * /*logger*/, - double * /*cpuTime*/) const -{ + gsl::span parameters, + double& fval, + gsl::span gradient, + parpe::Logger* /*logger*/, + double* /*cpuTime*/) const { - model->setParameters(std::vector(parameters.begin(), parameters.end())); + model->setParameters( + std::vector(parameters.begin(), parameters.end())); // printArray(parameters, udata->np);printf("\n"); @@ -125,10 +139,10 @@ parpe::FunctionEvaluationStatus ExampleSteadystateGradientFunction::evaluate( for (int i = 0; i < model->np(); ++i) gradient[i] = -rdata->sllh[i]; - return rdata->status == 0 ? parpe::functionEvaluationSuccess : parpe::functionEvaluationFailure; + return rdata->status == 0 ? 
parpe::functionEvaluationSuccess + : parpe::functionEvaluationFailure; } -int ExampleSteadystateGradientFunction::numParameters() const -{ +int ExampleSteadystateGradientFunction::numParameters() const { return model->np(); } diff --git a/examples/parpeamici/steadystate/steadystateProblem.h b/examples/parpeamici/steadystate/steadystateProblem.h index e39dc665d..99994c02a 100644 --- a/examples/parpeamici/steadystate/steadystateProblem.h +++ b/examples/parpeamici/steadystate/steadystateProblem.h @@ -7,21 +7,21 @@ #include - /** * @brief Cost function for the AMICI steady-state example */ class ExampleSteadystateGradientFunction : public parpe::GradientFunction { -public: + public: explicit ExampleSteadystateGradientFunction(hid_t fileId); using GradientFunction::evaluate; parpe::FunctionEvaluationStatus evaluate( - gsl::span parameters, - double &fval, - gsl::span gradient, - parpe::Logger *logger, double *cpuTime) const override; + gsl::span parameters, + double& fval, + gsl::span gradient, + parpe::Logger* logger, + double* cpuTime) const override; int numParameters() const override; void setupUserData(int conditionIdx); @@ -29,7 +29,7 @@ class ExampleSteadystateGradientFunction : public parpe::GradientFunction { std::vector getParameterIds() const override; -private: + private: void requireSensitivities(bool sensitivitiesRequired) const; void readFixedParameters(int conditionIdx) const; void readMeasurement(int conditionIdx) const; @@ -41,7 +41,6 @@ class ExampleSteadystateGradientFunction : public parpe::GradientFunction { std::unique_ptr solver; }; - /** * @brief Optimization problem for the AMICI steady-state example */ @@ -54,7 +53,7 @@ class ExampleSteadystateProblem : public parpe::OptimizationProblem { void fillParametersMin(gsl::span buffer) const override; void fillParametersMax(gsl::span buffer) const override; -private: + private: H5::H5File file; }; diff --git a/examples/parpeamici/steadystate/steadystateProblemParallel.cpp b/examples/parpeamici/steadystate/steadystateProblemParallel.cpp index d264b11f1..a38cfcae0 100644 --- a/examples/parpeamici/steadystate/steadystateProblemParallel.cpp +++ b/examples/parpeamici/steadystate/steadystateProblemParallel.cpp @@ -11,34 +11,45 @@ #include -ExampleSteadystateGradientFunctionParallel::ExampleSteadystateGradientFunctionParallel(parpe::LoadBalancerMaster *loadBalancer, const std::string &dataFileName) - :loadBalancer(loadBalancer), - model(std::unique_ptr(getModel())) -{ +ExampleSteadystateGradientFunctionParallel:: + ExampleSteadystateGradientFunctionParallel( + parpe::LoadBalancerMaster* loadBalancer, + std::string const& dataFileName) + : loadBalancer(loadBalancer) + , model(std::unique_ptr(getModel())) { dataProvider = std::make_unique( - std::unique_ptr(model->clone()), - dataFileName); + std::unique_ptr(model->clone()), dataFileName); numConditions = dataProvider->getNumberOfSimulationConditions(); } -parpe::FunctionEvaluationStatus ExampleSteadystateGradientFunctionParallel::evaluate(gsl::span parameters, double &fval, gsl::span gradient, parpe::Logger *logger, double *cpuTime) const +parpe::FunctionEvaluationStatus +ExampleSteadystateGradientFunctionParallel::evaluate( + gsl::span parameters, + double& fval, + gsl::span gradient, + parpe::Logger* logger, + double* cpuTime) const { if (parpe::getMpiCommSize() > 1) { - return evaluateParallel(parameters, fval, gradient) ? parpe::functionEvaluationFailure : parpe::functionEvaluationSuccess; + return evaluateParallel(parameters, fval, gradient) + ? 
parpe::functionEvaluationFailure + : parpe::functionEvaluationSuccess; } else { - return evaluateSerial(parameters, fval, gradient) ? parpe::functionEvaluationFailure : parpe::functionEvaluationSuccess; + return evaluateSerial(parameters, fval, gradient) + ? parpe::functionEvaluationFailure + : parpe::functionEvaluationSuccess; } } -int ExampleSteadystateGradientFunctionParallel::numParameters() const -{ +int ExampleSteadystateGradientFunctionParallel::numParameters() const { return model->np(); } -int ExampleSteadystateGradientFunctionParallel::evaluateParallel(gsl::span parameters, - double &objFunVal, - gsl::span objFunGrad) const { +int ExampleSteadystateGradientFunctionParallel::evaluateParallel( + gsl::span parameters, + double& objFunVal, + gsl::span objFunGrad) const { // TODO: always computes gradient; ignores simulation status // create load balancer job for each simulation @@ -51,7 +62,7 @@ int ExampleSteadystateGradientFunctionParallel::evaluateParallel(gsl::spanjobDone = &numJobsFinished; job->jobDoneChangedCondition = &simulationsCond; job->jobDoneChangedMutex = &simulationsMutex; @@ -60,9 +71,12 @@ int ExampleSteadystateGradientFunctionParallel::evaluateParallel(gsl::spansendBuffer.data(), &i, sizeof(int)); - memcpy(job->sendBuffer.data() + sizeof(int), &needGradient, sizeof(int)); - memcpy(job->sendBuffer.data() + 2 * sizeof(int), parameters.data(), - model->np() * sizeof(double)); + memcpy( + job->sendBuffer.data() + sizeof(int), &needGradient, sizeof(int)); + memcpy( + job->sendBuffer.data() + 2 * sizeof(int), + parameters.data(), + model->np() * sizeof(double)); loadBalancer->queueJob(job); } @@ -84,7 +98,7 @@ int ExampleSteadystateGradientFunctionParallel::evaluateParallel(gsl::span parameters, - double &objFunVal, - gsl::span objFunGrad) const { +int ExampleSteadystateGradientFunctionParallel::evaluateSerial( + gsl::span parameters, + double& objFunVal, + gsl::span objFunGrad) const { int status = 0; - model->setParameters(std::vector(parameters.begin(), parameters.end())); + model->setParameters( + std::vector(parameters.begin(), parameters.end())); // printArray(parameters, udata->np);printf("\n"); objFunVal = 0; @@ -131,17 +147,19 @@ int ExampleSteadystateGradientFunctionParallel::evaluateSerial(gsl::span &buffer, - int jobId) { +void ExampleSteadystateGradientFunctionParallel::messageHandler( + std::vector& buffer, + int jobId) { // int mpiRank; // MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); - // parpe::logmessage(parpe::LOGLVL_DEBUG, "Worker #%d: Job #%d received.", mpiRank, jobId); + // parpe::logmessage(parpe::LOGLVL_DEBUG, "Worker #%d: Job #%d + // received.", mpiRank, jobId); // unpack parameters int conditionIdx = *(int*)buffer.data(); int needGradient = *(int*)(buffer.data() + sizeof(int)); - double *pstart = reinterpret_cast(buffer.data() + 2 * sizeof(int)); + double* pstart = reinterpret_cast(buffer.data() + 2 * sizeof(int)); model->setParameters(std::vector(pstart, pstart + model->np())); // read data for current conditions @@ -160,11 +178,10 @@ void ExampleSteadystateGradientFunctionParallel::messageHandler(std::vectorllh); // pack results buffer.resize(sizeof(double) * (model->nplist() + 1)); - double *doubleBuffer = (double *) buffer.data(); + double* doubleBuffer = (double*)buffer.data(); doubleBuffer[0] = rdata->llh; if (needGradient) for (int i = 0; i < model->nplist(); ++i) doubleBuffer[1 + i] = rdata->sllh[i]; - } diff --git a/examples/parpeamici/steadystate/steadystateProblemParallel.h b/examples/parpeamici/steadystate/steadystateProblemParallel.h 
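// Wire format used by the master/worker code above: evaluateParallel() packs
// [int conditionIdx][int needGradient][double parameters[np]] into the job's
// send buffer, and messageHandler() replies with
// [double llh][double gradient[nplist]]. The helpers below are a hypothetical
// restatement of that layout for illustration; they are not parPE API.
#include <cstring>
#include <vector>

std::vector<char> packRequest(
    int conditionIdx,
    int needGradient,
    std::vector<double> const& parameters) {
    std::vector<char> buffer(
        2 * sizeof(int) + parameters.size() * sizeof(double));
    std::memcpy(buffer.data(), &conditionIdx, sizeof(int));
    std::memcpy(buffer.data() + sizeof(int), &needGradient, sizeof(int));
    std::memcpy(
        buffer.data() + 2 * sizeof(int),
        parameters.data(),
        parameters.size() * sizeof(double));
    return buffer;
}

void unpackReply(
    std::vector<char> const& buffer,
    double& llh,
    std::vector<double>& gradient) {
    auto const* values = reinterpret_cast<double const*>(buffer.data());
    llh = values[0];
    // remaining doubles are the gradient entries (one per amici plist entry)
    gradient.assign(values + 1, values + buffer.size() / sizeof(double));
}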
index 279796395..b3bd09ca8 100644 --- a/examples/parpeamici/steadystate/steadystateProblemParallel.h +++ b/examples/parpeamici/steadystate/steadystateProblemParallel.h @@ -10,39 +10,46 @@ #include /** - * @brief The ExampleSteadystateGradientFunctionParallel class evaluates an ODE-constrained objective function in paralell. + * @brief The ExampleSteadystateGradientFunctionParallel class evaluates an + * ODE-constrained objective function in paralell. */ -class ExampleSteadystateGradientFunctionParallel : public parpe::GradientFunction { -public: - ExampleSteadystateGradientFunctionParallel(parpe::LoadBalancerMaster *loadBalancer, const std::string &dataFileName); +class ExampleSteadystateGradientFunctionParallel + : public parpe::GradientFunction { + public: + ExampleSteadystateGradientFunctionParallel( + parpe::LoadBalancerMaster* loadBalancer, + std::string const& dataFileName); parpe::FunctionEvaluationStatus evaluate( - gsl::span parameters, - double &fval, - gsl::span gradient, parpe::Logger* logger, double *cpuTime) const override; + gsl::span parameters, + double& fval, + gsl::span gradient, + parpe::Logger* logger, + double* cpuTime) const override; int numParameters() const override; void setupUserData(int conditionIdx); void setupExpData(int conditionIdx); - void messageHandler(std::vector &buffer, int jobId); + void messageHandler(std::vector& buffer, int jobId); + private: + int evaluateParallel( + gsl::span parameters, + double& objFunVal, + gsl::span objFunGrad) const; -private: - - int evaluateParallel(gsl::span parameters, double &objFunVal, - gsl::span objFunGrad) const; - - int evaluateSerial(gsl::span parameters, double &objFunVal, - gsl::span objFunGrad) const; - + int evaluateSerial( + gsl::span parameters, + double& objFunVal, + gsl::span objFunGrad) const; void requireSensitivities(bool sensitivitiesRequired) const; void readFixedParameters(int conditionIdx) const; void readMeasurement(int conditionIdx) const; - parpe::LoadBalancerMaster *loadBalancer = nullptr; // non-owning + parpe::LoadBalancerMaster* loadBalancer = nullptr; // non-owning std::unique_ptr edata; std::unique_ptr model; diff --git a/examples/parpeloadbalancer/main.cpp b/examples/parpeloadbalancer/main.cpp index 430f54540..011ff22e5 100644 --- a/examples/parpeloadbalancer/main.cpp +++ b/examples/parpeloadbalancer/main.cpp @@ -1,11 +1,11 @@ #include #include -#include +#include #include -#include +#include #include -#include +#include #include @@ -35,24 +35,25 @@ int master() { std::mutex mutex; for (int i = 0; i < numJobs; ++i) { - parpe::JobData *job = &jobdata[i]; + parpe::JobData* job = &jobdata[i]; job->jobDone = &numJobsFinished; job->jobDoneChangedCondition = &cond; job->jobDoneChangedMutex = &mutex; job->sendBuffer.resize(sizeof(double)); - *(double *)job->sendBuffer.data() = i; + *(double*)job->sendBuffer.data() = i; lbm.queueJob(job); } // wait for simulations to finish std::unique_lock lock(mutex); - cond.wait(lock, [&numJobsFinished, &numJobs]{ - return numJobsFinished == numJobs;}); + cond.wait(lock, [&numJobsFinished, &numJobs] { + return numJobsFinished == numJobs; + }); // check results int errors = 0; for (int i = 0; i < numJobs; ++i) { - auto buffer = (double *)(jobdata[i].recvBuffer.data()); + auto buffer = (double*)(jobdata[i].recvBuffer.data()); if (*buffer != 2 * i) printf("ERROR: %d was %f\n", i, *buffer); @@ -69,16 +70,16 @@ int master() { * @param buffer * @param jobId */ -void duplicatingMessageHandler(std::vector &buffer, int /*jobId*/) { +void 
duplicatingMessageHandler(std::vector& buffer, int /*jobId*/) { // read message - double value = *reinterpret_cast(buffer.data()); + double value = *reinterpret_cast(buffer.data()); // printf("Received %f\n", value); // sleep(1); // prepare result buffer.resize(sizeof(double)); - auto result = reinterpret_cast(buffer.data()); + auto result = reinterpret_cast(buffer.data()); *result = value * 2; // printf("Sending %f\n", *result); } @@ -88,7 +89,7 @@ void worker() { lbw.run(duplicatingMessageHandler); } -int main(int argc, char **argv) { +int main(int argc, char** argv) { int status = 0; MPI_Init(&argc, &argv); diff --git a/include/parpeamici/amiciMisc.h b/include/parpeamici/amiciMisc.h index f80eb05b1..fc0e224a7 100644 --- a/include/parpeamici/amiciMisc.h +++ b/include/parpeamici/amiciMisc.h @@ -10,10 +10,10 @@ using amici::getUnscaledParameter; using amici::getScaledParameter; -std::unique_ptr -run_amici_simulation(amici::Solver& solver, - amici::ExpData const* edata, - amici::Model& model, - bool rethrow = false, - Logger* logger = nullptr); -} +std::unique_ptr run_amici_simulation( + amici::Solver& solver, + amici::ExpData const* edata, + amici::Model& model, + bool rethrow = false, + Logger* logger = nullptr); +} // namespace parpe diff --git a/include/parpeamici/amiciSimulationRunner.h b/include/parpeamici/amiciSimulationRunner.h index 2710c8a61..0922dcef9 100644 --- a/include/parpeamici/amiciSimulationRunner.h +++ b/include/parpeamici/amiciSimulationRunner.h @@ -13,10 +13,10 @@ #include #include +#include #include -#include #include -#include +#include #include #include @@ -38,17 +38,15 @@ using LoadBalancerMaster = int; * @brief The AmiciSimulationRunner class queues AMICI simulations, waits for * the results and calls a user-provided aggregation function */ -class AmiciSimulationRunner -{ +class AmiciSimulationRunner { public: using messageHandlerFunc = - std::function& buffer, int jobId)>; + std::function& buffer, int jobId)>; /** * @brief Data to be sent to a worker to run a simulation */ - struct AmiciWorkPackageSimple - { + struct AmiciWorkPackageSimple { AmiciWorkPackageSimple() = default; std::vector optimizationParameters; amici::SensitivityOrder sensitivityOrder; @@ -60,8 +58,7 @@ class AmiciSimulationRunner /** * @brief Result from a single AMICI simulation */ - struct AmiciResultPackageSimple - { + struct AmiciResultPackageSimple { AmiciResultPackageSimple() = default; double llh; double simulationTimeSeconds; @@ -89,12 +86,13 @@ class AmiciSimulationRunner * completed. May be nullptr. 
* @param logPrefix */ - AmiciSimulationRunner(const std::vector& optimizationParameters, - amici::SensitivityOrder sensitivityOrder, - const std::vector& conditionIndices, - callbackJobFinishedType callbackJobFinished = nullptr, - callbackAllFinishedType aggregate = nullptr, - std::string logPrefix = ""); + AmiciSimulationRunner( + std::vector const& optimizationParameters, + amici::SensitivityOrder sensitivityOrder, + std::vector const& conditionIndices, + callbackJobFinishedType callbackJobFinished = nullptr, + callbackAllFinishedType aggregate = nullptr, + std::string logPrefix = ""); AmiciSimulationRunner(AmiciSimulationRunner const& other) = delete; @@ -105,8 +103,9 @@ class AmiciSimulationRunner * @param maxSimulationsPerPackage * @return */ - int runDistributedMemory(LoadBalancerMaster* loadBalancer, - const int maxSimulationsPerPackage = 1); + int runDistributedMemory( + LoadBalancerMaster* loadBalancer, + int const maxSimulationsPerPackage = 1); #endif /** @@ -116,20 +115,22 @@ class AmiciSimulationRunner * @param sequential Run sequential (not in parallel) * @return */ - int runSharedMemory(const messageHandlerFunc& messageHandler, - bool sequential = false); + int runSharedMemory( + messageHandlerFunc const& messageHandler, + bool sequential = false); private: #ifdef PARPE_ENABLE_MPI - void queueSimulation(LoadBalancerMaster* loadBalancer, - JobData* d, - int* jobDone, - std::condition_variable* jobDoneChangedCondition, - std::mutex* jobDoneChangedMutex, - int jobIdx, - const std::vector& optimizationParameters, - amici::SensitivityOrder sensitivityOrder, - const std::vector& conditionIndices) const; + void queueSimulation( + LoadBalancerMaster* loadBalancer, + JobData* d, + int* jobDone, + std::condition_variable* jobDoneChangedCondition, + std::mutex* jobDoneChangedMutex, + int jobIdx, + const std::vector& optimizationParameters, + amici::SensitivityOrder sensitivityOrder, + const std::vector& conditionIndices) const; #endif std::vector const& optimization_parameters_; @@ -142,43 +143,41 @@ class AmiciSimulationRunner std::string log_prefix_; }; -void -swap(AmiciSimulationRunner::AmiciResultPackageSimple& first, - AmiciSimulationRunner::AmiciResultPackageSimple& second) noexcept; +void swap( + AmiciSimulationRunner::AmiciResultPackageSimple& first, + AmiciSimulationRunner::AmiciResultPackageSimple& second) noexcept; -bool -operator==(AmiciSimulationRunner::AmiciResultPackageSimple const& lhs, - AmiciSimulationRunner::AmiciResultPackageSimple const& rhs); +bool operator==( + AmiciSimulationRunner::AmiciResultPackageSimple const& lhs, + AmiciSimulationRunner::AmiciResultPackageSimple const& rhs); } // namespace parpe namespace boost::serialization { -template -void -serialize(Archive& ar, - parpe::AmiciSimulationRunner::AmiciWorkPackageSimple& u, - const unsigned int /*version*/) -{ - ar& u.optimizationParameters; - ar& u.sensitivityOrder; - ar& u.conditionIndices; - ar& u.logPrefix; +template +void serialize( + Archive& ar, + parpe::AmiciSimulationRunner::AmiciWorkPackageSimple& u, + unsigned int const /*version*/) { + ar & u.optimizationParameters; + ar & u.sensitivityOrder; + ar & u.conditionIndices; + ar & u.logPrefix; } -template -void -serialize(Archive& ar, - parpe::AmiciSimulationRunner::AmiciResultPackageSimple& u, - const unsigned int /*version*/) -{ - ar& u.llh; - ar& u.simulationTimeSeconds; - ar& u.gradient; - ar& u.modelOutput; - ar& u.modelSigmas; - ar& u.modelStates; - ar& u.status; +template +void serialize( + Archive& ar, + 
parpe::AmiciSimulationRunner::AmiciResultPackageSimple& u, + unsigned int const /*version*/) { + ar & u.llh; + ar & u.simulationTimeSeconds; + ar & u.gradient; + ar & u.modelOutput; + ar & u.modelSigmas; + ar & u.modelStates; + ar & u.status; } } // namespace boost::serialization diff --git a/include/parpeamici/hierarchicalOptimization.h b/include/parpeamici/hierarchicalOptimization.h index 6620d93a1..e6d890f7d 100644 --- a/include/parpeamici/hierarchicalOptimization.h +++ b/include/parpeamici/hierarchicalOptimization.h @@ -9,13 +9,9 @@ #include - namespace parpe { -enum class ErrorModel -{ - normal -}; // TODO logNormal, Laplace +enum class ErrorModel { normal }; // TODO logNormal, Laplace class AnalyticalParameterProvider; class AnalyticalParameterHdf5Reader; @@ -32,8 +28,7 @@ class HierarchicalOptimizationWrapper; * Computes the negative log likelihood for normally distributed measurement * (others to be added). */ -class HierarchicalOptimizationWrapper : public GradientFunction -{ +class HierarchicalOptimizationWrapper : public GradientFunction { public: /** * @brief For testing @@ -43,7 +38,7 @@ class HierarchicalOptimizationWrapper : public GradientFunction * @param numTimepoints */ HierarchicalOptimizationWrapper( - AmiciSummedGradientFunction *wrapped_function, + AmiciSummedGradientFunction* wrapped_function, int numConditions = 0, int numObservables = 0); @@ -57,9 +52,9 @@ class HierarchicalOptimizationWrapper : public GradientFunction * @param errorModel */ HierarchicalOptimizationWrapper( - AmiciSummedGradientFunction *wrapped_function, + AmiciSummedGradientFunction* wrapped_function, const H5::H5File& file, - const std::string& hdf5RootPath, + std::string const& hdf5RootPath, int numConditions, int numObservables, ErrorModel errorModel); @@ -75,7 +70,7 @@ class HierarchicalOptimizationWrapper : public GradientFunction * @param errorModel */ HierarchicalOptimizationWrapper( - AmiciSummedGradientFunction *wrapped_function, + AmiciSummedGradientFunction* wrapped_function, std::unique_ptr scalingReader, std::unique_ptr offsetReader, std::unique_ptr sigmaReader, @@ -85,19 +80,21 @@ class HierarchicalOptimizationWrapper : public GradientFunction using GradientFunction::evaluate; - FunctionEvaluationStatus evaluate(gsl::span parameters, - double& fval, - gsl::span gradient, - Logger* logger, - double* cpuTime) const override; + FunctionEvaluationStatus evaluate( + gsl::span parameters, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const override; - FunctionEvaluationStatus evaluate(gsl::span reducedParameters, - double& fval, - gsl::span gradient, - std::vector& fullParameters, - std::vector& fullGradient, - Logger* logger, - double* cpuTime) const; + FunctionEvaluationStatus evaluate( + gsl::span reducedParameters, + double& fval, + gsl::span gradient, + std::vector& fullParameters, + std::vector& fullGradient, + Logger* logger, + double* cpuTime) const; /** * @brief Get parameters for initial function evaluation @@ -121,10 +118,11 @@ class HierarchicalOptimizationWrapper : public GradientFunction * @return Vector of double vectors containing AMICI ReturnData::y (nt x ny, * column-major) */ - [[nodiscard]] std::tuple>, - std::vector>> + [[nodiscard]] std::tuple< + std::vector>, + std::vector>> getUnscaledModelOutputsAndSigmas( - const gsl::span reducedParameters, + gsl::span const reducedParameters, Logger* logger, double* cpuTime) const; @@ -147,12 +145,12 @@ class HierarchicalOptimizationWrapper : public GradientFunction * @return the computed offset 
parameters */ [[nodiscard]] std::vector computeAnalyticalOffsets( - const std::vector>& measurements, + std::vector> const& measurements, std::vector>& modelOutputsUnscaled) const; [[nodiscard]] std::vector computeAnalyticalSigmas( std::vector> const& measurements, - const std::vector>& modelOutputsScaled) const; + std::vector> const& modelOutputsScaled) const; void applyOptimalOffsets( std::vector const& offsetParameters, @@ -166,7 +164,7 @@ class HierarchicalOptimizationWrapper : public GradientFunction */ void fillInAnalyticalSigmas( std::vector>& allSigmas, - const std::vector& analyticalSigmas) const; + std::vector const& analyticalSigmas) const; /** * @brief Evaluate `fun` using the computed optimal scaling and offset @@ -187,9 +185,9 @@ class HierarchicalOptimizationWrapper : public GradientFunction std::vector const& sigmas, std::vector> const& measurements, std::vector> const& modelOutputsScaled, - std::vector > &fullSigmaMatrices, + std::vector>& fullSigmaMatrices, double& fval, - const gsl::span gradient, + gsl::span const gradient, std::vector& fullGradient, Logger* logger, double* cpuTime) const; @@ -202,7 +200,8 @@ class HierarchicalOptimizationWrapper : public GradientFunction [[nodiscard]] int numProportionalityFactors() const; - [[nodiscard]] std::vector const& getProportionalityFactorIndices() const; + [[nodiscard]] std::vector const& + getProportionalityFactorIndices() const; [[nodiscard]] int numOffsetParameters() const; @@ -221,7 +220,7 @@ class HierarchicalOptimizationWrapper : public GradientFunction private: void init(); /** Objective function of inner optimization problem */ - AmiciSummedGradientFunction *wrapped_function_; + AmiciSummedGradientFunction* wrapped_function_; /** Reads scaling parameter information from HDF5 file */ std::unique_ptr scalingReader; @@ -246,21 +245,19 @@ class HierarchicalOptimizationWrapper : public GradientFunction ErrorModel errorModel = ErrorModel::normal; }; - /** * @brief The HierarchicalOptimizationProblemWrapper class wraps an * OptimizationProblem and hides the analytically optimized parameters (from * starting point, parameter bounds, ...) 
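// For reference, the analytical inner-problem solutions referred to above are,
// for normally distributed noise with one shared sigma per group, the standard
// least-squares/maximum-likelihood estimators. A minimal sketch under that
// assumption (no NaN handling, no log-scaled parameters, which the actual
// implementation additionally has to deal with):
#include <cmath>
#include <cstddef>
#include <vector>

// proportionality factor s minimizing sum_i (y_i - s * h_i)^2
double optimalScaling(
    std::vector<double> const& measured,
    std::vector<double> const& simulatedUnscaled) {
    double numerator = 0.0, denominator = 0.0;
    for (std::size_t i = 0; i < measured.size(); ++i) {
        numerator += measured[i] * simulatedUnscaled[i];
        denominator += simulatedUnscaled[i] * simulatedUnscaled[i];
    }
    return numerator / denominator;
}

// offset b minimizing sum_i (y_i - (h_i + b))^2
double optimalOffset(
    std::vector<double> const& measured,
    std::vector<double> const& simulated) {
    double sum = 0.0;
    for (std::size_t i = 0; i < measured.size(); ++i)
        sum += measured[i] - simulated[i];
    return sum / static_cast<double>(measured.size());
}

// maximum-likelihood sigma for residuals of the already scaled/offset outputs
double optimalSigma(
    std::vector<double> const& measured,
    std::vector<double> const& simulatedScaled) {
    double sumOfSquares = 0.0;
    for (std::size_t i = 0; i < measured.size(); ++i) {
        double const residual = measured[i] - simulatedScaled[i];
        sumOfSquares += residual * residual;
    }
    return std::sqrt(sumOfSquares / static_cast<double>(measured.size()));
}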
* */ -class HierarchicalOptimizationProblemWrapper : public OptimizationProblem -{ +class HierarchicalOptimizationProblemWrapper : public OptimizationProblem { public: HierarchicalOptimizationProblemWrapper() = default; HierarchicalOptimizationProblemWrapper( std::unique_ptr problemToWrap, - const MultiConditionDataProviderHDF5* dataProvider); + MultiConditionDataProviderHDF5 const* dataProvider); HierarchicalOptimizationProblemWrapper( std::unique_ptr problemToWrap, @@ -276,15 +273,14 @@ class HierarchicalOptimizationProblemWrapper : public OptimizationProblem void fillParametersMax(gsl::span buffer) const override; - void fillFilteredParams(std::vector const& fullParams, - gsl::span buffer) const; + void fillFilteredParams( + std::vector const& fullParams, + gsl::span buffer) const; - OptimizationOptions const& getOptimizationOptions() const override - { + OptimizationOptions const& getOptimizationOptions() const override { return wrapped_problem_->getOptimizationOptions(); } - void setOptimizationOptions(OptimizationOptions const& options) override - { + void setOptimizationOptions(OptimizationOptions const& options) override { wrapped_problem_->setOptimizationOptions(options); } @@ -301,8 +297,7 @@ class HierarchicalOptimizationProblemWrapper : public OptimizationProblem * parameters of the inner optimization problem on each function evaluation * which would be hidden from the (outer) optimizer otherwise. */ -class HierarchicalOptimizationReporter : public OptimizationReporter -{ +class HierarchicalOptimizationReporter : public OptimizationReporter { public: HierarchicalOptimizationReporter( HierarchicalOptimizationWrapper* gradFun, @@ -311,28 +306,30 @@ class HierarchicalOptimizationReporter : public OptimizationReporter using GradientFunction::evaluate; - FunctionEvaluationStatus evaluate(gsl::span parameters, - double& fval, - gsl::span gradient, - Logger* logger = nullptr, - double* cpuTime = nullptr) const override; + FunctionEvaluationStatus evaluate( + gsl::span parameters, + double& fval, + gsl::span gradient, + Logger* logger = nullptr, + double* cpuTime = nullptr) const override; // bool starting(gsl::span initialParameters) const override; // TODO: always update final parameters bool iterationFinished( - gsl::span parameters, + gsl::span parameters, double objectiveFunctionValue, - gsl::span objectiveFunctionGradient) const override; + gsl::span objectiveFunctionGradient) const override; bool afterCostFunctionCall( - gsl::span parameters, + gsl::span parameters, double objectiveFunctionValue, gsl::span objectiveFunctionGradient) const override; - void finished(double optimalCost, - gsl::span parameters, - int exitStatus) const override; + void finished( + double optimalCost, + gsl::span parameters, + int exitStatus) const override; std::vector const& getFinalParameters() const override; @@ -357,10 +354,10 @@ class HierarchicalOptimizationReporter : public OptimizationReporter * @param result Buffer to write the filtered list to. Must be at least of * length valuesToFilter.size()-sortedIndicesToExclude.size(). 
*/ -void -fillFilteredParams(std::vector const& valuesToFilter, - const std::vector& sortedIndicesToExclude, - gsl::span result); +void fillFilteredParams( + std::vector const& valuesToFilter, + std::vector const& sortedIndicesToExclude, + gsl::span result); /** * @brief Get value to use for scaling parameter during simulation prior to @@ -368,8 +365,7 @@ fillFilteredParams(std::vector const& valuesToFilter, * @param scaling Expected scale of the parameter * @return default value */ -double -getDefaultScalingFactor(amici::ParameterScaling scaling); +double getDefaultScalingFactor(amici::ParameterScaling scaling); /** * @brief Get value to use for offset parameter during simulation prior to @@ -377,8 +373,7 @@ getDefaultScalingFactor(amici::ParameterScaling scaling); * @param scaling Expected scale of the parameter * @return default value */ -double -getDefaultOffsetParameter(amici::ParameterScaling scaling); +double getDefaultOffsetParameter(amici::ParameterScaling scaling); /** * @brief Compute the proportionality factor for the given observable. @@ -395,45 +390,42 @@ getDefaultOffsetParameter(amici::ParameterScaling scaling); * @param numObservables Number of observables * @return */ -double -computeAnalyticalScalings( +double computeAnalyticalScalings( int scalingIdx, - const std::vector>& modelOutputsUnscaled, - const std::vector>& measurements, - const AnalyticalParameterProvider& scalingReader, + std::vector> const& modelOutputsUnscaled, + std::vector> const& measurements, + AnalyticalParameterProvider const& scalingReader, int numObservables); -double -computeAnalyticalOffsets( +double computeAnalyticalOffsets( int offsetIdx, - const std::vector>& modelOutputsUnscaled, - const std::vector>& measurements, + std::vector> const& modelOutputsUnscaled, + std::vector> const& measurements, AnalyticalParameterProvider& offsetReader, int numObservables); -double -computeAnalyticalSigmas( +double computeAnalyticalSigmas( int sigmaIdx, - const std::vector>& modelOutputsScaled, - const std::vector>& measurements, - const AnalyticalParameterProvider& sigmaReader, + std::vector> const& modelOutputsScaled, + std::vector> const& measurements, + AnalyticalParameterProvider const& sigmaReader, int numObservables, double epsilonAbs = 1e-12, double epsilonRel = 0.01); -void -applyOptimalScaling(int scalingIdx, - double scalingLin, - std::vector>& modelOutputs, - AnalyticalParameterProvider const& scalingReader, - int numObservables); +void applyOptimalScaling( + int scalingIdx, + double scalingLin, + std::vector>& modelOutputs, + AnalyticalParameterProvider const& scalingReader, + int numObservables); -void -applyOptimalOffset(int offsetIdx, - double offsetLin, - std::vector>& modelOutputs, - AnalyticalParameterProvider const& offsetReader, - int numObservables); +void applyOptimalOffset( + int offsetIdx, + double offsetLin, + std::vector>& modelOutputs, + AnalyticalParameterProvider const& offsetReader, + int numObservables); /** * @brief Assemble full parameter vector of wrapped problem from scaling @@ -442,48 +434,50 @@ applyOptimalOffset(int offsetIdx, * @param scalingFactors * @return Full parameter vector for `fun` */ -std::vector -spliceParameters(const gsl::span reducedParameters, - const std::vector& proportionalityFactorIndices, - const std::vector& offsetParameterIndices, - const std::vector& sigmaParameterIndices, - const std::vector& scalingFactors, - const std::vector& offsetParameters, - const std::vector& sigmaParameters); +std::vector spliceParameters( + gsl::span const 
reducedParameters, + std::vector const& proportionalityFactorIndices, + std::vector const& offsetParameterIndices, + std::vector const& sigmaParameterIndices, + std::vector const& scalingFactors, + std::vector const& offsetParameters, + std::vector const& sigmaParameters); /** * @brief Remove inner parameters * @return Outer parameters */ template -std::vector -removeInnerParameters(const gsl::span allParameters, - const std::vector& proportionalityFactorIndices, - const std::vector& offsetParameterIndices, - const std::vector& sigmaParameterIndices) -{ +std::vector removeInnerParameters( + gsl::span const allParameters, + std::vector const& proportionalityFactorIndices, + std::vector const& offsetParameterIndices, + std::vector const& sigmaParameterIndices) { std::vector outerParameters( allParameters.size() - proportionalityFactorIndices.size() - offsetParameterIndices.size() - sigmaParameterIndices.size()); int nextOuterIdx = 0; - for(int idxFull = 0; idxFull < static_cast(allParameters.size()); + for (int idxFull = 0; idxFull < static_cast(allParameters.size()); ++idxFull) { // skip if current parameter is scaling/offset/sigma - if(std::find(proportionalityFactorIndices.begin(), - proportionalityFactorIndices.end(), idxFull) - != std::end(proportionalityFactorIndices)) + if (std::find( + proportionalityFactorIndices.begin(), + proportionalityFactorIndices.end(), + idxFull) != std::end(proportionalityFactorIndices)) continue; - if(std::find(offsetParameterIndices.begin(), - offsetParameterIndices.end(), idxFull) - != std::end(offsetParameterIndices)) + if (std::find( + offsetParameterIndices.begin(), + offsetParameterIndices.end(), + idxFull) != std::end(offsetParameterIndices)) continue; - if(std::find(sigmaParameterIndices.begin(), - sigmaParameterIndices.end(), idxFull) - != std::end(sigmaParameterIndices)) + if (std::find( + sigmaParameterIndices.begin(), + sigmaParameterIndices.end(), + idxFull) != std::end(sigmaParameterIndices)) continue; // otherwise copy @@ -503,11 +497,10 @@ removeInnerParameters(const gsl::span allParameters, * @param parameterPath * @return */ -std::vector -getOuterParameters(std::vector const& fullParameters, - H5::H5File const& parameterFile, - std::string const& parameterPath); - +std::vector getOuterParameters( + std::vector const& fullParameters, + H5::H5File const& parameterFile, + std::string const& parameterPath); /** * @brief Compute negative log-likelihood for normal distribution based on the @@ -517,11 +510,10 @@ getOuterParameters(std::vector const& fullParameters, * @param sigmas * @return */ -double -computeNegLogLikelihood( +double computeNegLogLikelihood( std::vector> const& measurements, std::vector> const& modelOutputsScaled, - const std::vector>& sigmas); + std::vector> const& sigmas); /** * @brief Compute negative log-likelihood for normal distribution based on the @@ -532,10 +524,10 @@ computeNegLogLikelihood( * @return Negative log-likelihood for the given measurements and simulations, * assuming independently normally distributed noise */ -double -computeNegLogLikelihood(std::vector const& measurements, - std::vector const& modelOutputsScaled, - const std::vector& sigmas); +double computeNegLogLikelihood( + std::vector const& measurements, + std::vector const& modelOutputsScaled, + std::vector const& sigmas); /** * @brief If sensitivities are computed w.r.t. 
analytically computed parameters @@ -545,10 +537,10 @@ computeNegLogLikelihood(std::vector const& measurements, * @param analyticalIndices * @param threshold */ -void -checkGradientForAnalyticalParameters(std::vector const& gradient, - std::vector const& analyticalIndices, - double threshold); +void checkGradientForAnalyticalParameters( + std::vector const& gradient, + std::vector const& analyticalIndices, + double threshold); } // namespace parpe diff --git a/include/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.h b/include/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.h index 40ff62f74..fafa3f19e 100644 --- a/include/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.h +++ b/include/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.h @@ -1,21 +1,19 @@ #ifndef HIERARCHICALOPTIMIZATIONANALYTICALPARAMETERPROVIDER_H #define HIERARCHICALOPTIMIZATIONANALYTICALPARAMETERPROVIDER_H -#include #include +#include #include namespace parpe { - /** * @brief The AnalyticalParameterProvider class is an interface for providing * information on optimization parameters to be computed analytically * (proportionality factors, offsets, sigmas, ...). */ -class AnalyticalParameterProvider -{ +class AnalyticalParameterProvider { public: virtual ~AnalyticalParameterProvider() = default; @@ -27,8 +25,8 @@ class AnalyticalParameterProvider * (*not* the optimization parameter index). * @return Vector of condition indices */ - virtual std::vector getConditionsForParameter( - int parameterIndex) const = 0; + virtual std::vector + getConditionsForParameter(int parameterIndex) const = 0; /** * @brief Get vector of observable indices for the specified condition for @@ -36,9 +34,8 @@ class AnalyticalParameterProvider * @param parameterIndex * @return */ - virtual std::vector const& getObservablesForParameter( - int parameterIndex, - int conditionIdx) const = 0; + virtual std::vector const& + getObservablesForParameter(int parameterIndex, int conditionIdx) const = 0; /** * @brief Vector with indices of the of the analytically determined @@ -48,13 +45,12 @@ class AnalyticalParameterProvider virtual std::vector getOptimizationParameterIndices() const = 0; }; -class AnalyticalParameterProviderDefault : public AnalyticalParameterProvider -{ +class AnalyticalParameterProviderDefault : public AnalyticalParameterProvider { public: AnalyticalParameterProviderDefault() = default; - std::vector getConditionsForParameter( - int parameterIndex) const override; + std::vector + getConditionsForParameter(int parameterIndex) const override; std::vector const& getObservablesForParameter( int parameterIndex, @@ -75,8 +71,7 @@ class AnalyticalParameterProviderDefault : public AnalyticalParameterProvider * are to be computed analytically. 
* */ -class AnalyticalParameterHdf5Reader : public AnalyticalParameterProvider -{ +class AnalyticalParameterHdf5Reader : public AnalyticalParameterProvider { public: AnalyticalParameterHdf5Reader() = default; @@ -89,9 +84,10 @@ class AnalyticalParameterHdf5Reader : public AnalyticalParameterProvider * @param mapPath path of to the dataset with the * parameter-observable-condition mapping */ - AnalyticalParameterHdf5Reader(const H5::H5File& file, - std::string analyticalParameterIndicesPath, - std::string mapPath); + AnalyticalParameterHdf5Reader( + const H5::H5File& file, + std::string analyticalParameterIndicesPath, + std::string mapPath); AnalyticalParameterHdf5Reader(AnalyticalParameterHdf5Reader const&) = delete; @@ -104,8 +100,8 @@ class AnalyticalParameterHdf5Reader : public AnalyticalParameterProvider * (*not* the optimization parameter index). * @return Vector of condition indices */ - std::vector getConditionsForParameter( - int parameterIndex) const override; + std::vector + getConditionsForParameter(int parameterIndex) const override; /** * @brief Get vector of observable indices for the specified condition for @@ -144,9 +140,10 @@ class AnalyticalParameterHdf5Reader : public AnalyticalParameterProvider * - observableIdx: index of model output */ void readParameterConditionObservableMappingFromFile(); - std::vector readRawMap(const H5::DataSet &dataset, - hsize_t& nRows, - hsize_t& nCols) const; + std::vector readRawMap( + const H5::DataSet& dataset, + hsize_t& nRows, + hsize_t& nCols) const; H5::H5File file; std::string rootPath; @@ -157,6 +154,5 @@ class AnalyticalParameterHdf5Reader : public AnalyticalParameterProvider std::vector>> mapping; }; - -} +} // namespace parpe #endif // HIERARCHICALOPTIMIZATIONANALYTICALPARAMETERPROVIDER_H diff --git a/include/parpeamici/multiConditionDataProvider.h b/include/parpeamici/multiConditionDataProvider.h index e0d5522bd..620802637 100644 --- a/include/parpeamici/multiConditionDataProvider.h +++ b/include/parpeamici/multiConditionDataProvider.h @@ -19,8 +19,7 @@ namespace parpe { /** * @brief The MultiConditionDataProvider interface */ -class MultiConditionDataProvider -{ +class MultiConditionDataProvider { public: virtual ~MultiConditionDataProvider() = default; @@ -37,58 +36,57 @@ class MultiConditionDataProvider * @param conditionIdx * @return Mapping vector */ - virtual std::vector getSimulationToOptimizationParameterMapping( - int conditionIdx) const = 0; + virtual std::vector + getSimulationToOptimizationParameterMapping(int conditionIdx) const = 0; virtual void mapSimulationToOptimizationGradientAddMultiply( - int conditionIdx, - gsl::span simulation, - gsl::span optimization, - gsl::span parameters, - double coefficient = 1.0) const = 0; + int conditionIdx, + gsl::span simulation, + gsl::span optimization, + gsl::span parameters, + double coefficient = 1.0) const = 0; virtual void mapAndSetOptimizationToSimulationVariables( - int conditionIdx, - gsl::span optimization, - gsl::span simulation, - gsl::span optimizationScale, - gsl::span simulationScale) const = 0; + int conditionIdx, + gsl::span optimization, + gsl::span simulation, + gsl::span optimizationScale, + gsl::span simulationScale) const = 0; /** * @brief Get the parameter scale for the given optimization parameter * @param simulationIdx * @return Parameter scale */ - virtual amici::ParameterScaling getParameterScaleOpt( - int parameterIdx) const = 0; + virtual amici::ParameterScaling + getParameterScaleOpt(int parameterIdx) const = 0; - virtual std::vector 
getParameterScaleOpt() - const = 0; + virtual std::vector + getParameterScaleOpt() const = 0; /** * @brief Get the parameter scale vector for the given simulation * @param simulationIdx * @return */ - virtual std::vector getParameterScaleSim( - int simulationIdx) const = 0; + virtual std::vector + getParameterScaleSim(int simulationIdx) const = 0; /** * @brief Get the parameter scale for the given parameter and simulation * @param simulationIdx * @return */ - virtual amici::ParameterScaling getParameterScaleSim( - int simulationIdx, - int modelParameterIdx) const = 0; + virtual amici::ParameterScaling + getParameterScaleSim(int simulationIdx, int modelParameterIdx) const = 0; virtual void updateSimulationParametersAndScale( - int conditionIndex, - gsl::span optimizationParams, - amici::Model& model) const = 0; + int conditionIndex, + gsl::span optimizationParams, + amici::Model& model) const = 0; - virtual std::unique_ptr getExperimentalDataForCondition( - int conditionIdx) const = 0; + virtual std::unique_ptr + getExperimentalDataForCondition(int conditionIdx) const = 0; virtual std::vector> getAllMeasurements() const = 0; virtual std::vector> getAllSigmas() const = 0; @@ -110,17 +108,16 @@ class MultiConditionDataProvider virtual std::unique_ptr getSolver() const = 0; }; - /** * @brief In-memory data. * * !!Very limited implementation, currently only for testing!! */ -class MultiConditionDataProviderDefault : public MultiConditionDataProvider -{ +class MultiConditionDataProviderDefault : public MultiConditionDataProvider { public: - MultiConditionDataProviderDefault(std::unique_ptr model, - std::unique_ptr solver); + MultiConditionDataProviderDefault( + std::unique_ptr model, + std::unique_ptr solver); ~MultiConditionDataProviderDefault() override = default; @@ -135,45 +132,43 @@ class MultiConditionDataProviderDefault : public MultiConditionDataProvider int getNumberOfSimulationConditions() const override; std::vector getSimulationToOptimizationParameterMapping( - int conditionIdx) const override; + int conditionIdx) const override; void mapSimulationToOptimizationGradientAddMultiply( - int conditionIdx, - gsl::span simulation, - gsl::span optimization, - gsl::span parameters, - double coefficient = 1.0) const override; + int conditionIdx, + gsl::span simulation, + gsl::span optimization, + gsl::span parameters, + double coefficient = 1.0) const override; void mapAndSetOptimizationToSimulationVariables( - int conditionIdx, - gsl::span optimization, - gsl::span simulation, - gsl::span optimizationScale, - gsl::span simulationScale) const override; + int conditionIdx, + gsl::span optimization, + gsl::span simulation, + gsl::span optimizationScale, + gsl::span simulationScale) const override; - std::vector getParameterScaleOpt() - const override; + std::vector getParameterScaleOpt() const override; - amici::ParameterScaling getParameterScaleOpt( - int optimizationParameterIndex) const override; + amici::ParameterScaling + getParameterScaleOpt(int optimizationParameterIndex) const override; amici::ParameterScaling getParameterScaleSim( - int simulationIdx, - int optimizationParameterIndex) const override; + int simulationIdx, + int optimizationParameterIndex) const override; - std::vector getParameterScaleSim( - int) const override; + std::vector + getParameterScaleSim(int) const override; void updateSimulationParametersAndScale( - int conditionIndex, - gsl::span optimizationParams, - amici::Model& model) const override; + int conditionIndex, + gsl::span optimizationParams, + amici::Model& 
model) const override; - std::unique_ptr getExperimentalDataForCondition( - int conditionIdx) const override; + std::unique_ptr + getExperimentalDataForCondition(int conditionIdx) const override; - std::vector> getAllMeasurements() - const override; + std::vector> getAllMeasurements() const override; std::vector> getAllSigmas() const override; /** @@ -220,8 +215,7 @@ class MultiConditionDataProviderDefault : public MultiConditionDataProvider */ // TODO split; separate optimization from simulation -class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider -{ +class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider { public: MultiConditionDataProviderHDF5() = default; @@ -232,8 +226,9 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider * @param hdf5Filename Path to the HDF5 file from which the data is to be * read */ - MultiConditionDataProviderHDF5(std::unique_ptr model, - const std::string& hdf5Filename); + MultiConditionDataProviderHDF5( + std::unique_ptr model, + std::string const& hdf5Filename); /** * @brief See above. @@ -242,12 +237,13 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider * @param rootPath The name of the HDF5 group under which the data is * stored. */ - MultiConditionDataProviderHDF5(std::unique_ptr model, - std::string const& hdf5Filename, - std::string const& rootPath); + MultiConditionDataProviderHDF5( + std::unique_ptr model, + std::string const& hdf5Filename, + std::string const& rootPath); MultiConditionDataProviderHDF5(MultiConditionDataProviderHDF5 const&) = - delete; + delete; ~MultiConditionDataProviderHDF5() override; @@ -268,34 +264,33 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider * @return */ std::vector getSimulationToOptimizationParameterMapping( - int conditionIdx) const override; + int conditionIdx) const override; void mapSimulationToOptimizationGradientAddMultiply( - int conditionIdx, - gsl::span simulation, - gsl::span optimization, - gsl::span parameters, - double coefficient = 1.0) const override; + int conditionIdx, + gsl::span simulation, + gsl::span optimization, + gsl::span parameters, + double coefficient = 1.0) const override; void mapAndSetOptimizationToSimulationVariables( - int conditionIdx, - gsl::span optimization, - gsl::span simulation, - gsl::span optimizationScale, - gsl::span simulationScale) const override; + int conditionIdx, + gsl::span optimization, + gsl::span simulation, + gsl::span optimizationScale, + gsl::span simulationScale) const override; - std::vector getParameterScaleOpt() - const override; + std::vector getParameterScaleOpt() const override; - amici::ParameterScaling getParameterScaleOpt( - int parameterIdx) const override; + amici::ParameterScaling + getParameterScaleOpt(int parameterIdx) const override; - std::vector getParameterScaleSim( - int simulationIdx) const override; + std::vector + getParameterScaleSim(int simulationIdx) const override; amici::ParameterScaling getParameterScaleSim( - int simulationIdx, - int modelParameterIdx) const override; + int simulationIdx, + int modelParameterIdx) const override; /** * @brief Check if the data in the HDF5 file has consistent dimensions. 
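For reference, a minimal usage sketch of the HDF5-backed data provider declared in this header: construct it, then map the outer optimization parameter vector onto each simulation condition. Member names follow the declarations shown here; template arguments elided in this excerpt (e.g. std::unique_ptr<amici::Model>, gsl::span<double const>) are filled in as assumptions and may differ from the actual headers.

    #include <parpeamici/multiConditionDataProvider.h>
    #include <amici/amici.h>
    #include <memory>
    #include <string>
    #include <vector>

    // Sketch only: iterate over all simulation conditions of an input file.
    void sketchProviderUsage(std::unique_ptr<amici::Model> model,
                             std::string const& hdf5Filename) {
        parpe::MultiConditionDataProviderHDF5 dp(std::move(model), hdf5Filename);

        // Outer (optimization) parameter vector; values are placeholders.
        std::vector<double> optimizationParams(
            dp.getNumOptimizationParameters(), 0.1);

        auto simModel = dp.getModel(); // independent model instance for simulation
        for (int conditionIdx = 0;
             conditionIdx < dp.getNumberOfSimulationConditions(); ++conditionIdx) {
            // Set this condition's simulation parameters and parameter scales.
            dp.updateSimulationParametersAndScale(
                conditionIdx, optimizationParams, *simModel);

            // Measurements and sigmas for this condition as amici::ExpData.
            auto edata = dp.getExperimentalDataForCondition(conditionIdx);
            (void)edata;
        }
    }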
@@ -305,39 +300,41 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider // void printInfo() const; - virtual void readFixedSimulationParameters(int conditionIdx, - gsl::span buffer) const; + virtual void readFixedSimulationParameters( + int conditionIdx, + gsl::span buffer) const; - std::unique_ptr getExperimentalDataForCondition( - int simulationIdx) const override; + std::unique_ptr + getExperimentalDataForCondition(int simulationIdx) const override; /** * @brief Get list of parameters w.r.t. which we need sensitivities * @param mapping Mapping from model simulation to objective parameters * @return AMICI's 'plist' */ - std::vector getSensitivityParameterList(std::vector const& mapping) const; + std::vector + getSensitivityParameterList(std::vector const& mapping) const; std::vector> getAllMeasurements() const override; std::vector> getAllSigmas() const override; std::vector getSigmaForSimulationIndex(int simulationIdx) const; - std::vector getMeasurementForSimulationIndex( - int simulationIdx) const; + std::vector + getMeasurementForSimulationIndex(int simulationIdx) const; /** * @brief Writes lower parameter bounds into the provided buffer * @param buffer allocated memory to write parameter bounds */ - virtual void getOptimizationParametersLowerBounds( - gsl::span buffer) const; + virtual void + getOptimizationParametersLowerBounds(gsl::span buffer) const; /** * @brief Writes upper parameter bounds into the provided buffer * @param buffer allocated memory to write parameter bounds */ - virtual void getOptimizationParametersUpperBounds( - gsl::span buffer) const; + virtual void + getOptimizationParametersUpperBounds(gsl::span buffer) const; /** * @brief Returns the number of optimization parameters of this problem @@ -364,19 +361,18 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider * @param model Model on which to set parameter values and scale */ void updateSimulationParametersAndScale( - int simulationIdx, - gsl::span optimizationParams, - amici::Model& model) const override; + int simulationIdx, + gsl::span optimizationParams, + amici::Model& model) const override; void copyInputData(const H5::H5File& target); void getSimAndPreeqConditions( - const int simulationIdx, - int& preequilibrationConditionIdx, - int& simulationConditionIdx - ) const; + int const simulationIdx, + int& preequilibrationConditionIdx, + int& simulationConditionIdx) const; - std::vector getReinitializationIndices(const int simulationIdx) const; + std::vector getReinitializationIndices(int const simulationIdx) const; /** * @brief Get a copy of the HDF5 file handle. @@ -397,9 +393,9 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider * parameters should be taken. * @param edata The object to be updated. 
*/ - void updateFixedSimulationParameters(int simulationIdx, - amici::ExpData& edata) const; - + void updateFixedSimulationParameters( + int simulationIdx, + amici::ExpData& edata) const; private: /** @@ -435,11 +431,11 @@ class MultiConditionDataProviderHDF5 : public MultiConditionDataProvider std::unique_ptr optimization_options_; }; -double -applyChainRule(double gradient, - double parameter, - amici::ParameterScaling oldScale, - amici::ParameterScaling newScale); +double applyChainRule( + double gradient, + double parameter, + amici::ParameterScaling oldScale, + amici::ParameterScaling newScale); } // namespace parpe diff --git a/include/parpeamici/multiConditionProblem.h b/include/parpeamici/multiConditionProblem.h index 7ce634ba4..cb2ae154e 100644 --- a/include/parpeamici/multiConditionProblem.h +++ b/include/parpeamici/multiConditionProblem.h @@ -1,11 +1,11 @@ #ifndef PARPE_AMICI_MULTI_CONDITION_PROBLEM_H #define PARPE_AMICI_MULTI_CONDITION_PROBLEM_H +#include #include +#include #include #include -#include -#include #include #include @@ -38,15 +38,16 @@ class MultiConditionDataProvider; * @return Simulation results */ -AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation(amici::Solver const &solver, - amici::Model &model, - int conditionIdx, - int jobId, - const MultiConditionDataProvider *dataProvider, - OptimizationResultWriter *resultWriter, - bool logLineSearch, - Logger *logger, - bool sendStates = false); +AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation( + amici::Solver const& solver, + amici::Model& model, + int conditionIdx, + int jobId, + MultiConditionDataProvider const* dataProvider, + OptimizationResultWriter* resultWriter, + bool logLineSearch, + Logger* logger, + bool sendStates = false); /** * @brief Run simulations (no gradient) with given parameters and collect @@ -65,15 +66,17 @@ AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation(amici::Solve * @return Simulation status */ FunctionEvaluationStatus getModelOutputsAndSigmas( - MultiConditionDataProvider *dataProvider, - LoadBalancerMaster *loadBalancer, - int maxSimulationsPerPackage, - OptimizationResultWriter *resultWriter, - bool logLineSearch, - gsl::span parameters, - std::vector > &modelOutputs, - std::vector > &modelSigmas, - Logger *logger, double *cpuTime, bool sendStates); + MultiConditionDataProvider* dataProvider, + LoadBalancerMaster* loadBalancer, + int maxSimulationsPerPackage, + OptimizationResultWriter* resultWriter, + bool logLineSearch, + gsl::span parameters, + std::vector>& modelOutputs, + std::vector>& modelSigmas, + Logger* logger, + double* cpuTime, + bool sendStates); /** * @brief Callback function for LoadBalancer @@ -83,10 +86,13 @@ FunctionEvaluationStatus getModelOutputsAndSigmas( * @param buffer In/out: message buffer * @param jobId: In: Identifier of the job (unique up to INT_MAX) */ -void messageHandler(MultiConditionDataProvider *dataProvider, - OptimizationResultWriter *resultWriter, - bool logLineSearch, - std::vector &buffer, int jobId, bool sendStates); +void messageHandler( + MultiConditionDataProvider* dataProvider, + OptimizationResultWriter* resultWriter, + bool logLineSearch, + std::vector& buffer, + int jobId, + bool sendStates); /** * @brief The AmiciSummedGradientFunction class represents a cost function @@ -94,7 +100,7 @@ void messageHandler(MultiConditionDataProvider *dataProvider, */ class AmiciSummedGradientFunction : public SummedGradientFunction { -public: + public: using WorkPackage = 
AmiciSimulationRunner::AmiciWorkPackageSimple; using ResultPackage = AmiciSimulationRunner::AmiciResultPackageSimple; using ResultMap = std::map; @@ -107,27 +113,27 @@ class AmiciSummedGradientFunction : public SummedGradientFunction { * @param resultWriter */ AmiciSummedGradientFunction( - MultiConditionDataProvider *dataProvider, - LoadBalancerMaster *loadBalancer, - OptimizationResultWriter *resultWriter); + MultiConditionDataProvider* dataProvider, + LoadBalancerMaster* loadBalancer, + OptimizationResultWriter* resultWriter); ~AmiciSummedGradientFunction() override = default; FunctionEvaluationStatus evaluate( - gsl::span parameters, - int dataset, - double &fval, - gsl::span gradient, - Logger *logger, - double *cpuTime) const override; + gsl::span parameters, + int dataset, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const override; FunctionEvaluationStatus evaluate( - gsl::span parameters, - std::vector datasets, - double &fval, - gsl::span gradient, - Logger *logger, - double *cpuTime) const override; + gsl::span parameters, + std::vector datasets, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const override; /** * @brief Number of optimization parameters @@ -148,26 +154,28 @@ class AmiciSummedGradientFunction : public SummedGradientFunction { */ virtual FunctionEvaluationStatus getModelOutputsAndSigmas( gsl::span parameters, - std::vector > &modelOutputs, - std::vector > &modelSigmas, - Logger *logger, - double *cpuTime) const; + std::vector>& modelOutputs, + std::vector>& modelSigmas, + Logger* logger, + double* cpuTime) const; - [[nodiscard]] virtual std::vector> getAllMeasurements() const; + [[nodiscard]] virtual std::vector> + getAllMeasurements() const; /** * @brief Callback function for LoadBalancer * @param buffer In/out: message buffer * @param jobId: In: Identifier of the job (unique up to INT_MAX) */ - virtual void messageHandler(std::vector &buffer, int jobId) const; + virtual void messageHandler(std::vector& buffer, int jobId) const; - [[nodiscard]] virtual amici::ParameterScaling getParameterScaling(int parameterIndex) const; + [[nodiscard]] virtual amici::ParameterScaling + getParameterScaling(int parameterIndex) const; /** Include model states in result package */ bool sendStates = false; -protected:// for testing + protected: // for testing AmiciSummedGradientFunction() = default; /** @@ -179,12 +187,13 @@ class AmiciSummedGradientFunction : public SummedGradientFunction { * @param numDataIndices * @return Simulation status, != 0 indicates failure */ - virtual int runSimulations(gsl::span optimizationParameters, - double &nllh, - gsl::span objectiveFunctionGradient, - const std::vector &dataIndices, - Logger *logger, - double *cpuTime) const; + virtual int runSimulations( + gsl::span optimizationParameters, + double& nllh, + gsl::span objectiveFunctionGradient, + std::vector const& dataIndices, + Logger* logger, + double* cpuTime) const; /** * @brief Aggregates log-likelihood received from workers. 
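For reference, a minimal sketch of evaluating the summed cost function declared above on a subset of simulation conditions (dataset indices). Construction arguments mirror the constructor shown here; elided template arguments and whether null pointer arguments are permitted are assumptions.

    #include <parpeamici/multiConditionProblem.h>
    #include <parpecommon/functions.h>
    #include <vector>

    // Sketch only: sum the cost over a few conditions.
    parpe::FunctionEvaluationStatus sketchEvaluateSubset(
        parpe::MultiConditionDataProvider* dataProvider,
        parpe::LoadBalancerMaster* loadBalancer,
        parpe::OptimizationResultWriter* resultWriter) {
        parpe::AmiciSummedGradientFunction fun(
            dataProvider, loadBalancer, resultWriter);

        std::vector<double> parameters(fun.numParameters(), 0.1);
        std::vector<int> conditions{0, 1, 2}; // conditions to include in the sum
        double fval = 0.0;
        std::vector<double> gradient(parameters.size(), 0.0);

        // Sums the negative log-likelihood (and gradient) over the requested
        // conditions; logger and cpuTime are left unset here.
        return fun.evaluate(parameters, conditions, fval, gradient,
                            nullptr, nullptr);
    }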
@@ -197,12 +206,12 @@ class AmiciSummedGradientFunction : public SummedGradientFunction { * @return */ - int aggregateLikelihood(JobData &data, double &negLogLikelihood, - gsl::span negLogLikelihoodGradient, - double &simulationTimeInS, - gsl::span optimizationParameters - ) const; - + int aggregateLikelihood( + JobData& data, + double& negLogLikelihood, + gsl::span negLogLikelihoodGradient, + double& simulationTimeInS, + gsl::span optimizationParameters) const; /** * @brief Aggregates log-likelihood gradient received from workers. @@ -213,50 +222,46 @@ class AmiciSummedGradientFunction : public SummedGradientFunction { */ void addSimulationGradientToObjectiveFunctionGradient( - int conditionIdx, - gsl::span simulationGradient, - gsl::span objectiveFunctionGradient, - gsl::span parameters) const; + int conditionIdx, + gsl::span simulationGradient, + gsl::span objectiveFunctionGradient, + gsl::span parameters) const; void setSensitivityOptions(bool sensiRequired) const; -private: + private: // TODO: make owning - MultiConditionDataProvider *dataProvider = nullptr; + MultiConditionDataProvider* dataProvider = nullptr; // Non-owning - LoadBalancerMaster *loadBalancer = nullptr; + LoadBalancerMaster* loadBalancer = nullptr; std::unique_ptr model; std::unique_ptr solver; /** For saving sensitivity options which are changed depending on whether * gradient is needed */ std::unique_ptr solverOriginal; - OptimizationResultWriter *resultWriter = nullptr; // TODO: owning? + OptimizationResultWriter* resultWriter = nullptr; // TODO: owning? bool logLineSearch = false; int maxSimulationsPerPackage = 8; int maxGradientSimulationsPerPackage = 1; }; - - /** * @brief The MultiConditionProblem class represents an optimization problem * based on an MultiConditionGradientFunction (AMICI ODE model) and * MultiConditionDataProvider */ -class MultiConditionProblem - : public MinibatchOptimizationProblem -{ +class MultiConditionProblem : public MinibatchOptimizationProblem { public: MultiConditionProblem() = default; - explicit MultiConditionProblem(MultiConditionDataProvider *dp); + explicit MultiConditionProblem(MultiConditionDataProvider* dp); MultiConditionProblem( - MultiConditionDataProvider *dp, - LoadBalancerMaster *loadBalancer, - std::unique_ptr logger, - std::unique_ptr resultWriter); + MultiConditionDataProvider* dp, + LoadBalancerMaster* loadBalancer, + std::unique_ptr logger, + std::unique_ptr resultWriter); ~MultiConditionProblem() override = default; @@ -266,13 +271,14 @@ class MultiConditionProblem */ virtual int earlyStopping(); - MultiConditionDataProvider *getDataProvider(); - OptimizationResultWriter *getResultWriter(); + MultiConditionDataProvider* getDataProvider(); + OptimizationResultWriter* getResultWriter(); - // virtual std::unique_ptr getInitialParameters(int multiStartIndex) const override; + // virtual std::unique_ptr getInitialParameters(int + // multiStartIndex) const override; - void setInitialParameters(const std::vector &startingPoint); - void setParametersMin(const std::vector &lowerBounds); + void setInitialParameters(std::vector const& startingPoint); + void setParametersMin(std::vector const& lowerBounds); void setParametersMax(std::vector const& upperBounds); void fillParametersMin(gsl::span buffer) const override; @@ -283,10 +289,10 @@ class MultiConditionProblem std::vector getTrainingData() const override; -private: - //TODO std::unique_ptr validationProblem; + private: + // TODO std::unique_ptr validationProblem; - MultiConditionDataProvider *dataProvider = nullptr; + 
MultiConditionDataProvider* dataProvider = nullptr; std::unique_ptr resultWriter; @@ -295,9 +301,6 @@ class MultiConditionProblem std::vector parametersMax; }; - - - /** * @brief The MultiConditionProblemGeneratorForMultiStart class generates new * MultiConditionProblem instances with proper DataProviders for multi-start @@ -307,34 +310,40 @@ class MultiConditionProblemMultiStartOptimizationProblem : public MultiStartOptimizationProblem { public: MultiConditionProblemMultiStartOptimizationProblem( - MultiConditionDataProviderHDF5 *dp, - OptimizationOptions options, - OptimizationResultWriter *resultWriter, - LoadBalancerMaster *loadBalancer, - std::unique_ptr logger); - + MultiConditionDataProviderHDF5* dp, + OptimizationOptions options, + OptimizationResultWriter* resultWriter, + LoadBalancerMaster* loadBalancer, + std::unique_ptr logger); int getNumberOfStarts() const override; bool restartOnFailure() const override; - std::unique_ptr getLocalProblem( - int multiStartIndex) const override; + std::unique_ptr + getLocalProblem(int multiStartIndex) const override; -private: - MultiConditionDataProviderHDF5 *data_provider_ = nullptr; + private: + MultiConditionDataProviderHDF5* data_provider_ = nullptr; OptimizationOptions options_; - OptimizationResultWriter *result_writer_ = nullptr; - LoadBalancerMaster *load_balancer_ = nullptr; + OptimizationResultWriter* result_writer_ = nullptr; + LoadBalancerMaster* load_balancer_ = nullptr; std::unique_ptr logger_; }; - -void saveSimulation(H5::H5File const& file, const std::string &pathStr, - const std::vector ¶meters, double llh, - gsl::span gradient, double timeElapsedInSeconds, gsl::span, gsl::span, gsl::span, - int jobId, int status, const std::string &label); - +void saveSimulation( + H5::H5File const& file, + std::string const& pathStr, + std::vector const& parameters, + double llh, + gsl::span gradient, + double timeElapsedInSeconds, + gsl::span, + gsl::span, + gsl::span, + int jobId, + int status, + std::string const& label); } // namespace parpe diff --git a/include/parpeamici/optimizationApplication.h b/include/parpeamici/optimizationApplication.h index 8a6e79d9d..034ef8acc 100644 --- a/include/parpeamici/optimizationApplication.h +++ b/include/parpeamici/optimizationApplication.h @@ -1,9 +1,9 @@ #ifndef OPTIMIZATIONAPPLICATION_H #define OPTIMIZATIONAPPLICATION_H -#include -#include #include +#include +#include #ifdef PARPE_ENABLE_MPI #include @@ -41,8 +41,9 @@ class OptimizationApplication { * @param inFileArgument * @param outFileArgument */ - virtual void initProblem(std::string const& inFileArgument, - std::string const& outFileArgument) = 0; + virtual void initProblem( + std::string const& inFileArgument, + std::string const& outFileArgument) = 0; /** * @brief Start the optimization run. Must only be called once. @@ -50,7 +51,7 @@ class OptimizationApplication { * Must be called before any other functions. 
* @return status code; 0 on success */ - int run(int argc, char **argv); + int run(int argc, char** argv); /** * @brief This is run by the MPI rank 0 process when started with multiple @@ -72,7 +73,6 @@ class OptimizationApplication { virtual void runSingleProcess(); protected: - /** * @brief Receives and writes the total programm runtime * @param begin @@ -85,7 +85,7 @@ class OptimizationApplication { * @return Result file name */ std::string - processResultFilenameCommandLineArgument(const char *commandLineArg); + processResultFilenameCommandLineArgument(char const* commandLineArg); /** * @brief Are we running with MPI and are we master process? @@ -104,7 +104,7 @@ class OptimizationApplication { * @param argc * @param argv */ - static void initMPI(int *argc, char ***argv); + static void initMPI(int* argc, char*** argv); /** * @brief Parse command line options before MPI_INIT is potentially called. @@ -116,7 +116,7 @@ class OptimizationApplication { * @param argv * @return */ - virtual int parseCliOptionsPreMpiInit(int argc, char **argv); + virtual int parseCliOptionsPreMpiInit(int argc, char** argv); /** * @brief Parse command line options after MPI_Init is called. @@ -126,7 +126,7 @@ class OptimizationApplication { * @param argv * @return */ - virtual int parseCliOptionsPostMpiInit(int argc, char **argv); + virtual int parseCliOptionsPostMpiInit(int argc, char** argv); /** * @brief Print CLI usage @@ -134,21 +134,21 @@ class OptimizationApplication { */ virtual void printUsage(char* const argZero); - virtual void logParPEVersion(const H5::H5File &file) const; + virtual void logParPEVersion(const H5::H5File& file) const; -private: + private: /** * @brief initialize MPI, mutexes, ... * @param argc * @param argv */ - int init(int argc, char **argv); + int init(int argc, char** argv); void runMultiStarts() const; -protected: + protected: // command line option parsing - const char *shortOptions = "dhvmt:o:s:"; + char const* shortOptions = "dhvmt:o:s:"; struct option const longOptions[9] = { {"debug", no_argument, nullptr, 'd'}, {"print-worklist", no_argument, nullptr, 'p'}, @@ -160,18 +160,16 @@ class OptimizationApplication { {"first-start-idx", required_argument, nullptr, 's'}, {nullptr, 0, nullptr, 0}}; - enum class OperationType { - parameterEstimation, - gradientCheck - }; + enum class OperationType { parameterEstimation, gradientCheck }; std::string dataFileName; std::string resultFileName; - int first_start_idx {0}; + int first_start_idx{0}; // the need to be filled in by sub - std::unique_ptr multiStartOptimizationProblem; + std::unique_ptr + multiStartOptimizationProblem; std::unique_ptr problem; H5::H5File h5File = 0; OperationType operationType = OperationType::parameterEstimation; @@ -179,14 +177,12 @@ class OptimizationApplication { bool withMPI = false; }; - /** * @brief CPU time for whole application run * @param file * @param timeInSeconds */ -void saveTotalCpuTime(const H5::H5File &file, const double timeInSeconds); - +void saveTotalCpuTime(const H5::H5File& file, double const timeInSeconds); } // namespace parpe diff --git a/include/parpeamici/simulationResultWriter.h b/include/parpeamici/simulationResultWriter.h index 7fccdaef2..b7584140b 100644 --- a/include/parpeamici/simulationResultWriter.h +++ b/include/parpeamici/simulationResultWriter.h @@ -1,11 +1,11 @@ #ifndef PARPE_AMICI_SIMULATION_RESULT_WRITER_H #define PARPE_AMICI_SIMULATION_RESULT_WRITER_H -#include #include +#include -#include #include +#include namespace parpe { @@ -29,8 +29,7 @@ namespace parpe { class 
SimulationResultWriter { -public: - + public: SimulationResultWriter() = default; /** @@ -38,15 +37,16 @@ class SimulationResultWriter { * @param file HDF5 file object to write to * @param rootPath Path prefix inside HDF5 file */ - SimulationResultWriter(const H5::H5File &file, std::string rootPath); + SimulationResultWriter(const H5::H5File& file, std::string rootPath); /** * @brief SimulationResultWriter * @param hdf5FileName HDF5 file to create or open for appending * @param rootPath Path prefix inside HDF5 file */ - SimulationResultWriter(std::string const& hdf5FileName, - std::string rootPath); + SimulationResultWriter( + std::string const& hdf5FileName, + std::string rootPath); // Implement me SimulationResultWriter(SimulationResultWriter const&) = delete; @@ -71,24 +71,34 @@ class SimulationResultWriter { * position of the result data sets (-> createDatasets) */ - void saveSimulationResults(const amici::ExpData *edata, - const amici::ReturnData *rdata, - int simulationIdx); + void saveSimulationResults( + amici::ExpData const* edata, + amici::ReturnData const* rdata, + int simulationIdx); - void saveTimepoints(gsl::span timepoints, - int simulationIdx) const; + void + saveTimepoints(gsl::span timepoints, int simulationIdx) const; - void saveMeasurements(gsl::span measurements, int nt, - int nytrue, int simulationIdx) const; + void saveMeasurements( + gsl::span measurements, + int nt, + int nytrue, + int simulationIdx) const; - void saveModelOutputs(gsl::span outputs, int nt, - int nytrue, int simulationIdx) const; + void saveModelOutputs( + gsl::span outputs, + int nt, + int nytrue, + int simulationIdx) const; - void saveStates(gsl::span states, int nt, int nx, - int simulationIdx) const; + void saveStates( + gsl::span states, + int nt, + int nx, + int simulationIdx) const; - void saveParameters(gsl::span parameters, - int simulationIdx) const; + void + saveParameters(gsl::span parameters, int simulationIdx) const; void saveLikelihood(double llh, int simulationIdx) const; @@ -96,11 +106,11 @@ class SimulationResultWriter { bool saveX = false; bool saveLlh = false; -// bool saveSllh = false; + // bool saveSllh = false; bool saveYSim = false; bool saveYMes = false; bool save_parameters_ = false; -// bool saveK = false; + // bool saveK = false; std::string yMesPath; std::string ySimPath; @@ -109,7 +119,7 @@ class SimulationResultWriter { std::string timePath; std::string parametersPath; -private: + private: void updatePaths(); std::string rootPath; diff --git a/include/parpeamici/standaloneSimulator.h b/include/parpeamici/standaloneSimulator.h index dd817f317..2f39565c0 100644 --- a/include/parpeamici/standaloneSimulator.h +++ b/include/parpeamici/standaloneSimulator.h @@ -22,8 +22,7 @@ namespace parpe { * trajectory of all multi-start optimization * runs */ -class StandaloneSimulator -{ +class StandaloneSimulator { public: explicit StandaloneSimulator(MultiConditionDataProvider* dp); @@ -40,12 +39,14 @@ class StandaloneSimulator * optimization * @return Number of errors encountered */ - int run(const std::string& resultFile, - const std::string& resultPath, - std::map &optimizationParameters, - LoadBalancerMaster* loadBalancer, - const H5::H5File& conditionFile, - std::string conditionFilePath, bool computeInnerParameters); + int + run(std::string const& resultFile, + std::string const& resultPath, + std::map& optimizationParameters, + LoadBalancerMaster* loadBalancer, + const H5::H5File& conditionFile, + std::string conditionFilePath, + bool computeInnerParameters); void 
messageHandler(std::vector& buffer, int jobId); @@ -62,9 +63,9 @@ class StandaloneSimulator // enum class SimulatorOpType {finalParameters}; -std::pair -getFunctionEvaluationWithMinimalCost(std::string const& datasetPath, - H5::H5File const& file); +std::pair getFunctionEvaluationWithMinimalCost( + std::string const& datasetPath, + H5::H5File const& file); /** * @brief Read the final parameter set from parPE result file for the given @@ -74,46 +75,45 @@ getFunctionEvaluationWithMinimalCost(std::string const& datasetPath, * @return The final parameter vector */ std::vector -getFinalParameters(const std::string& startIndex, const H5::H5File& file); +getFinalParameters(std::string const& startIndex, const H5::H5File& file); std::vector> -getParameterTrajectory(const std::string& startIndex, H5::H5File const& file); - -int -getNumStarts(const H5::H5File& file, const std::string& rootPath = "/"); - -int -runFinalParameters(parpe::StandaloneSimulator& sim, - const std::string& conditionFileName, - const std::string&, - const std::string& parameterFileName, - const std::string& parameterFilePath, - const std::string& resultFileName, - const std::string& resultPath, - parpe::LoadBalancerMaster* loadBalancer, - bool computeInnerParameters); - -int -runAlongTrajectory(parpe::StandaloneSimulator& sim, - const std::string& conditionFileName, - const std::string& conditionFilePath, - const std::string& parameterFileName, - const std::string& parameterFilePath, - std::string const& resultFileName, - std::string const& resultPath, - parpe::LoadBalancerMaster* loadBalancer, - bool computeInnerParameters); - -int -runSimulator(MultiConditionDataProvider& dp, - std::string const& simulationMode, - const std::string& conditionFileName, - const std::string& conditionFilePath, - const std::string& parameterFileName, - const std::string& parameterFilePath, - std::string const& resultFileName, - std::string const& resultPath, - bool computeInnerParameters); +getParameterTrajectory(std::string const& startIndex, H5::H5File const& file); + +int getNumStarts(const H5::H5File& file, std::string const& rootPath = "/"); + +int runFinalParameters( + parpe::StandaloneSimulator& sim, + std::string const& conditionFileName, + std::string const&, + std::string const& parameterFileName, + std::string const& parameterFilePath, + std::string const& resultFileName, + std::string const& resultPath, + parpe::LoadBalancerMaster* loadBalancer, + bool computeInnerParameters); + +int runAlongTrajectory( + parpe::StandaloneSimulator& sim, + std::string const& conditionFileName, + std::string const& conditionFilePath, + std::string const& parameterFileName, + std::string const& parameterFilePath, + std::string const& resultFileName, + std::string const& resultPath, + parpe::LoadBalancerMaster* loadBalancer, + bool computeInnerParameters); + +int runSimulator( + MultiConditionDataProvider& dp, + std::string const& simulationMode, + std::string const& conditionFileName, + std::string const& conditionFilePath, + std::string const& parameterFileName, + std::string const& parameterFilePath, + std::string const& resultFileName, + std::string const& resultPath, + bool computeInnerParameters); } // namespace parpe diff --git a/include/parpecommon/costFunction.h b/include/parpecommon/costFunction.h index d80a3bb7a..0dc651955 100644 --- a/include/parpecommon/costFunction.h +++ b/include/parpecommon/costFunction.h @@ -2,8 +2,8 @@ #define PARPE_COMMON_COST_FUNCTION_H #include -#include #include +#include namespace parpe { @@ -14,53 +14,54 @@ 
namespace parpe { * bit more. */ class CostFunction { -public: + public: virtual ~CostFunction() = default; - virtual void evaluate(std::vector const& label, - std::vector const& prediction, - double& cost) const { - evaluate(label, prediction, 0, - std::vector(0), - cost, nullptr); + virtual void evaluate( + std::vector const& label, + std::vector const& prediction, + double& cost) const { + evaluate(label, prediction, 0, std::vector(0), cost, nullptr); } - virtual void evaluate(std::vector const& label, - std::vector const& prediction, - int numParameters, - std::vector predictionGradient, - double &cost, - double *gradient) const = 0; + virtual void evaluate( + std::vector const& label, + std::vector const& prediction, + int numParameters, + std::vector predictionGradient, + double& cost, + double* gradient) const = 0; }; class MeanSquaredError : public CostFunction { -public: + public: using CostFunction::evaluate; - void evaluate(std::vector const& label, - std::vector const& prediction, - int numParameters, - std::vector predictionGradient, - double &cost, - double *gradient) const override { + void evaluate( + std::vector const& label, + std::vector const& prediction, + int numParameters, + std::vector predictionGradient, + double& cost, + double* gradient) const override { assert(label.size() == prediction.size()); cost = 0.0; - for(int i = 0; (unsigned) i < label.size(); ++i) { + for (int i = 0; (unsigned)i < label.size(); ++i) { cost += std::pow(label[i] - prediction[i], 2); } cost /= label.size(); - if(gradient) { - for(int p = 0; p < numParameters; ++p) { + if (gradient) { + for (int p = 0; p < numParameters; ++p) { gradient[p] = 0.0; - for(int i = 0; (unsigned) i < label.size(); ++i) { - gradient[p] += -2.0 * (label[i] - prediction[i]) - * predictionGradient[i][p]; + for (int i = 0; (unsigned)i < label.size(); ++i) { + gradient[p] += -2.0 * (label[i] - prediction[i]) * + predictionGradient[i][p]; } gradient[p] /= label.size(); @@ -69,5 +70,5 @@ class MeanSquaredError : public CostFunction { } }; -} +} // namespace parpe #endif diff --git a/include/parpecommon/functions.h b/include/parpecommon/functions.h index 199035b11..f8e5e4d94 100644 --- a/include/parpecommon/functions.h +++ b/include/parpecommon/functions.h @@ -13,8 +13,7 @@ namespace parpe { -enum FunctionEvaluationStatus -{ +enum FunctionEvaluationStatus { functionEvaluationSuccess, functionEvaluationFailure, }; @@ -23,9 +22,8 @@ enum FunctionEvaluationStatus * @brief The GradientFunction class is an interface for an * arbitrary function f(x) and its gradient. */ -class GradientFunction -{ -public: +class GradientFunction { + public: /** * @brief Evaluate the function f(x) * @param parameters Point x at which to evaluate f(x). 
Must be of length @@ -37,16 +35,16 @@ class GradientFunction * otherwise */ virtual FunctionEvaluationStatus evaluate( - gsl::span parameters, - double& fval, - gsl::span gradient) const; + gsl::span parameters, + double& fval, + gsl::span gradient) const; virtual FunctionEvaluationStatus evaluate( - gsl::span parameters, - double& fval, - gsl::span gradient, - Logger* logger, - double* cpuTime) const = 0; + gsl::span parameters, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const = 0; virtual int numParameters() const = 0; @@ -55,17 +53,14 @@ class GradientFunction virtual ~GradientFunction() = default; }; - /** * @brief The SummedGradientFunction class is an interface for cost functions * and gradients that are a sum of functions evaluated on a number of data * records. To be used e.g. for mini-batch optimization. Template parameter can * be used for data indices or directly for data points. */ -template -class SummedGradientFunction -{ -public: +template class SummedGradientFunction { + public: /** * @brief Evaluate on single data point * @param parameters Parameter vector where the function is to be evaluated @@ -79,12 +74,12 @@ class SummedGradientFunction * @return Evaluation status */ virtual FunctionEvaluationStatus evaluate( - gsl::span parameters, - T dataset, - double& fval, - gsl::span gradient, - Logger* logger, - double* cpuTime) const = 0; + gsl::span parameters, + T dataset, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const = 0; /** * @brief Evaluate on vector of data points @@ -99,12 +94,12 @@ class SummedGradientFunction * @return Evaluation status */ virtual FunctionEvaluationStatus evaluate( - gsl::span parameters, - std::vector datasets, - double& fval, - gsl::span gradient, - Logger* logger, - double* cpuTime) const = 0; + gsl::span parameters, + std::vector datasets, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const = 0; /** * @brief Get dimension of function parameter vector @@ -117,66 +112,62 @@ class SummedGradientFunction virtual ~SummedGradientFunction() = default; }; - /** * @brief Adapter / wrapper for SummedGradientFunction to GradientFunction. * * Simply evaluates SummedGradientFunction on all datasets. 
*/ -template +template class SummedGradientFunctionGradientFunctionAdapter - : public GradientFunction - , public SummedGradientFunction -{ -public: + : public GradientFunction, + public SummedGradientFunction { + public: /** * @brief SummedGradientFunctionGradientFunctionAdapter * @param gradFun Function to be wrapped * @param datasets Datasets on which to evaluate */ SummedGradientFunctionGradientFunctionAdapter( - std::unique_ptr> gradFun, - std::vector datasets) + std::unique_ptr> gradFun, + std::vector datasets) : grad_fun_(std::move(gradFun)) - , datasets_(datasets) - {} - - FunctionEvaluationStatus evaluate(gsl::span parameters, - double& fval, - gsl::span gradient, - Logger* logger = nullptr, - double* cpuTime = nullptr) const override - { + , datasets_(datasets) {} + + FunctionEvaluationStatus evaluate( + gsl::span parameters, + double& fval, + gsl::span gradient, + Logger* logger = nullptr, + double* cpuTime = nullptr) const override { return grad_fun_->evaluate( - parameters, datasets_, fval, gradient, logger, cpuTime); + parameters, datasets_, fval, gradient, logger, cpuTime); } - FunctionEvaluationStatus evaluate(gsl::span parameters, - T dataset, - double& fval, - gsl::span gradient, - Logger* logger, - double* cpuTime) const override - { + FunctionEvaluationStatus evaluate( + gsl::span parameters, + T dataset, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const override { return grad_fun_->evaluate( - parameters, dataset, fval, gradient, logger, cpuTime); + parameters, dataset, fval, gradient, logger, cpuTime); } - FunctionEvaluationStatus evaluate(gsl::span parameters, - std::vector datasets, - double& fval, - gsl::span gradient, - Logger* logger, - double* cpuTime) const override - { + FunctionEvaluationStatus evaluate( + gsl::span parameters, + std::vector datasets, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const override { return grad_fun_->evaluate( - parameters, datasets, fval, gradient, logger, cpuTime); + parameters, datasets, fval, gradient, logger, cpuTime); } int numParameters() const override { return grad_fun_->numParameters(); } - std::vector getParameterIds() const override - { + std::vector getParameterIds() const override { return grad_fun_->getParameterIds(); } @@ -184,12 +175,11 @@ class SummedGradientFunctionGradientFunctionAdapter * @brief Return pointer to the wrapped function (non-owning). 
* @return Pointer to wrapped function */ - SummedGradientFunction* getWrappedFunction() const - { + SummedGradientFunction* getWrappedFunction() const { return grad_fun_.get(); } -private: + private: /** Wrapped function */ std::unique_ptr> grad_fun_; @@ -197,5 +187,5 @@ class SummedGradientFunctionGradientFunctionAdapter std::vector datasets_; }; -} +} // namespace parpe #endif diff --git a/include/parpecommon/hdf5Misc.h b/include/parpecommon/hdf5Misc.h index 5a5d6fec9..229d178f9 100644 --- a/include/parpecommon/hdf5Misc.h +++ b/include/parpecommon/hdf5Misc.h @@ -2,53 +2,55 @@ #define HDF5_MISC_H #include +#include #include #include -#include #include -#include #include +#include namespace parpe { class HDF5Exception : public std::exception { -public: + public: explicit HDF5Exception(std::string msg = ""); - explicit HDF5Exception(const char *format, ...); + explicit HDF5Exception(char const* format, ...); - const char* what() const noexcept override; + char const* what() const noexcept override; std::string msg; std::string stackTrace; }; - using mutexHdfType = std::recursive_mutex; void initHDF5Mutex(); std::unique_lock hdf5MutexGetLock(); -#define H5_SAVE_ERROR_HANDLER \ - herr_t (*old_func)(void *); \ - void *old_client_data; \ - H5Eget_auto1(&old_func, &old_client_data); \ +#define H5_SAVE_ERROR_HANDLER \ + herr_t (*old_func)(void*); \ + void* old_client_data; \ + H5Eget_auto1(&old_func, &old_client_data); \ H5Eset_auto1(nullptr, nullptr) #define H5_RESTORE_ERROR_HANDLER H5Eset_auto1(old_func, old_client_data) herr_t -hdf5ErrorStackWalker_cb(unsigned int n, const H5E_error_t *err_desc, void *); +hdf5ErrorStackWalker_cb(unsigned int n, const H5E_error_t* err_desc, void*); -bool hdf5GroupExists(H5::H5File const& file, - const std::string &groupName); +bool hdf5GroupExists(H5::H5File const& file, std::string const& groupName); -void hdf5EnsureGroupExists(H5::H5File const& file, - const std::string &groupName); +void hdf5EnsureGroupExists( + H5::H5File const& file, + std::string const& groupName); -void hdf5CreateGroup(H5::H5File const& file, std::string const& groupPath, bool recursively = false); +void hdf5CreateGroup( + H5::H5File const& file, + std::string const& groupPath, + bool recursively = false); /** * @brief Create and open HDF5 file for writing. @@ -60,100 +62,149 @@ void hdf5CreateGroup(H5::H5File const& file, std::string const& groupPath, bool * throws HDF5Exception on failure. 
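// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] The H5_SAVE_ERROR_HANDLER /
// H5_RESTORE_ERROR_HANDLER macros above bracket HDF5 calls whose failure is
// expected and should be probed silently instead of triggering HDF5's default
// error printer. The probing call below (plain HDF5 C API) is illustrative.
#include <string>

inline bool linkExistsQuietly(H5::H5File const& file, std::string const& path) {
    auto lock = parpe::hdf5MutexGetLock();
    H5_SAVE_ERROR_HANDLER;    // disable automatic HDF5 error printing
    bool const exists = H5Lexists(file.getId(), path.c_str(), H5P_DEFAULT) > 0;
    H5_RESTORE_ERROR_HANDLER; // restore the previous error handler
    return exists;
}
// ---------------------------------------------------------------------------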
* @return HDF5 file handle of the created/opened file */ -H5::H5File hdf5CreateFile(std::string const& filename, - bool overwrite = false); +H5::H5File hdf5CreateFile(std::string const& filename, bool overwrite = false); H5::H5File hdf5OpenForReading(std::string const& hdf5Filename); -H5::H5File hdf5OpenForAppending(const std::string &hdf5Filename); - -void hdf5CreateExtendableDouble2DArray(H5::H5File const& file, std::string const& datasetPath, - hsize_t stride); - -void hdf5CreateExtendableInt2DArray(H5::H5File const& file, std::string const& datasetPath, - hsize_t stride); - -void hdf5CreateExtendableDouble3DArray(H5::H5File const& file, std::string const& datasetPath, - hsize_t stride1, hsize_t stride2); - -void hdf5CreateExtendableString1DArray(H5::H5File const& file, std::string const& datasetPath); - -void hdf5Extend2ndDimensionAndWriteToDouble2DArray(H5::H5File const& file, - std::string const& datasetPath, - gsl::span buffer); - -void hdf5Extend2ndDimensionAndWriteToInt2DArray(H5::H5File const& file, - std::string const& datasetPath, - gsl::span buffer); - -void hdf5ExtendAndWriteToString1DArray(H5::H5File const& file, - std::string const& datasetPath, - std::string const& buffer); - -void hdf5CreateOrExtendAndWriteToDouble2DArray(H5::H5File const& file, - std::string const& parentPath, - std::string const& datasetName, - gsl::span buffer); - -void hdf5CreateOrExtendAndWriteToInt2DArray(H5::H5File const& file, - std::string const& parentPath, - std::string const& datasetName, - gsl::span buffer); - -void hdf5CreateOrExtendAndWriteToDouble3DArray(H5::H5File const& file, - std::string const& parentPath, - std::string const& datasetName, - gsl::span buffer, - hsize_t stride1, - hsize_t stride2); -void hdf5Extend3rdDimensionAndWriteToDouble3DArray(const H5::H5File &file, - std::string const& datasetPath, - gsl::span buffer); - -void hdf5CreateOrExtendAndWriteToString1DArray(H5::H5File const& file, - std::string const& parentPath, - std::string const& datasetName, - std::string const& buffer); - -void hdf5Read2DDoubleHyperslab(H5::H5File const& file, std::string const& path, hsize_t size0, - hsize_t size1, hsize_t offset0, hsize_t offset1, - gsl::span buffer); - -void hdf5Read3DDoubleHyperslab(H5::H5File const& file, std::string const& path, hsize_t size0, - hsize_t size1, hsize_t size2, hsize_t offset0, - hsize_t offset1, hsize_t offset2, - gsl::span buffer); - -std::vector hdf5Get3DDoubleHyperslab(H5::H5File const& file, std::string const& path, hsize_t size0, - hsize_t size1, hsize_t size2, hsize_t offset0, - hsize_t offset1, hsize_t offset2); +H5::H5File hdf5OpenForAppending(std::string const& hdf5Filename); + +void hdf5CreateExtendableDouble2DArray( + H5::H5File const& file, + std::string const& datasetPath, + hsize_t stride); + +void hdf5CreateExtendableInt2DArray( + H5::H5File const& file, + std::string const& datasetPath, + hsize_t stride); + +void hdf5CreateExtendableDouble3DArray( + H5::H5File const& file, + std::string const& datasetPath, + hsize_t stride1, + hsize_t stride2); + +void hdf5CreateExtendableString1DArray( + H5::H5File const& file, + std::string const& datasetPath); + +void hdf5Extend2ndDimensionAndWriteToDouble2DArray( + H5::H5File const& file, + std::string const& datasetPath, + gsl::span buffer); + +void hdf5Extend2ndDimensionAndWriteToInt2DArray( + H5::H5File const& file, + std::string const& datasetPath, + gsl::span buffer); + +void hdf5ExtendAndWriteToString1DArray( + H5::H5File const& file, + std::string const& datasetPath, + std::string const& buffer); + 
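// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] Typical use of the helpers declared
// in this header: take the global HDF5 lock, then append one row at a time to
// an auto-created extendable 2D dataset. File, group and dataset names are
// made up; function names and argument order follow the declarations here.
#include <vector>

inline void appendTraceRow(H5::H5File const& file,
                           std::vector<double> const& row) {
    auto lock = parpe::hdf5MutexGetLock(); // serialize all HDF5 access
    // Creates /traces/cost on the first call; later calls extend the dataset
    // along its extendable dimension and append `row`.
    parpe::hdf5CreateOrExtendAndWriteToDouble2DArray(
        file, "/traces", "cost", row);
}

// Setup, e.g. once at program start:
//   auto file = parpe::hdf5CreateFile("example_trace.h5", /*overwrite=*/true);
// ---------------------------------------------------------------------------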
+void hdf5CreateOrExtendAndWriteToDouble2DArray( + H5::H5File const& file, + std::string const& parentPath, + std::string const& datasetName, + gsl::span buffer); + +void hdf5CreateOrExtendAndWriteToInt2DArray( + H5::H5File const& file, + std::string const& parentPath, + std::string const& datasetName, + gsl::span buffer); + +void hdf5CreateOrExtendAndWriteToDouble3DArray( + H5::H5File const& file, + std::string const& parentPath, + std::string const& datasetName, + gsl::span buffer, + hsize_t stride1, + hsize_t stride2); +void hdf5Extend3rdDimensionAndWriteToDouble3DArray( + const H5::H5File& file, + std::string const& datasetPath, + gsl::span buffer); + +void hdf5CreateOrExtendAndWriteToString1DArray( + H5::H5File const& file, + std::string const& parentPath, + std::string const& datasetName, + std::string const& buffer); + +void hdf5Read2DDoubleHyperslab( + H5::H5File const& file, + std::string const& path, + hsize_t size0, + hsize_t size1, + hsize_t offset0, + hsize_t offset1, + gsl::span buffer); + +void hdf5Read3DDoubleHyperslab( + H5::H5File const& file, + std::string const& path, + hsize_t size0, + hsize_t size1, + hsize_t size2, + hsize_t offset0, + hsize_t offset1, + hsize_t offset2, + gsl::span buffer); + +std::vector hdf5Get3DDoubleHyperslab( + H5::H5File const& file, + std::string const& path, + hsize_t size0, + hsize_t size1, + hsize_t size2, + hsize_t offset0, + hsize_t offset1, + hsize_t offset2); std::vector hdf5Read1DIntegerHyperslab( - const H5::H5File &file, std::string const& path, - hsize_t count, hsize_t offset); + const H5::H5File& file, + std::string const& path, + hsize_t count, + hsize_t offset); std::vector hdf5Read2DIntegerHyperslab( - H5::H5File const& file, std::string const& path, - hsize_t size0, hsize_t size1, hsize_t offset0, hsize_t offset1); - -void hdf5GetDatasetDimensions(H5::H5File const& file, std::string const& path, - hsize_t nDimsExpected, - int *d1 = nullptr, int *d2 = nullptr, - int *d3 = nullptr, int *d4 = nullptr); - -bool hdf5AttributeExists(H5::H5File const& file, std::string const& datasetPath, - std::string const& attributeName); - -void hdf5WriteStringAttribute(H5::H5File const& file, std::string const& datasetPath, - std::string const& attributeName, - std::string const& attributeValue); - -std::vector hdf5Read1dStringDataset( - const H5::H5File &file, std::string const& datasetPath); + H5::H5File const& file, + std::string const& path, + hsize_t size0, + hsize_t size1, + hsize_t offset0, + hsize_t offset1); + +void hdf5GetDatasetDimensions( + H5::H5File const& file, + std::string const& path, + hsize_t nDimsExpected, + int* d1 = nullptr, + int* d2 = nullptr, + int* d3 = nullptr, + int* d4 = nullptr); + +bool hdf5AttributeExists( + H5::H5File const& file, + std::string const& datasetPath, + std::string const& attributeName); + +void hdf5WriteStringAttribute( + H5::H5File const& file, + std::string const& datasetPath, + std::string const& attributeName, + std::string const& attributeValue); + +std::vector +hdf5Read1dStringDataset(const H5::H5File& file, std::string const& datasetPath); void hdf5Write1dStringDataset( - const H5::H5File &file, std::string const& parentPath, - std::string const& datasetPath, std::vector const& buffer); + const H5::H5File& file, + std::string const& parentPath, + std::string const& datasetPath, + std::vector const& buffer); } // namespace parpe #endif diff --git a/include/parpecommon/logging.h b/include/parpecommon/logging.h index 8d8335533..64bbb1d6d 100644 --- a/include/parpecommon/logging.h +++ 
b/include/parpecommon/logging.h @@ -1,36 +1,30 @@ #ifndef LOGGING_H #define LOGGING_H -#include #include #include +#include namespace parpe { -constexpr const char ANSI_COLOR_RED[] = "\x1b[31m"; -constexpr const char ANSI_COLOR_GREEN[] = "\x1b[32m"; -constexpr const char ANSI_COLOR_YELLOW[] = "\x1b[33m"; -constexpr const char ANSI_COLOR_BLUE[] = "\x1b[34m"; -constexpr const char ANSI_COLOR_MAGENTA[] = "\x1b[35m"; -constexpr const char ANSI_COLOR_CYAN[] = "\x1b[36m"; -constexpr const char ANSI_COLOR_RESET[] = "\x1b[0m"; - -std::string printfToString(const char *fmt, va_list ap); - -enum class loglevel { - critical = 1, - error, - warning, - info, - debug -}; +constexpr char const ANSI_COLOR_RED[] = "\x1b[31m"; +constexpr char const ANSI_COLOR_GREEN[] = "\x1b[32m"; +constexpr char const ANSI_COLOR_YELLOW[] = "\x1b[33m"; +constexpr char const ANSI_COLOR_BLUE[] = "\x1b[34m"; +constexpr char const ANSI_COLOR_MAGENTA[] = "\x1b[35m"; +constexpr char const ANSI_COLOR_CYAN[] = "\x1b[36m"; +constexpr char const ANSI_COLOR_RESET[] = "\x1b[0m"; + +std::string printfToString(char const* fmt, va_list ap); + +enum class loglevel { critical = 1, error, warning, info, debug }; // Minimum log level that will be printed extern loglevel minimumLogLevel; void logmessage(loglevel lvl, std::string const& msg); -void logmessage(loglevel lvl, const char *format, ...); -void logmessage(loglevel lvl, const char *format, va_list argptr); +void logmessage(loglevel lvl, char const* format, ...); +void logmessage(loglevel lvl, char const* format, va_list argptr); /** * @brief Print process statistics from /proc/self/status @@ -43,7 +37,7 @@ void printMPIInfo(); void printDebugInfoAndWait(int seconds = 15); class Logger { -public: + public: Logger() = default; explicit Logger(std::string prefix); @@ -52,12 +46,12 @@ class Logger { // TODO add stream operator void logmessage(loglevel lvl, std::string const& msg) const; - void logmessage(loglevel lvl, const char *format, ...) const; - void logmessage(loglevel lvl, const char *format, va_list argptr) const; + void logmessage(loglevel lvl, char const* format, ...) 
const; + void logmessage(loglevel lvl, char const* format, va_list argptr) const; void setPrefix(std::string const& pre); std::string const& getPrefix() const; -private: + private: std::string prefix; }; diff --git a/include/parpecommon/misc.h b/include/parpecommon/misc.h index b797c2da4..7781f515f 100644 --- a/include/parpecommon/misc.h +++ b/include/parpecommon/misc.h @@ -9,10 +9,8 @@ #include -template -std::ostream& -operator<<(std::ostream& o, std::vector const& v) -{ +template +std::ostream& operator<<(std::ostream& o, std::vector const& v) { o << "[ "; for (auto const& e : v) o << e << " "; @@ -20,10 +18,8 @@ operator<<(std::ostream& o, std::vector const& v) return o; } -template -std::ostream& -operator<<(std::ostream& o, gsl::span const& v) -{ +template +std::ostream& operator<<(std::ostream& o, gsl::span const& v) { o << "[ "; for (auto const& e : v) o << e << " "; @@ -33,9 +29,8 @@ operator<<(std::ostream& o, gsl::span const& v) namespace parpe { -class WallTimer -{ -public: +class WallTimer { + public: WallTimer(); void reset(); @@ -44,14 +39,13 @@ class WallTimer double getTotal() const; -private: + private: std::chrono::time_point start; std::chrono::time_point roundStart; }; -class CpuTimer -{ -public: +class CpuTimer { + public: CpuTimer() = default; void reset(); @@ -60,7 +54,7 @@ class CpuTimer double getTotal() const; -private: + private: clock_t start = clock(); clock_t roundStart = clock(); }; @@ -69,25 +63,22 @@ class CpuTimer if (!(expr)) { \ /* NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay, \ * cppcoreguidelines-pro-type-vararg) */ \ - printf("CRITICAL: Assertion %s in %s:%d failed (%s)\n", \ - (#expr), \ - __FILE__, \ - __LINE__, \ - msg); \ + printf( \ + "CRITICAL: Assertion %s in %s:%d failed (%s)\n", \ + (#expr), \ + __FILE__, \ + __LINE__, \ + msg); \ abort(); \ } -void -strFormatCurrentLocaltime(gsl::span buffer, const char* format); +void strFormatCurrentLocaltime(gsl::span buffer, const char* format); -void -printBacktrace(int nMaxFrames = 20); +void printBacktrace(int nMaxFrames = 20); -std::string -getBacktrace(int nMaxFrames = 20); +std::string getBacktrace(int nMaxFrames = 20); -double -randDouble(double min, double max); +double randDouble(double min, double max); /** * @brief fillArrayRandomDoubleIndividualInterval Fill "buffer" with @@ -96,33 +87,28 @@ randDouble(double min, double max); * @param max * @param buffer */ -void -fillArrayRandomDoubleIndividualInterval(gsl::span min, - gsl::span max, - gsl::span buffer); - -void -fillArrayRandomDoubleSameInterval(double min, - double max, - gsl::span buffer); - -int -getMpiRank(); -int -getMpiCommSize(); -int -getMpiActive(); - -void -finalizeMpiIfNeeded(); - -template -bool -withinBounds(long int n, - T_TEST const* x, - const T_BOUNDS* min, - const T_BOUNDS* max) -{ +void fillArrayRandomDoubleIndividualInterval( + gsl::span min, + gsl::span max, + gsl::span buffer); + +void fillArrayRandomDoubleSameInterval( + double min, + double max, + gsl::span buffer); + +int getMpiRank(); +int getMpiCommSize(); +int getMpiActive(); + +void finalizeMpiIfNeeded(); + +template +bool withinBounds( + long int n, + T_TEST const* x, + const T_BOUNDS* min, + const T_BOUNDS* max) { for (int i = 0; i < n; ++i) if (x[i] < min[i]) return false; @@ -138,22 +124,18 @@ withinBounds(long int n, * @brief The Like std::unique_lock, but unlocking a mutex on construction and * locking on destruction. 
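// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] Typical use of the InverseUniqueLock
// defined just below: temporarily release a mutex the caller already holds
// while doing blocking work, then re-acquire it on scope exit. The mutex and
// the surrounding function are illustrative only.
#include <mutex>

inline void workWithoutHoldingTheLock(std::mutex& queueMutex) {
    std::unique_lock<std::mutex> lock(queueMutex);
    // ... short critical section under the lock ...
    {
        parpe::InverseUniqueLock<std::mutex> unlock(&queueMutex);
        // queueMutex is released here; do the blocking work.
    } // queueMutex is re-acquired when `unlock` goes out of scope
    // ... continue under the lock ...
}
// ---------------------------------------------------------------------------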
*/ -template -class InverseUniqueLock -{ +template class InverseUniqueLock { public: explicit InverseUniqueLock(MUTEX* mutex) - : mutex(mutex) - { + : mutex(mutex) { mutex->unlock(); } InverseUniqueLock(InverseUniqueLock& other) = delete; - InverseUniqueLock& operator=(const InverseUniqueLock& other) = delete; + InverseUniqueLock& operator=(InverseUniqueLock const& other) = delete; - InverseUniqueLock(InverseUniqueLock&& other) noexcept - { + InverseUniqueLock(InverseUniqueLock&& other) noexcept { mutex = other.mutex; other.mutex = nullptr; } @@ -172,21 +154,18 @@ class InverseUniqueLock * @param b * @return */ -bool -almostEqual(double a, double b); +bool almostEqual(double a, double b); } // namespace parpe #ifndef __cpp_lib_make_unique // custom make_unique while we are still using c++11 namespace std { -template -std::unique_ptr -make_unique(Args&&... args) -{ +template +std::unique_ptr make_unique(Args&&... args) { return std::unique_ptr(new T(std::forward(args)...)); } -} +} // namespace std #endif #endif diff --git a/include/parpecommon/model.h b/include/parpecommon/model.h index 53c9ca98c..5eac3186f 100644 --- a/include/parpecommon/model.h +++ b/include/parpecommon/model.h @@ -12,94 +12,92 @@ namespace parpe { * * TODO output is currently 1-dimensional; should change */ -template -class Model { -public: +template class Model { + public: virtual ~Model() = default; - virtual void evaluate(gsl::span parameters, - std::vector const& features, - std::vector& outputs) const; - - virtual void evaluate(gsl::span parameters, - std::vector const& features, - std::vector& outputs, // here only one output per model! - std::vector>& outputGradients) const = 0; + virtual void evaluate( + gsl::span parameters, + std::vector const& features, + std::vector& outputs) const; + virtual void evaluate( + gsl::span parameters, + std::vector const& features, + std::vector& outputs, // here only one output per model! + std::vector>& outputGradients) const = 0; }; - /** - * @brief The LinearModel class represents a linear model y = Ax + b with feature matrix A and parameters [x, b]. + * @brief The LinearModel class represents a linear model y = Ax + b with + * feature matrix A and parameters [x, b]. */ -class LinearModel : public Model> -{ -public: +class LinearModel : public Model> { + public: LinearModel() = default; // From Model: using Model::evaluate; /** - * @brief Evaluate linear model with the given parameter on the given dataset + * @brief Evaluate linear model with the given parameter on the given + * dataset * @param parameters * @param features * @param outputs * @param outputGradients */ - void evaluate(gsl::span parameters, - std::vector> const& features, - std::vector& outputs, // here only one output per model! - std::vector>& outputGradients) const override; - + void evaluate( + gsl::span parameters, + std::vector> const& features, + std::vector& outputs, // here only one output per model! + std::vector>& outputGradients) const override; }; /** - * @brief The LinearModelMSE class is a wrapper around LinearModel implementing the - * mean squared error loss function. + * @brief The LinearModelMSE class is a wrapper around LinearModel implementing + * the mean squared error loss function. 
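// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] What the classes below compute for
// a single data record, spelled out: the model output y = A_row * x + b, the
// squared error (y - label)^2, and its gradient. The parameter layout
// [x_0 .. x_{n-1}, b] follows the "[x, b]" note in the LinearModel docs above;
// the helper itself is illustrative (the MSE additionally averages over all
// selected records).
#include <vector>

inline double squaredErrorAndGradient(
    std::vector<double> const& parameters, // [x_0 .. x_{n-1}, b]
    std::vector<double> const& features,   // one row of A
    double label,
    std::vector<double>& gradient) {       // same size as parameters
    std::size_t const n = features.size();
    double y = parameters[n];              // offset b
    for (std::size_t i = 0; i < n; ++i)
        y += parameters[i] * features[i];  // y = A_row * x + b
    double const residual = y - label;
    for (std::size_t i = 0; i < n; ++i)
        gradient[i] = 2.0 * residual * features[i]; // d/dx_i
    gradient[n] = 2.0 * residual;                   // d/db
    return residual * residual;
}
// ---------------------------------------------------------------------------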
*/ -class LinearModelMSE : public SummedGradientFunction -{ -public: - explicit LinearModelMSE(int numParameters) - :numParameters_(numParameters) {} +class LinearModelMSE : public SummedGradientFunction { + public: + explicit LinearModelMSE(int numParameters) + : numParameters_(numParameters) {} // SummedGradientFunction FunctionEvaluationStatus evaluate( - gsl::span parameters, - int dataset, - double &fval, - gsl::span gradient, - Logger *logger, - double *cpuTime) const override { - std::vector dsets {dataset}; - return evaluate(parameters, dsets , fval, gradient, logger, cpuTime); + gsl::span parameters, + int dataset, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const override { + std::vector dsets{dataset}; + return evaluate(parameters, dsets, fval, gradient, logger, cpuTime); } FunctionEvaluationStatus evaluate( - gsl::span parameters, - std::vector dataIndices, - double &fval, - gsl::span gradient, - Logger *logger, - double *cpuTime) const override; + gsl::span parameters, + std::vector dataIndices, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const override; - int numParameters() const override {return numParameters_;} + int numParameters() const override { return numParameters_; } std::vector getParameterIds() const override { std::vector ids(numParameters()); - for(int i = 0; i < static_cast(ids.size()); ++i) + for (int i = 0; i < static_cast(ids.size()); ++i) ids[i] = std::string("p") + std::to_string(i); return ids; } - int numParameters_ = 0; std::vector> datasets; std::vector labels; LinearModel lm; }; -} +} // namespace parpe #endif diff --git a/include/parpecommon/parpeException.h b/include/parpecommon/parpeException.h index 1a007642a..1655fb20d 100644 --- a/include/parpecommon/parpeException.h +++ b/include/parpecommon/parpeException.h @@ -8,13 +8,13 @@ namespace parpe { class ParPEException : public std::exception { public: - explicit ParPEException(const char *message); + explicit ParPEException(char const* message); explicit ParPEException(std::string message); ~ParPEException() throw() override = default; - const char *what() const noexcept override; + char const* what() const noexcept override; private: std::string message; diff --git a/include/parpeloadbalancer/loadBalancerMaster.h b/include/parpeloadbalancer/loadBalancerMaster.h index 7f40c2486..945587cb1 100644 --- a/include/parpeloadbalancer/loadBalancerMaster.h +++ b/include/parpeloadbalancer/loadBalancerMaster.h @@ -4,12 +4,12 @@ #include #include -#include -#include #include +#include +#include #include #include -#include +#include #ifdef PARPE_ENABLE_MPI #include @@ -21,13 +21,13 @@ namespace parpe { struct JobData { JobData() = default; - JobData(int *jobDone, - std::condition_variable *jobDoneChangedCondition, - std::mutex *jobDoneChangedMutex) - : jobDone(jobDone), - jobDoneChangedCondition(jobDoneChangedCondition), - jobDoneChangedMutex(jobDoneChangedMutex) { - } + JobData( + int* jobDone, + std::condition_variable* jobDoneChangedCondition, + std::mutex* jobDoneChangedMutex) + : jobDone(jobDone) + , jobDoneChangedCondition(jobDoneChangedCondition) + , jobDoneChangedMutex(jobDoneChangedMutex) {} /** auto-assigned (unique number up to MAX_INT) */ int jobId = -1; @@ -39,18 +39,17 @@ struct JobData { std::vector recvBuffer; /** incremented by one, once the results have been received (if set) */ - int *jobDone = nullptr; + int* jobDone = nullptr; /** is signaled after jobDone has been incremented (if set) */ - std::condition_variable 
*jobDoneChangedCondition = nullptr; + std::condition_variable* jobDoneChangedCondition = nullptr; /** is locked to signal jobDoneChangedCondition condition (if set) */ - std::mutex *jobDoneChangedMutex = nullptr; + std::mutex* jobDoneChangedMutex = nullptr; /** callback when job is finished (if set) */ std::function callbackJobFinished = nullptr; }; - #ifdef PARPE_ENABLE_MPI /** * @brief The LoadBalancerMaster class sends jobs to workers, receives the @@ -62,11 +61,11 @@ class LoadBalancerMaster { LoadBalancerMaster(LoadBalancerMaster& other) = delete; - LoadBalancerMaster& operator=(const LoadBalancerMaster& other) = delete; + LoadBalancerMaster& operator=(LoadBalancerMaster const& other) = delete; - LoadBalancerMaster(LoadBalancerMaster &&other) noexcept = delete; + LoadBalancerMaster(LoadBalancerMaster&& other) noexcept = delete; - LoadBalancerMaster const & operator=(LoadBalancerMaster &&fp) = delete; + LoadBalancerMaster const& operator=(LoadBalancerMaster&& fp) = delete; ~LoadBalancerMaster(); @@ -84,7 +83,7 @@ class LoadBalancerMaster { * @brief Assign job ID and append to queue for sending to workers. * @param data Data to be sent (user keeps ownership). */ - void queueJob(JobData *data); + void queueJob(JobData* data); /** * @brief Stop the loadbalancer thread @@ -141,7 +140,7 @@ class LoadBalancerMaster { * @brief Pop oldest element from the queue and return. * @return The first queue element. */ - JobData *getNextJob(); + JobData* getNextJob(); /** * @brief Send the given work package to the given worker and track @@ -149,7 +148,7 @@ class LoadBalancerMaster { * @param workerIdx Index (not rank) * @param data Job data to send */ - void sendToWorker(int workerIdx, JobData *data); + void sendToWorker(int workerIdx, JobData* data); /** * @brief Handle the result message from a worker as indicated by mpiStatus. @@ -157,7 +156,7 @@ class LoadBalancerMaster { * @param mpiStatus Receive the indicated message, mark job as done, * signal reception. */ - int handleReply(MPI_Status *mpiStatus); + int handleReply(MPI_Status* mpiStatus); /** * @brief Check if jobs are waiting in queue and send to specified worker. @@ -182,7 +181,7 @@ class LoadBalancerMaster { int numWorkers = 0; /** Queue with jobs to be sent to workers */ - std::queue queue; + std::queue queue; /** Last assigned job ID used as MPI message tag */ int lastJobId = 0; @@ -201,7 +200,7 @@ class LoadBalancerMaster { /** Jobs that have been sent to workers. Required for handling replies and * signaling the client that processing has completed. */ - std::vector sentJobsData; + std::vector sentJobsData; /** Mutex to protect access to `queue`. */ mutable std::mutex mutexQueue; @@ -218,7 +217,6 @@ class LoadBalancerMaster { /** Signals whether the queue thread should keep running */ std::atomic_bool queue_thread_continue_ = true; - /** Value to indicate that there is currently no known free worker. */ constexpr static int NO_FREE_WORKER = -1; }; diff --git a/include/parpeloadbalancer/loadBalancerWorker.h b/include/parpeloadbalancer/loadBalancerWorker.h index e60cd972a..022582aed 100644 --- a/include/parpeloadbalancer/loadBalancerWorker.h +++ b/include/parpeloadbalancer/loadBalancerWorker.h @@ -3,8 +3,8 @@ #include -#include #include +#include constexpr int MPI_TAG_EXIT_SIGNAL = 0; @@ -21,16 +21,17 @@ class LoadBalancerWorker { * @param jobId A message identifier, unique over the range of MAX_INT * messages. 
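// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] A worker built on the handler type
// declared just below. The buffer element type (char) and the convention that
// the result is serialized back into the same buffer are assumptions made for
// illustration; run() and the handler parameters are as declared here.
#include <vector>

inline void runExampleWorker() {
    parpe::LoadBalancerWorker worker;
    worker.run([](std::vector<char>& buffer, int jobId) {
        // Deserialize the job from `buffer`, do the work, then write the
        // result back into `buffer` for the master to collect.
        (void)jobId;
        buffer = {'o', 'k'};
    });
}
// ---------------------------------------------------------------------------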
*/ - using messageHandlerFunc = std::function &buffer, int jobId)>; + using messageHandlerFunc = + std::function& buffer, int jobId)>; - void run(const messageHandlerFunc &messageHandler); + void run(messageHandlerFunc const& messageHandler); private: /** * @brief Probe for and dispatch the next incoming job * @return `true` if the termination signal was received, `false` otherwise. */ - bool waitForAndHandleJobs(const messageHandlerFunc& messageHandler); + bool waitForAndHandleJobs(messageHandlerFunc const& messageHandler); }; } // namespace parpe diff --git a/include/parpeoptimization/localOptimizationCeres.h b/include/parpeoptimization/localOptimizationCeres.h index 2e841dac7..09f33843a 100644 --- a/include/parpeoptimization/localOptimizationCeres.h +++ b/include/parpeoptimization/localOptimizationCeres.h @@ -3,7 +3,6 @@ #include - namespace parpe { class OptimizationProblem; @@ -18,8 +17,8 @@ class OptimizerCeres : public Optimizer { * @param problem the optimization problem * @return Returns 0 on success. */ - std::tuple > - optimize(OptimizationProblem *problem) override; + std::tuple> + optimize(OptimizationProblem* problem) override; }; } // namespace parpe diff --git a/include/parpeoptimization/localOptimizationDlib.h b/include/parpeoptimization/localOptimizationDlib.h index 4e05e5d61..085b22e11 100644 --- a/include/parpeoptimization/localOptimizationDlib.h +++ b/include/parpeoptimization/localOptimizationDlib.h @@ -20,8 +20,8 @@ class OptimizerDlibLineSearch : public Optimizer { * @return Returns 0 on success. */ - std::tuple > - optimize(OptimizationProblem *problem) override; + std::tuple> + optimize(OptimizationProblem* problem) override; }; } // namespace parpe diff --git a/include/parpeoptimization/localOptimizationFides.h b/include/parpeoptimization/localOptimizationFides.h index d78fadb86..c54de32be 100644 --- a/include/parpeoptimization/localOptimizationFides.h +++ b/include/parpeoptimization/localOptimizationFides.h @@ -6,8 +6,7 @@ namespace parpe { -class OptimizerFides : public Optimizer -{ +class OptimizerFides : public Optimizer { public: OptimizerFides() = default; @@ -19,8 +18,8 @@ class OptimizerFides : public Optimizer * @return . */ - std::tuple> optimize( - OptimizationProblem* problem) override; + std::tuple> + optimize(OptimizationProblem* problem) override; }; } // namespace parpe diff --git a/include/parpeoptimization/localOptimizationFsqp.h b/include/parpeoptimization/localOptimizationFsqp.h index 2d95de733..a6c096555 100644 --- a/include/parpeoptimization/localOptimizationFsqp.h +++ b/include/parpeoptimization/localOptimizationFsqp.h @@ -11,18 +11,18 @@ namespace parpe { /** * @brief Interface to the FSQP solver. (Tested with FFSQP Version 3.7b) * - * This solver is not included in the parPE repository. A license must be obtained separately. + * This solver is not included in the parPE repository. A license must be + * obtained separately. */ class OptimizerFsqp : public Optimizer { -public: + public: OptimizerFsqp() = default; - std::tuple > - optimize(parpe::OptimizationProblem *problem) override; + std::tuple> + optimize(parpe::OptimizationProblem* problem) override; }; - } // namespace parpe #endif diff --git a/include/parpeoptimization/localOptimizationIpopt.h b/include/parpeoptimization/localOptimizationIpopt.h index 847948358..0a946db92 100644 --- a/include/parpeoptimization/localOptimizationIpopt.h +++ b/include/parpeoptimization/localOptimizationIpopt.h @@ -16,7 +16,8 @@ class OptimizerIpOpt : public Optimizer { * @return Returns 0 on success. 
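// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] All local optimizers wrapped by
// parPE share this optimize() entry point. The tuple is assumed to carry
// (exit status, final cost, final parameter vector); the element types are
// stripped from the declarations shown here, so treat them as an assumption.
inline void runSingleStart(parpe::OptimizationProblem& problem) {
    parpe::OptimizerIpOpt optimizer;
    auto [status, finalCost, finalParameters] = optimizer.optimize(&problem);
    if (status == 0) {
        // success ("Returns 0 on success"): use finalCost / finalParameters
    }
}
// ---------------------------------------------------------------------------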
*/ - std::tuple > optimize(OptimizationProblem *problem) override; + std::tuple> + optimize(OptimizationProblem* problem) override; }; } // namespace parpe diff --git a/include/parpeoptimization/localOptimizationIpoptTNLP.h b/include/parpeoptimization/localOptimizationIpoptTNLP.h index 3ef15f199..e318319d7 100644 --- a/include/parpeoptimization/localOptimizationIpoptTNLP.h +++ b/include/parpeoptimization/localOptimizationIpoptTNLP.h @@ -24,49 +24,62 @@ using mutexIpOptType = std::recursive_mutex; * @brief ipoptMutex Ipopt seems not to be thread safe. Lock this mutex every * time that control is passed to ipopt functions. */ -static mutexIpOptType mutexIpOpt {}; - +static mutexIpOptType mutexIpOpt{}; InverseUniqueLock ipOptReleaseLock(); std::unique_lock ipOptGetLock(); - class OptimizationProblem; class OptimizationReporter; class LocalOptimizationIpoptTNLP : public Ipopt::TNLP { public: - - LocalOptimizationIpoptTNLP(OptimizationProblem &problem, OptimizationReporter &reporter); + LocalOptimizationIpoptTNLP( + OptimizationProblem& problem, + OptimizationReporter& reporter); ~LocalOptimizationIpoptTNLP() override = default; - bool get_nlp_info(Ipopt::Index &n, Ipopt::Index &m, - Ipopt::Index &nnz_jac_g, - Ipopt::Index &nnz_h_lag, - IndexStyleEnum &index_style) override; - - bool get_bounds_info(Ipopt::Index n, Ipopt::Number *x_l, - Ipopt::Number *x_u, Ipopt::Index m, - Ipopt::Number *g_l, Ipopt::Number *g_u) override; - - bool get_starting_point(Ipopt::Index n, bool init_x, - Ipopt::Number *x, - bool init_z, Ipopt::Number *z_L, - Ipopt::Number *z_U, - Ipopt::Index m, bool init_lambda, - Ipopt::Number *lambda) override; - - bool eval_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, - Ipopt::Number &obj_value) override; + bool get_nlp_info( + Ipopt::Index& n, + Ipopt::Index& m, + Ipopt::Index& nnz_jac_g, + Ipopt::Index& nnz_h_lag, + IndexStyleEnum& index_style) override; + + bool get_bounds_info( + Ipopt::Index n, + Ipopt::Number* x_l, + Ipopt::Number* x_u, + Ipopt::Index m, + Ipopt::Number* g_l, + Ipopt::Number* g_u) override; + + bool get_starting_point( + Ipopt::Index n, + bool init_x, + Ipopt::Number* x, + bool init_z, + Ipopt::Number* z_L, + Ipopt::Number* z_U, + Ipopt::Index m, + bool init_lambda, + Ipopt::Number* lambda) override; + + bool eval_f( + Ipopt::Index n, + Ipopt::Number const* x, + bool new_x, + Ipopt::Number& obj_value) override; /** * @brief See Ipopt::TNLP::eval_grad_f. * - * Note: Failure in eval_f (i.e. returning non-finite value or false) will make IpOpt try a new - * step, unless this happens at the starting point. However, if eval_f succeeds, but eval_grad_f - * fails, then IpOpt will terminate. + * Note: Failure in eval_f (i.e. returning non-finite value or false) will + * make IpOpt try a new step, unless this happens at the starting point. + * However, if eval_f succeeds, but eval_grad_f fails, then IpOpt will + * terminate. 
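// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] Practical consequence of the note
// above: report trouble already from eval_f, where Ipopt can still retry with
// a smaller step, rather than only from eval_grad_f, where it would terminate.
// The cost evaluation below is a placeholder.
#include <cmath>

inline bool evalObjectiveSketch(double const* /*x*/, int /*n*/,
                                double& obj_value) {
    double const fval = 0.0; // placeholder for the actual cost evaluation
    if (!std::isfinite(fval))
        return false;        // lets Ipopt shrink the step and try again
    obj_value = fval;
    return true;
}
// ---------------------------------------------------------------------------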
* * @param n * @param x @@ -74,46 +87,68 @@ class LocalOptimizationIpoptTNLP : public Ipopt::TNLP { * @param grad_f * @return */ - bool eval_grad_f(Ipopt::Index n, const Ipopt::Number *x, bool new_x, - Ipopt::Number *grad_f) override; - - bool eval_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, - Ipopt::Index m, - Ipopt::Number *g) override; - - bool eval_jac_g(Ipopt::Index n, const Ipopt::Number *x, bool new_x, - Ipopt::Index m, - Ipopt::Index nele_jac, Ipopt::Index *iRow, - Ipopt::Index *jCol, - Ipopt::Number *values) override; + bool eval_grad_f( + Ipopt::Index n, + Ipopt::Number const* x, + bool new_x, + Ipopt::Number* grad_f) override; + + bool eval_g( + Ipopt::Index n, + Ipopt::Number const* x, + bool new_x, + Ipopt::Index m, + Ipopt::Number* g) override; + + bool eval_jac_g( + Ipopt::Index n, + Ipopt::Number const* x, + bool new_x, + Ipopt::Index m, + Ipopt::Index nele_jac, + Ipopt::Index* iRow, + Ipopt::Index* jCol, + Ipopt::Number* values) override; bool intermediate_callback( - Ipopt::AlgorithmMode mode, Ipopt::Index iter, Ipopt::Number obj_value, - Ipopt::Number inf_pr, Ipopt::Number inf_du, Ipopt::Number mu, - Ipopt::Number d_norm, Ipopt::Number regularization_size, - Ipopt::Number alpha_du, Ipopt::Number alpha_pr, Ipopt::Index ls_trials, - const Ipopt::IpoptData *ip_data, Ipopt::IpoptCalculatedQuantities *ip_cq) override; - - void finalize_solution(Ipopt::SolverReturn status, Ipopt::Index n, - const Ipopt::Number *x, - const Ipopt::Number *z_L, - const Ipopt::Number *z_U, Ipopt::Index m, - const Ipopt::Number *g, - const Ipopt::Number *lambda, - Ipopt::Number obj_value, - const Ipopt::IpoptData *ip_data, - Ipopt::IpoptCalculatedQuantities *ip_cq) override; -private: - OptimizationProblem &problem; - OptimizationReporter &reporter; + Ipopt::AlgorithmMode mode, + Ipopt::Index iter, + Ipopt::Number obj_value, + Ipopt::Number inf_pr, + Ipopt::Number inf_du, + Ipopt::Number mu, + Ipopt::Number d_norm, + Ipopt::Number regularization_size, + Ipopt::Number alpha_du, + Ipopt::Number alpha_pr, + Ipopt::Index ls_trials, + Ipopt::IpoptData const* ip_data, + Ipopt::IpoptCalculatedQuantities* ip_cq) override; + + void finalize_solution( + Ipopt::SolverReturn status, + Ipopt::Index n, + Ipopt::Number const* x, + Ipopt::Number const* z_L, + Ipopt::Number const* z_U, + Ipopt::Index m, + Ipopt::Number const* g, + Ipopt::Number const* lambda, + Ipopt::Number obj_value, + Ipopt::IpoptData const* ip_data, + Ipopt::IpoptCalculatedQuantities* ip_cq) override; + + private: + OptimizationProblem& problem; + OptimizationReporter& reporter; // need to store initial parameters, because IpOpt asks twice std::vector initialParameters; - }; -void setIpOptOption(const std::pair &pair, - Ipopt::SmartPtr* o); +void setIpOptOption( + std::pair const& pair, + Ipopt::SmartPtr* o); } // namespace parpe diff --git a/include/parpeoptimization/localOptimizationToms611.h b/include/parpeoptimization/localOptimizationToms611.h index 6db73855c..5fa5cb3ba 100644 --- a/include/parpeoptimization/localOptimizationToms611.h +++ b/include/parpeoptimization/localOptimizationToms611.h @@ -22,7 +22,8 @@ class OptimizerToms611TrustRegionSumsl : public Optimizer { * @return Returns 0 on success. 
*/ - virtual std::tuple > optimize(OptimizationProblem *problem) override; + virtual std::tuple> + optimize(OptimizationProblem* problem) override; }; } // namespace parpe diff --git a/include/parpeoptimization/minibatchOptimization.h b/include/parpeoptimization/minibatchOptimization.h old mode 100755 new mode 100644 index c7bee47a3..37bb51943 --- a/include/parpeoptimization/minibatchOptimization.h +++ b/include/parpeoptimization/minibatchOptimization.h @@ -1,65 +1,63 @@ #ifndef PARPE_OPTIMIZATION_MINIBATCH_OPTIMIZATION_H #define PARPE_OPTIMIZATION_MINIBATCH_OPTIMIZATION_H +#include +#include #include #include #include -#include -#include -#include -#include -#include #include -#include -#include +#include #include +#include #include +#include +#include +#include #include - namespace parpe { /** * @brief Return status for mini-batch optimizer */ enum class minibatchExitStatus { - gradientNormConvergence, maxEpochsExceeded, invalidNumber + gradientNormConvergence, + maxEpochsExceeded, + invalidNumber }; /** * @brief Shape of learning rate interpolation */ -enum class learningRateInterp { - linear, inverseLinear, logarithmic -}; +enum class learningRateInterp { linear, inverseLinear, logarithmic }; /** * @brief Reaction upon ODE solver crashes */ -enum class interceptType { - none, reduceStep, reduceStepAndRestart -}; +enum class interceptType { none, reduceStep, reduceStepAndRestart }; /** * @brief Problem definition for mini-batch optimization. * - * This class provides cost function and training data for a mini-batch optimizer. - * Data maybe be the actual data or just an index list referencing the data, the - * cost function will operate on. + * This class provides cost function and training data for a mini-batch + * optimizer. Data maybe be the actual data or just an index list referencing + * the data, the cost function will operate on. */ -template -class MinibatchOptimizationProblem: public OptimizationProblem { -public: +template +class MinibatchOptimizationProblem : public OptimizationProblem { + public: MinibatchOptimizationProblem() = default; - MinibatchOptimizationProblem(std::unique_ptr> costFun, - std::unique_ptr logger) : - OptimizationProblem(costFun, logger) { - } + MinibatchOptimizationProblem( + std::unique_ptr> costFun, + std::unique_ptr logger) + : OptimizationProblem(costFun, logger) {} - MinibatchOptimizationProblem(MinibatchOptimizationProblem const& other) = delete; + MinibatchOptimizationProblem(MinibatchOptimizationProblem const& other) = + delete; ~MinibatchOptimizationProblem() override = default; @@ -68,7 +66,8 @@ class MinibatchOptimizationProblem: public OptimizationProblem { /** mini batch cost function */ SummedGradientFunction* getGradientFunction() const { - auto summedGradientFunction = dynamic_cast*>(cost_fun_.get()); + auto summedGradientFunction = + dynamic_cast*>(cost_fun_.get()); Ensures(summedGradientFunction != nullptr); return summedGradientFunction; } @@ -77,21 +76,24 @@ class MinibatchOptimizationProblem: public OptimizationProblem { /** * @brief learning rate updaters for mini-batch optimizers * - * The LearningRateUpdater provides the possibility to reduce the learning rate per epoch - * and makes it possible to adapt the learning rate according to success or failure of - * the ODE solver. + * The LearningRateUpdater provides the possibility to reduce the learning rate + * per epoch and makes it possible to adapt the learning rate according to + * success or failure of the ODE solver. 
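// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] One plausible reading of the three
// learningRateInterp modes declared above: interpolate between a start and an
// end learning rate while the epoch counter runs from 0 to maxEpochs - 1.
// parPE's exact formulas are not visible in this header, so treat this purely
// as illustration.
#include <cmath>

inline double interpolatedLearningRate(
    int epoch, int maxEpochs, double startRate, double endRate,
    parpe::learningRateInterp mode) {
    double const t =
        maxEpochs > 1 ? static_cast<double>(epoch) / (maxEpochs - 1) : 0.0;
    switch (mode) {
    case parpe::learningRateInterp::linear:
        return startRate + t * (endRate - startRate);
    case parpe::learningRateInterp::inverseLinear:
        return 1.0 / (1.0 / startRate + t * (1.0 / endRate - 1.0 / startRate));
    case parpe::learningRateInterp::logarithmic:
        return std::exp(std::log(startRate) +
                        t * (std::log(endRate) - std::log(startRate)));
    }
    return endRate;
}
// ---------------------------------------------------------------------------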
*/ class LearningRateUpdater { -public: + public: /** * @brief Update the learning rate * @param maxEpochs Maximum number of epochs in optimization - * @param LearningRateadaptionMode Type of interpolation between startLearningRate and endLearningRate + * @param LearningRateadaptionMode Type of interpolation between + * startLearningRate and endLearningRate */ - LearningRateUpdater(int maxEpochs, - learningRateInterp learningRateInterpMode); + LearningRateUpdater( + int maxEpochs, + learningRateInterp learningRateInterpMode); - /** Update function, to be called in every epoch or optimization iteration */ + /** Update function, to be called in every epoch or optimization iteration + */ void updateLearningRate(int iteration); /** Update function, to be called if parameter update did not work well */ @@ -115,14 +117,16 @@ class LearningRateUpdater { /** Function to set end learning rate */ void setEndLearningRate(double learningRate); -private: - /** Maximum number of epochs, will be set after creation of problem instance */ + private: + /** Maximum number of epochs, will be set after creation of problem instance + */ int maxEpochs = 0; /** Learning rate, i.e. step size, at the moment of optimization */ double currentLearningRate = 0.0; - /** If an optimization step is not successful, the learning rate, i.e., step size, will be reduced by this factor */ + /** If an optimization step is not successful, the learning rate, i.e., step + * size, will be reduced by this factor */ double reductionFactor = 4.0; /** Learning rate, i.e. step size, at the beginning of optimization */ @@ -131,7 +135,8 @@ class LearningRateUpdater { /** Learning rate, i.e. step size, at the end of optimization */ double endLearningRate = 0.001; - /** Mode of interpolation between the beginning and the end of optimization */ + /** Mode of interpolation between the beginning and the end of optimization + */ learningRateInterp learningRateInterpMode = learningRateInterp::linear; }; @@ -139,7 +144,7 @@ class LearningRateUpdater { * @brief Interface for parameter updaters for mini-batch optimizers */ class ParameterUpdater { -public: + public: /** * @brief Update parameter vector * @param learningRate Current learning rate, i.e., step-size @@ -147,40 +152,44 @@ class ParameterUpdater { * @param gradient Cost function gradient at parameters * @param parameters In: Current parameters, Out: Updated parameters */ - virtual void updateParameters(double learningRate, - int iteration, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds = gsl::span(), - gsl::span upperBounds = gsl::span()) = 0; - - /** If ODE becomes non-integrable, the last step must be undone using this method */ + virtual void updateParameters( + double learningRate, + int iteration, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds = gsl::span(), + gsl::span upperBounds = gsl::span()) = 0; + + /** If ODE becomes non-integrable, the last step must be undone using this + * method */ virtual void undoLastStep() = 0; - /** If the ODE is repeatedly non-integrable, a cold restart is performed using this method */ + /** If the ODE is repeatedly non-integrable, a cold restart is performed + * using this method */ virtual void clearCache() = 0; /** Initialize the parameter updater */ virtual void initialize(unsigned int numParameters) = 0; virtual ~ParameterUpdater() = default; - }; /** * @brief Mini-batch optimizer: Vanilla SGD Updater * The simplest mini batch algorithm. 
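// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the patch] The textbook rule behind a
// "vanilla" SGD updater: step against the gradient, then clip to the box
// constraints. Whether parPE additionally rescales the step is not visible
// here, so this shows only the generic update.
#include <algorithm>
#include <vector>

inline void vanillaSgdStep(double learningRate,
                           std::vector<double> const& gradient,
                           std::vector<double> const& lowerBounds,
                           std::vector<double> const& upperBounds,
                           std::vector<double>& parameters) {
    for (std::size_t i = 0; i < parameters.size(); ++i) {
        parameters[i] -= learningRate * gradient[i];
        parameters[i] =
            std::clamp(parameters[i], lowerBounds[i], upperBounds[i]);
    }
}
// ---------------------------------------------------------------------------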
*/ -class ParameterUpdaterVanilla: public ParameterUpdater { -public: +class ParameterUpdaterVanilla : public ParameterUpdater { + public: ParameterUpdaterVanilla() = default; - void updateParameters(double learningRate, - int iteration, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds = gsl::span(), - gsl::span upperBounds = gsl::span()) override; + void updateParameters( + double learningRate, + int iteration, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds = gsl::span(), + gsl::span upperBounds = + gsl::span()) override; void undoLastStep() override; @@ -193,16 +202,18 @@ class ParameterUpdaterVanilla: public ParameterUpdater { * @brief Mini-batch optimizer: RMSProp Updater * A so-called adaptive mini batching algorithm without momentum */ -class ParameterUpdaterRmsProp: public ParameterUpdater { -public: +class ParameterUpdaterRmsProp : public ParameterUpdater { + public: ParameterUpdaterRmsProp() = default; - void updateParameters(double learningRate, - int iteration, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds = gsl::span(), - gsl::span upperBounds = gsl::span()) override; + void updateParameters( + double learningRate, + int iteration, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds = gsl::span(), + gsl::span upperBounds = + gsl::span()) override; void undoLastStep() override; @@ -210,9 +221,9 @@ class ParameterUpdaterRmsProp: public ParameterUpdater { void initialize(unsigned int numParameters) override; -private: - - /** Rate for memorizing gradient norms (between 0 and 1, high rates mean long memory) */ + private: + /** Rate for memorizing gradient norms (between 0 and 1, high rates mean + * long memory) */ double decayRate = 0.9; /** Stabilization factor for gradient normalization (avoid dividing by 0) */ @@ -221,7 +232,8 @@ class ParameterUpdaterRmsProp: public ParameterUpdater { /** Memorized gradient norms (decaying average) from last steps */ std::vector gradientNormCache; - /** Memorized gradient norms (decaying average), one step back (if one step must be undone) */ + /** Memorized gradient norms (decaying average), one step back (if one step + * must be undone) */ std::vector oldGradientNormCache; }; @@ -229,16 +241,18 @@ class ParameterUpdaterRmsProp: public ParameterUpdater { * @brief Mini-batch optimizer: Momentum Updater * A classical gradient based optimizer using a vanilla momentum formula */ -class ParameterUpdaterMomentum: public ParameterUpdater { -public: +class ParameterUpdaterMomentum : public ParameterUpdater { + public: ParameterUpdaterMomentum() = default; - void updateParameters(double learningRate, - int iteration, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds = gsl::span(), - gsl::span upperBounds = gsl::span()) override; + void updateParameters( + double learningRate, + int iteration, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds = gsl::span(), + gsl::span upperBounds = + gsl::span()) override; void undoLastStep() override; @@ -246,32 +260,35 @@ class ParameterUpdaterMomentum: public ParameterUpdater { void initialize(unsigned int numParameters) override; -private: - - /** Rate for memorizing gradient norms (between 0 and 1, high rates mean long memory) */ + private: + /** Rate for memorizing gradient norms (between 0 and 1, high rates mean + * long memory) */ double decayRate = 0.8; /** Accumulated momentum (decaying average) from last steps */ std::vector momentum; - /** Accumulated momentum (decaying average), one 
step back (if one step must be undone) */ + /** Accumulated momentum (decaying average), one step back (if one step must + * be undone) */ std::vector oldMomentum; }; - /** +/** * @brief Mini-batch optimizer: Adam Updater * A momentum-based and so-called adaptive mini batching algorithm */ -class ParameterUpdaterAdam: public ParameterUpdater { -public: +class ParameterUpdaterAdam : public ParameterUpdater { + public: ParameterUpdaterAdam() = default; - void updateParameters(double learningRate, - int iteration, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds = gsl::span(), - gsl::span upperBounds = gsl::span()) override; + void updateParameters( + double learningRate, + int iteration, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds = gsl::span(), + gsl::span upperBounds = + gsl::span()) override; void undoLastStep() override; @@ -279,12 +296,13 @@ class ParameterUpdaterAdam: public ParameterUpdater { void initialize(unsigned int numParameters) override; -private: - - /** Rate for memorizing gradients (between 0 and 1, high rates mean long memory) */ + private: + /** Rate for memorizing gradients (between 0 and 1, high rates mean long + * memory) */ double decayRateGradient = 0.9; - /** Rate for memorizing gradient norms (between 0 and 1, high rates mean long memory) */ + /** Rate for memorizing gradient norms (between 0 and 1, high rates mean + * long memory) */ double decayRateGradientNorm = 0.9; /** Stabilization factor for gradient normalization (avoid dividing by 0) */ @@ -293,13 +311,15 @@ class ParameterUpdaterAdam: public ParameterUpdater { /** Memorized gradient norms (decaying average) from last steps */ std::vector gradientNormCache; - /** Memorized gradient norms (decaying average), one step back (if one step must be undone) */ + /** Memorized gradient norms (decaying average), one step back (if one step + * must be undone) */ std::vector oldGradientNormCache; /** Memorized gradients (decaying average) from last steps */ std::vector gradientCache; - /** Memorized gradients (decaying average), one step back (if one step must be undone) */ + /** Memorized gradients (decaying average), one step back (if one step must + * be undone) */ std::vector oldGradientCache; }; @@ -308,16 +328,18 @@ class ParameterUpdaterAdam: public ParameterUpdater { * A momentum-based and so-called adaptive mini batching algorithm, * using the original settings from the literature */ -class ParameterUpdaterAdamClassic: public ParameterUpdater { -public: +class ParameterUpdaterAdamClassic : public ParameterUpdater { + public: ParameterUpdaterAdamClassic() = default; - void updateParameters(double learningRate, - int iteration, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds = gsl::span(), - gsl::span upperBounds = gsl::span()) override; + void updateParameters( + double learningRate, + int iteration, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds = gsl::span(), + gsl::span upperBounds = + gsl::span()) override; void undoLastStep() override; @@ -325,12 +347,13 @@ class ParameterUpdaterAdamClassic: public ParameterUpdater { void initialize(unsigned int numParameters) override; -private: - - /** Rate for memorizing gradients (between 0 and 1, high rates mean long memory) */ + private: + /** Rate for memorizing gradients (between 0 and 1, high rates mean long + * memory) */ double decayRateGradient = 0.9; - /** Rate for memorizing gradient norms (between 0 and 1, high rates mean long memory) */ + /** Rate for memorizing 
gradient norms (between 0 and 1, high rates mean + * long memory) */ double decayRateGradientNorm = 0.999; /** Stabilization factor for gradient normalization (avoid dividing by 0) */ @@ -339,13 +362,15 @@ class ParameterUpdaterAdamClassic: public ParameterUpdater { /** Memorized gradient norms (decaying average) from last steps */ std::vector gradientNormCache; - /** Memorized gradient norms (decaying average), one step back (if one step must be undone) */ + /** Memorized gradient norms (decaying average), one step back (if one step + * must be undone) */ std::vector oldGradientNormCache; /** Memorized gradients (decaying average) from last steps */ std::vector gradientCache; - /** Memorized gradients (decaying average), one step back (if one step must be undone) */ + /** Memorized gradients (decaying average), one step back (if one step must + * be undone) */ std::vector oldGradientCache; }; @@ -358,14 +383,15 @@ class ParameterUpdaterAdamClassic: public ParameterUpdater { * @param batchSize Number of elements in each batch * @return Vector batches of data elements */ -template -std::vector> getBatches(gsl::span data, - int batchSize) { +template +std::vector> getBatches(gsl::span data, int batchSize) { int numBatches = ceil(static_cast(data.size()) / batchSize); - std::vector < std::vector < T >> batches(numBatches, std::vector()); + std::vector> batches(numBatches, std::vector()); int batchIdx = -1; - for (int i = 0; static_cast(i) < data.size(); ++i) { + for (int i = 0; + static_cast(i) < data.size(); + ++i) { if (i % batchSize == 0) { ++batchIdx; int remaining = data.size() - i; @@ -382,7 +408,7 @@ std::vector> getBatches(gsl::span data, * @param v * @return the norm */ -double getVectorNorm(gsl::span v); +double getVectorNorm(gsl::span v); /** * @brief Get difference of two vectors (v - w). @@ -390,12 +416,11 @@ double getVectorNorm(gsl::span v); * @param w * @return the norm */ -std::vector getVectorDifference(gsl::span v, - gsl::span w); +std::vector +getVectorDifference(gsl::span v, gsl::span w); -template -class MinibatchOptimizer { -public: +template class MinibatchOptimizer { + public: /** * @brief Minimize the given function using mini-batch gradient descent. * @@ -406,18 +431,20 @@ class MinibatchOptimizer { * @param logger Logger instance for status messages * @return Tuple (exit code, final cost, final parameters) */ - std::tuple > optimize(SummedGradientFunction const& f, - gsl::span data, - gsl::span initialParameters, - gsl::span lowerParameterBounds, - gsl::span upperParameterBounds, - OptimizationReporter *reporter, - Logger *logger_) { - Expects((unsigned) f.numParameters() == initialParameters.size()); + std::tuple> optimize( + SummedGradientFunction const& f, + gsl::span data, + gsl::span initialParameters, + gsl::span lowerParameterBounds, + gsl::span upperParameterBounds, + OptimizationReporter* reporter, + Logger* logger_) { + Expects((unsigned)f.numParameters() == initialParameters.size()); Logger logger = logger_ ? 
*logger_ : Logger(); // We don't change the user inputs but work with copies - std::vector parameters(initialParameters.begin(), initialParameters.end()); + std::vector parameters( + initialParameters.begin(), initialParameters.end()); std::vector oldParameters(parameters.size(), NAN); std::vector shuffledData(data.begin(), data.end()); @@ -433,12 +460,14 @@ class MinibatchOptimizer { if (reporter) { reporter->starting(initialParameters); - reporter->result_writer_->setLoggingEachFunctionEvaluation(false, true); + reporter->result_writer_->setLoggingEachFunctionEvaluation( + false, true); reporter->result_writer_->setLoggingEachIteration(false); } for (int epoch = 0; epoch < maxEpochs; ++epoch) { - auto epochLogger = logger.getChild(std::string("e") + std::to_string(epoch)); + auto epochLogger = + logger.getChild(std::string("e") + std::to_string(epoch)); // Create randomized batches std::shuffle(shuffledData.begin(), shuffledData.end(), rng); @@ -447,44 +476,86 @@ class MinibatchOptimizer { // Update learning rate according to epoch learningRateUpdater->updateLearningRate(epoch); - for (int batchIdx = 0; (unsigned) batchIdx < batches.size(); ++batchIdx) { - auto batchLogger = epochLogger->getChild(std::string("b") + std::to_string(batchIdx)); + for (int batchIdx = 0; (unsigned)batchIdx < batches.size(); + ++batchIdx) { + auto batchLogger = epochLogger->getChild( + std::string("b") + std::to_string(batchIdx)); iteration++; - auto status = evaluate(f, parameters, batches[batchIdx], cost, gradient, batchLogger.get(), reporter); + auto status = evaluate( + f, + parameters, + batches[batchIdx], + cost, + gradient, + batchLogger.get(), + reporter); // Give some output learningRate = learningRateUpdater->getCurrentLearningRate(); std::stringstream ss; - ss << ": Cost: " << cost << " |g|2: " << getVectorNorm(gradient) << " Batch: " << batches[batchIdx] - << " LearningRate: " << learningRate << std::endl; + ss << ": Cost: " << cost << " |g|2: " << getVectorNorm(gradient) + << " Batch: " << batches[batchIdx] + << " LearningRate: " << learningRate << std::endl; batchLogger->logmessage(loglevel::debug, ss.str().c_str()); if (status == functionEvaluationFailure) { - // Check, if the interceptor should be used (should always be the case, except for study purpose... + // Check, if the interceptor should be used (should always + // be the case, except for study purpose... 
if (interceptor > interceptType::none) - status = rescueInterceptor(parameters, oldParameters, gradient, oldGradient, - lowerParameterBounds, upperParameterBounds, cost, subsequentFails, - iteration, f, batches[batchIdx], batchLogger.get(), reporter); + status = rescueInterceptor( + parameters, + oldParameters, + gradient, + oldGradient, + lowerParameterBounds, + upperParameterBounds, + cost, + subsequentFails, + iteration, + f, + batches[batchIdx], + batchLogger.get(), + reporter); // If we still have a failure, stop optimization if (status == functionEvaluationFailure) - return finish(cost, parameters, minibatchExitStatus::invalidNumber, reporter, batchLogger.get()); + return finish( + cost, + parameters, + minibatchExitStatus::invalidNumber, + reporter, + batchLogger.get()); } else { - // Cost function evaluation was successful, so we can increase the step size + // Cost function evaluation was successful, so we can + // increase the step size subsequentFails = std::max(subsequentFails - 1, 0); learningRateUpdater->increaseLearningRate(); - // Overwrite old parameters and old gradient, since they won't be needed any more - std::copy(gradient.begin(), gradient.end(), oldGradient.begin()); - std::copy(parameters.begin(), parameters.end(), oldParameters.begin()); + // Overwrite old parameters and old gradient, since they + // won't be needed any more + std::copy( + gradient.begin(), gradient.end(), oldGradient.begin()); + std::copy( + parameters.begin(), + parameters.end(), + oldParameters.begin()); } /* Update parameters after successful gradient evaluation */ - handleStep(parameters, oldParameters, gradient, lowerParameterBounds, upperParameterBounds, - cost, iteration, f, batches[batchIdx], batchLogger.get(), reporter); - + handleStep( + parameters, + oldParameters, + gradient, + lowerParameterBounds, + upperParameterBounds, + cost, + iteration, + f, + batches[batchIdx], + batchLogger.get(), + reporter); } // epoch finished, write the values in HDF5-file @@ -493,39 +564,58 @@ class MinibatchOptimizer { if (getVectorNorm(gradient) <= gradientNormThreshold) { // evaluate on full data set - auto dataSpan = std::vector < BATCH_ELEMENT > (data.cbegin(), data.cend()); - evaluate(f, parameters, dataSpan, cost, gradient, epochLogger.get(), reporter); - return finish(cost, parameters, minibatchExitStatus::gradientNormConvergence, reporter, - epochLogger.get()); + auto dataSpan = + std::vector(data.cbegin(), data.cend()); + evaluate( + f, + parameters, + dataSpan, + cost, + gradient, + epochLogger.get(), + reporter); + return finish( + cost, + parameters, + minibatchExitStatus::gradientNormConvergence, + reporter, + epochLogger.get()); } } // evaluate on full data set - auto dataSpan = std::vector < BATCH_ELEMENT > (data.cbegin(), data.cend()); + auto dataSpan = std::vector(data.cbegin(), data.cend()); evaluate(f, parameters, dataSpan, cost, gradient, &logger, reporter); - return finish(cost, parameters, minibatchExitStatus::maxEpochsExceeded, reporter, &logger); + return finish( + cost, + parameters, + minibatchExitStatus::maxEpochsExceeded, + reporter, + &logger); } - FunctionEvaluationStatus evaluate(SummedGradientFunction const& f, - gsl::span parameters, - std::vector datasets, - double &cost, - gsl::span gradient, - Logger *logger, - OptimizationReporter *reporter) const { + FunctionEvaluationStatus evaluate( + SummedGradientFunction const& f, + gsl::span parameters, + std::vector datasets, + double& cost, + gsl::span gradient, + Logger* logger, + OptimizationReporter* reporter) const { if 
(reporter) { reporter->beforeCostFunctionCall(parameters); reporter->logger_->setPrefix(logger->getPrefix()); } double cpuTime = 0.0; - auto status = f.evaluate(parameters, datasets, cost, gradient, logger, &cpuTime); + auto status = + f.evaluate(parameters, datasets, cost, gradient, logger, &cpuTime); // Normalize to batch size double batchSize = datasets.size(); cost /= batchSize; - for (auto &g : gradient) + for (auto& g : gradient) g /= batchSize; if (reporter) { @@ -537,28 +627,35 @@ class MinibatchOptimizer { return status; } - std::tuple > finish(double cost, - std::vector const& parameters, - minibatchExitStatus status, - OptimizationReporter *reporter, - Logger *logger) { + std::tuple> finish( + double cost, + std::vector const& parameters, + minibatchExitStatus status, + OptimizationReporter* reporter, + Logger* logger) { if (logger) { switch (status) { case minibatchExitStatus::invalidNumber: - logger->logmessage(loglevel::error, "Mini-batch cost function evaluation failed."); + logger->logmessage( + loglevel::error, + "Mini-batch cost function evaluation failed."); break; case minibatchExitStatus::gradientNormConvergence: - logger->logmessage(loglevel::info, "Convergence: gradientNormThreshold reached."); + logger->logmessage( + loglevel::info, + "Convergence: gradientNormThreshold reached."); break; case minibatchExitStatus::maxEpochsExceeded: - logger->logmessage(loglevel::info, "Number of epochs exceeded."); + logger->logmessage( + loglevel::info, "Number of epochs exceeded."); } } if (reporter) - reporter->finished(cost, parameters, (int) status); + reporter->finished(cost, parameters, (int)status); - return std::tuple >((int) status, cost, parameters); + return std::tuple>( + (int)status, cost, parameters); } /** @@ -576,19 +673,20 @@ class MinibatchOptimizer { * @param reporter OptimizationReporter instance for tracking progress * @return FunctionEvaluationStatus */ - FunctionEvaluationStatus rescueInterceptor(gsl::span parameters, - gsl::span oldParameters, - gsl::span gradient, - gsl::span oldGradient, - gsl::span lowerParameterBounds, - gsl::span upperParameterBounds, - double &cost, - int &subsequentFails, - int iteration, - SummedGradientFunction const& f, - std::vector datasets, - Logger *logger, - OptimizationReporter *reporter) { + FunctionEvaluationStatus rescueInterceptor( + gsl::span parameters, + gsl::span oldParameters, + gsl::span gradient, + gsl::span oldGradient, + gsl::span lowerParameterBounds, + gsl::span upperParameterBounds, + double& cost, + int& subsequentFails, + int iteration, + SummedGradientFunction const& f, + std::vector datasets, + Logger* logger, + OptimizationReporter* reporter) { // initialize diagnostic variables int maxSubsequentFails = 10; @@ -606,34 +704,44 @@ class MinibatchOptimizer { // debug output if (logger) { - std::vector firstDifference = getVectorDifference(parameters, oldParameters); + std::vector firstDifference = + getVectorDifference(parameters, oldParameters); std::stringstream first_ss; - first_ss << " Interceptor is active! Former step size: " << getVectorNorm(firstDifference) << std::endl; + first_ss << " Interceptor is active! 
Former step size: " + << getVectorNorm(firstDifference) << std::endl; logger->logmessage(loglevel::debug, first_ss.str().c_str()); } - // Cost function evaluation failed: We need to intercept while (status == functionEvaluationFailure) { - // If the objective function evaluation failed, we want to undo the step + // If the objective function evaluation failed, we want to undo the + // step ++subsequentFails; parameterUpdater->undoLastStep(); std::copy(oldGradient.begin(), oldGradient.end(), gradient.begin()); - std::copy(oldParameters.begin(), oldParameters.end(), parameters.begin()); + std::copy( + oldParameters.begin(), oldParameters.end(), parameters.begin()); // note the previous parameter step for debugging parDifference = getVectorDifference(parameters, oldParameters); - // Check if there are NaNs in the parameter vector now (e.g., fail at first iteration) - if (std::any_of(parameters.begin(), parameters.end(), [](double d) {return std::isnan(d);})) + // Check if there are NaNs in the parameter vector now (e.g., fail + // at first iteration) + if (std::any_of(parameters.begin(), parameters.end(), [](double d) { + return std::isnan(d); + })) initialFail = true; if (subsequentFails >= maxSubsequentFails) finalFail = true; // If nothing helps and no cold restart wanted: cancel optimization - if (initialFail || (finalFail && interceptor != interceptType::reduceStepAndRestart)) { - logger->logmessage(loglevel::debug, "Failure at initial point of optimization. Stopping."); + if (initialFail || + (finalFail && + interceptor != interceptType::reduceStepAndRestart)) { + logger->logmessage( + loglevel::debug, + "Failure at initial point of optimization. Stopping."); return functionEvaluationFailure; } @@ -641,46 +749,53 @@ class MinibatchOptimizer { /* Reducing step size did not work. * Do a cold restart and take a very small step. */ - ss << "Final failure. Rescue interceptor could not recover optimization run." << std::endl; + ss << "Final failure. Rescue interceptor could not recover " + "optimization run." + << std::endl; subsequentFails = 0; parameterUpdater->clearCache(); learningRateUpdater->setReductionFactor(1e-5); } else { /* We did not fail too often: we reduce the step size */ - learningRateUpdater->reduceLearningRate(); - ss << "Failure. Reducing learning rate and retry..." << std::endl; + learningRateUpdater->reduceLearningRate(); + ss << "Failure. Reducing learning rate and retry..." 
+ << std::endl; } // debug output ss << ": Interceptor, before new evaluation: " << std::endl - << " New cost: " << cost - << ", new |g|2: " << getVectorNorm(gradient) - << ", new LearningRate: " << learningRate - << ", real step length: " << getVectorNorm(parDifference) - << ", Failures: " << subsequentFails - << std::endl; + << " New cost: " << cost + << ", new |g|2: " << getVectorNorm(gradient) + << ", new LearningRate: " << learningRate + << ", real step length: " << getVectorNorm(parDifference) + << ", Failures: " << subsequentFails << std::endl; // Do the next step learningRate = learningRateUpdater->getCurrentLearningRate(); - parameterUpdater->updateParameters(learningRate, iteration, gradient, parameters, lowerParameterBounds, - upperParameterBounds); + parameterUpdater->updateParameters( + learningRate, + iteration, + gradient, + parameters, + lowerParameterBounds, + upperParameterBounds); // Re-evaluate the cost function and hope for the best - status = evaluate(f, parameters, datasets, cost, gradient, logger, reporter); + status = evaluate( + f, parameters, datasets, cost, gradient, logger, reporter); // get Difference for debugging parDifference = getVectorDifference(parameters, oldParameters); // debug output ss << ": Interceptor, after new evaluation: " << std::endl - << " New Cost: " << cost - << " new |g|2: " << getVectorNorm(gradient) - << " new LearningRate: " << learningRate - << "real step length: " << getVectorNorm(parDifference) - << std::endl; + << " New Cost: " << cost + << " new |g|2: " << getVectorNorm(gradient) + << " new LearningRate: " << learningRate + << "real step length: " << getVectorNorm(parDifference) + << std::endl; if (logger) logger->logmessage(loglevel::debug, ss.str().c_str()); - } return status; @@ -701,42 +816,67 @@ class MinibatchOptimizer { * @param reporter OptimizationReporter instance for tracking progress * @return FunctionEvaluationStatus */ - void handleStep(gsl::span parameters, - gsl::span oldParameters, - gsl::span gradient, - gsl::span lowerParameterBounds, - gsl::span upperParameterBounds, - double cost, - int iteration, - SummedGradientFunction const& f, - std::vector datasets, - Logger *logger, - OptimizationReporter *reporter) { + void handleStep( + gsl::span parameters, + gsl::span oldParameters, + gsl::span gradient, + gsl::span lowerParameterBounds, + gsl::span upperParameterBounds, + double cost, + int iteration, + SummedGradientFunction const& f, + std::vector datasets, + Logger* logger, + OptimizationReporter* reporter) { /* Retrieve step length and try a full step */ double stepLength = learningRateUpdater->getCurrentLearningRate(); - parameterUpdater->updateParameters(stepLength, iteration, gradient, parameters, - lowerParameterBounds, upperParameterBounds); + parameterUpdater->updateParameters( + stepLength, + iteration, + gradient, + parameters, + lowerParameterBounds, + upperParameterBounds); /* If no line search desired: that's it! 
*/ if (lineSearchSteps == 0) return; /* Define lambda function for step length evaluation */ - std::function evalLineSearch = [&f, &datasets, iteration, - ¶meters, &oldParameters, &gradient, - &lowerParameterBounds, &upperParameterBounds, - &logger, &reporter, this](double alpha) { - + std::function evalLineSearch = [&f, + &datasets, + iteration, + ¶meters, + &oldParameters, + &gradient, + &lowerParameterBounds, + &upperParameterBounds, + &logger, + &reporter, + this](double alpha) { /* Reset oldParameters and re-update with new step length */ - std::copy(oldParameters.begin(), oldParameters.end(), parameters.begin()); + std::copy( + oldParameters.begin(), oldParameters.end(), parameters.begin()); parameterUpdater->undoLastStep(); - parameterUpdater->updateParameters(alpha, iteration, gradient, parameters, - lowerParameterBounds, upperParameterBounds); + parameterUpdater->updateParameters( + alpha, + iteration, + gradient, + parameters, + lowerParameterBounds, + upperParameterBounds); /* Write new cost function value and return */ double newCost = NAN; - evaluate(f, parameters, datasets, newCost, gsl::span(), logger, reporter); + evaluate( + f, + parameters, + datasets, + newCost, + gsl::span(), + logger, + reporter); return newCost; }; @@ -747,25 +887,27 @@ class MinibatchOptimizer { * cost0 is the previous cost before the parameter update, * we want the update to be lower. * - * First compute a naive step as if there was no line-search -> get cost1 - * If cost1 > cost0: Try to improve -> get cost2 - * If also cost2 > cost0: - * If only short line-search is desired: take min(cost1, cost2) - * If more longer line-search is desired: step into performLineSearch() + * First compute a naive step as if there was no line-search -> get + * cost1 If cost1 > cost0: Try to improve -> get cost2 If also cost2 > + * cost0: If only short line-search is desired: take min(cost1, cost2) + * If more longer line-search is desired: step into + * performLineSearch() * */ double cost1 = evalLineSearch(stepLength); /* Return on improvement */ if (cost1 <= cost) { - logger->logmessage(loglevel::debug, " Line-Search: Step was good right away..."); + logger->logmessage( + loglevel::debug, " Line-Search: Step was good right away..."); return; } // Debugging output std::stringstream line_ss; std::stringstream parabola_ss; - std::vector parDifference = getVectorDifference(parameters, oldParameters); + std::vector parDifference = + getVectorDifference(parameters, oldParameters); /* No improvement: compute update direction */ std::vector direction(parameters.size(), NAN); @@ -781,7 +923,8 @@ class MinibatchOptimizer { if (dirGradient > 0) { /* No descent direction, no hope for improvement: * Try to do something smart anyway */ - parabola_ss << " Line-Search: Not a descent direction! " << std::endl; + parabola_ss << " Line-Search: Not a descent direction! " + << std::endl; /* Fit a parabola to decide whether a smaller or * a bigger step seems more promising */ @@ -798,14 +941,18 @@ class MinibatchOptimizer { double cost2 = evalLineSearch(newStepLength); if (cost2 > cost1) { - /* The parabola idea didn't work. Just admit the step, as it is */ + /* The parabola idea didn't work. Just admit the step, as it is + */ cost1 = evalLineSearch(stepLength); parabola_ss << " Step adaption did not work..." 
<< std::endl; } - parabola_ss << " cost0: " << cost << " (step length: " << getVectorNorm(parDifference) - << "), cost1: " << cost1 << " (step length: " << stepLength - << "), cost2: " << cost2 << " (step length: " << newStepLength - << ") " << std::endl; + parabola_ss << " cost0: " << cost + << " (step length: " << getVectorNorm(parDifference) + << "), cost1: " << cost1 + << " (step length: " << stepLength + << "), cost2: " << cost2 + << " (step length: " << newStepLength << ") " + << std::endl; if (logger) logger->logmessage(loglevel::debug, parabola_ss.str().c_str()); @@ -816,25 +963,30 @@ class MinibatchOptimizer { /* Original step was too big, but we're facing a descent direction * Propose a new step based on a parabolic interpolation */ double newStepLength = -0.5 * dirGradient * std::pow(stepLength, 2.0) / - (cost1 - cost - dirGradient * stepLength); + (cost1 - cost - dirGradient * stepLength); double cost2 = evalLineSearch(newStepLength); // Debugging output if (logger) { - line_ss << " Line-Search: two steps were done, results are: " << std::endl - << " cost0: " << cost << " (step length: " << getVectorNorm(parDifference) - << "), cost1: " << cost1 << " (step length: " << stepLength - << "), cost2: " << cost2 << " (step length: " << newStepLength - << ")" << std::endl; + line_ss << " Line-Search: two steps were done, results are: " + << std::endl + << " cost0: " << cost + << " (step length: " << getVectorNorm(parDifference) + << "), cost1: " << cost1 + << " (step length: " << stepLength + << "), cost2: " << cost2 + << " (step length: " << newStepLength << ")" << std::endl; logger->logmessage(loglevel::debug, line_ss.str().c_str()); } /* If we did improve, return, otherwise iterate */ - if (cost2 < cost) return; + if (cost2 < cost) + return; if (lineSearchSteps < 2) { /* No more iteration wanted, but 2nd try was better than 1st */ - if (cost2 <= cost1) return; + if (cost2 <= cost1) + return; /* 1st try was better than 2nd, use it */ cost1 = evalLineSearch(stepLength); @@ -842,16 +994,19 @@ class MinibatchOptimizer { /* No descent found and line search option is set: iterate! */ std::stringstream line_ss; if (logger) { - line_ss << " Line-Search: Need to go to third order approximation, looping... " << std::endl; + line_ss << " Line-Search: Need to go to third order " + "approximation, looping... " + << std::endl; logger->logmessage(loglevel::debug, line_ss.str().c_str()); } - performLineSearch(stepLength, - newStepLength, - cost, - cost1, - cost2, - dirGradient, - evalLineSearch); + performLineSearch( + stepLength, + newStepLength, + cost, + cost1, + cost2, + dirGradient, + evalLineSearch); } } @@ -868,13 +1023,14 @@ class MinibatchOptimizer { * @param dirGradient alignment of gradient and step direction * @param costFunEvaluate objective function wo gradient */ - void performLineSearch(double alpha1, - double alpha2, - double cost, - double cost1, - double cost2, - double dirGradient, - std::function costFunEvaluate) { + void performLineSearch( + double alpha1, + double alpha2, + double cost, + double cost1, + double cost2, + double dirGradient, + std::function costFunEvaluate) { /* From here on, we will use cubic interpolation. 
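The parabolic step-length proposal used in handleStep above follows from fitting a quadratic model of the cost along the update direction. A minimal, self-contained sketch of that derivation, using the hypothetical helper name proposeParabolicStep (not part of parPE or of this patch):

#include <cmath>

// Quadratic model along the step direction:
//   q(alpha) = cost0 + dirGradient*alpha + c2*alpha^2
// with q(0) = cost0, q'(0) = dirGradient, q(stepLength) = cost1.
// Its minimizer is alpha* = -dirGradient / (2*c2), algebraically identical to
// the expression used above:
//   -0.5 * dirGradient * stepLength^2 / (cost1 - cost0 - dirGradient*stepLength)
double proposeParabolicStep(
    double cost0, double cost1, double dirGradient, double stepLength) {
    double const c2 =
        (cost1 - cost0 - dirGradient * stepLength) / (stepLength * stepLength);
    return -dirGradient / (2.0 * c2);
}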
* We need to compute the matrix-vector multiplication @@ -904,13 +1060,15 @@ class MinibatchOptimizer { double b = (tmp_21 * tmp_v1 + tmp_22 * tmp_v2) / tmp_D; /* Compute possible new step length */ - double alpha3 = (-b + std::sqrt(b*b - 3.0*a*dirGradient)) / (3.0*a); + double alpha3 = + (-b + std::sqrt(b * b - 3.0 * a * dirGradient)) / (3.0 * a); /* Evaluate line search function at alpha2 */ double cost3 = costFunEvaluate(alpha3); /* If improvement, return */ - if (cost3 < cost) return; + if (cost3 < cost) + return; /* If no improvement, update values and re-iterate */ if (iStep < lineSearchSteps) { @@ -923,7 +1081,8 @@ class MinibatchOptimizer { /* No good step was found, but the max number * of line search steps was reached */ - if (cost3 < std::min(cost1, cost2)) return; + if (cost3 < std::min(cost1, cost2)) + return; if (cost1 < cost2) { cost1 = costFunEvaluate(alpha1); @@ -932,8 +1091,8 @@ class MinibatchOptimizer { } } - - std::unique_ptr parameterUpdater = std::make_unique(); + std::unique_ptr parameterUpdater = + std::make_unique(); // Set some default values interceptType interceptor = interceptType::reduceStepAndRestart; @@ -943,8 +1102,10 @@ class MinibatchOptimizer { double gradientNormThreshold = 0.0; double learningRate = 0.001; - std::unique_ptr learningRateUpdater = std::make_unique < LearningRateUpdater - > (maxEpochs, learningRateInterp::linear); + std::unique_ptr learningRateUpdater = + std::make_unique( + maxEpochs, + learningRateInterp::linear); }; /** @@ -952,24 +1113,28 @@ class MinibatchOptimizer { * @param pair * @param optimizer */ -void setMinibatchOption(const std::pair &pair, - MinibatchOptimizer* optimizer); +void setMinibatchOption( + std::pair const& pair, + MinibatchOptimizer* optimizer); /** * @brief Create and setup a mini-batch optimizer according to the given options * @param options * @return */ -template -std::unique_ptr> getMinibatchOptimizer(OptimizationOptions const& options) { +template +std::unique_ptr> +getMinibatchOptimizer(OptimizationOptions const& options) { auto optim = std::make_unique>(); - options.for_each*>(setMinibatchOption, optim.get()); + options.for_each*>( + setMinibatchOption, optim.get()); return optim; } -std::tuple > runMinibatchOptimization(MinibatchOptimizationProblem *problem); +std::tuple> +runMinibatchOptimization(MinibatchOptimizationProblem* problem); /** * @brief Clip values to given element-wise bounds. 
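The coefficients a and b and the trial step alpha3 computed in performLineSearch above correspond to the standard cubic interpolation of the line-search function phi(alpha) through phi(0), phi'(0), phi(alpha1), and phi(alpha2) (cf. Nocedal & Wright, Numerical Optimization, Sec. 3.5). A self-contained sketch with the hypothetical name proposeCubicStep, for orientation only:

#include <cmath>

// Cubic model phi(alpha) ~= a*alpha^3 + b*alpha^2 + dirGradient*alpha + cost0,
// fitted through phi(alpha1) = cost1 and phi(alpha2) = cost2. The return value
// is the root of phi'(alpha) = 3*a*alpha^2 + 2*b*alpha + dirGradient = 0 that
// corresponds to the local minimum, matching the alpha3 expression above.
double proposeCubicStep(
    double alpha1,
    double alpha2,
    double cost0,       // phi(0)
    double cost1,       // phi(alpha1)
    double cost2,       // phi(alpha2)
    double dirGradient) // phi'(0)
{
    double const d = alpha1 * alpha1 * alpha2 * alpha2 * (alpha2 - alpha1);
    double const v1 = cost2 - cost0 - dirGradient * alpha2;
    double const v2 = cost1 - cost0 - dirGradient * alpha1;
    double const a = (alpha1 * alpha1 * v1 - alpha2 * alpha2 * v2) / d;
    double const b =
        (-alpha1 * alpha1 * alpha1 * v1 + alpha2 * alpha2 * alpha2 * v2) / d;
    return (-b + std::sqrt(b * b - 3.0 * a * dirGradient)) / (3.0 * a);
}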
@@ -977,17 +1142,20 @@ std::tuple > runMinibatchOptimization(Minibatch * @param upperBounds * @param x */ -template -void clipToBounds(gsl::span lowerBounds, - gsl::span upperBounds, - gsl::span x) { +template +void clipToBounds( + gsl::span lowerBounds, + gsl::span upperBounds, + gsl::span x) { if (lowerBounds.empty() && upperBounds.empty()) return; Expects(lowerBounds.size() == upperBounds.size()); Expects(lowerBounds.size() == x.size()); - for (int i = 0; static_cast::index_type>(i) < x.size(); ++i) + for (int i = 0; + static_cast::index_type>(i) < x.size(); + ++i) x[i] = std::min(std::max(lowerBounds[i], x[i]), upperBounds[i]); } diff --git a/include/parpeoptimization/multiStartOptimization.h b/include/parpeoptimization/multiStartOptimization.h index f5c68ef9c..3ff4d7977 100644 --- a/include/parpeoptimization/multiStartOptimization.h +++ b/include/parpeoptimization/multiStartOptimization.h @@ -11,7 +11,7 @@ namespace parpe { * @brief Interface for multi-start optimization problems */ class MultiStartOptimizationProblem { -public: + public: virtual int getNumberOfStarts() const = 0; virtual bool restartOnFailure() const { return false; } @@ -22,7 +22,6 @@ class MultiStartOptimizationProblem { virtual ~MultiStartOptimizationProblem() = default; }; - /** * @brief The MultiStartOptimization class runs multiple optimization runs */ @@ -30,9 +29,10 @@ class MultiStartOptimizationProblem { class MultiStartOptimization { public: - MultiStartOptimization(MultiStartOptimizationProblem& problem, - bool runParallel = true, - int first_start_idx = 0); + MultiStartOptimization( + MultiStartOptimizationProblem& problem, + bool runParallel = true, + int first_start_idx = 0); ~MultiStartOptimization() = default; @@ -62,7 +62,6 @@ class MultiStartOptimization { */ int runStart(int start_idx) const; - /** Optimization problem to be solved */ MultiStartOptimizationProblem& msProblem; diff --git a/include/parpeoptimization/optimizationOptions.h b/include/parpeoptimization/optimizationOptions.h index e3f9fa85b..359b847b5 100644 --- a/include/parpeoptimization/optimizationOptions.h +++ b/include/parpeoptimization/optimizationOptions.h @@ -3,11 +3,11 @@ #include -#include -#include -#include #include +#include +#include #include +#include #include namespace parpe { @@ -24,8 +24,6 @@ enum class optimizerName { OPTIMIZER_MINIBATCH_1 = 10 }; - - /** Type to describe an optimization (minimization) problem */ class OptimizationOptions { @@ -39,7 +37,7 @@ class OptimizationOptions { optimizerName optimizer = optimizerName::OPTIMIZER_IPOPT; /** Optimizer log file */ - char *logFile = nullptr; + char* logFile = nullptr; /** Print progress to stdout */ bool printToStdout = true; @@ -47,10 +45,14 @@ class OptimizationOptions { /** Maximum number of optimizer iterations*/ int maxOptimizerIterations = 100; - static std::unique_ptr fromHDF5(std::string const& fileName); - static std::unique_ptr fromHDF5(const H5::H5File &file, const std::string &path = "/optimizationOptions"); + static std::unique_ptr + fromHDF5(std::string const& fileName); + static std::unique_ptr fromHDF5( + const H5::H5File& file, + std::string const& path = "/optimizationOptions"); - static std::vector getStartingPoint(const H5::H5File &file, int index); + static std::vector + getStartingPoint(const H5::H5File& file, int index); /** Number of starts for local optimization (only used for multi-start * optimization */ @@ -61,29 +63,33 @@ class OptimizationOptions { int retryOptimization = false; /** use hierarchical optimization if respective 
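A possible usage sketch for the clipToBounds template declared above, projecting a parameter vector element-wise onto box bounds. The include paths and the implicit vector-to-span conversion are assumptions based on how parPE uses gsl::span elsewhere; the numeric values are made up:

#include <parpeoptimization/minibatchOptimization.h> // assumed header path
#include <gsl/gsl>                                   // assumed Microsoft GSL header
#include <vector>

void clipExample() {
    std::vector<double> lowerBounds{0.0, -1.0, 2.0};
    std::vector<double> upperBounds{1.0, 1.0, 3.0};
    std::vector<double> x{1.5, -2.0, 2.5};
    // After the call, x is expected to hold {1.0, -1.0, 2.5}.
    parpe::clipToBounds<double>(lowerBounds, upperBounds, x);
}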
configuration is available - * see hierarchicalOptimization.cpp */ + * see hierarchicalOptimization.cpp */ int hierarchicalOptimization = true; int multistartsInParallel = true; std::string toString(); - int getIntOption(const std::string &key); - double getDoubleOption(const std::string &key); - std::string getStringOption(const std::string& key); + int getIntOption(std::string const& key); + double getDoubleOption(std::string const& key); + std::string getStringOption(std::string const& key); - void setOption(const std::string &key, int value); - void setOption(const std::string &key, double value); - void setOption(const std::string& key, std::string value); + void setOption(std::string const& key, int value); + void setOption(std::string const& key, double value); + void setOption(std::string const& key, std::string value); template - void for_each(std::function< void (const std::pair, T)> f, T arg) const - { - std::for_each(options.cbegin(), options.cend(), - std::bind(f, std::placeholders::_1, arg)); + void for_each( + std::function< + void(std::pair const, T)> f, + T arg) const { + std::for_each( + options.cbegin(), + options.cend(), + std::bind(f, std::placeholders::_1, arg)); } -private: + private: std::map options; }; @@ -92,7 +98,7 @@ std::unique_ptr optimizerFactory(optimizerName optimizer); /** * @brief Print list of supported optimizers */ -void printAvailableOptimizers(const std::string &prefix = ""); +void printAvailableOptimizers(std::string const& prefix = ""); } // namespace parpe #endif // OPTIMIZATIONOPTIONS_H diff --git a/include/parpeoptimization/optimizationProblem.h b/include/parpeoptimization/optimizationProblem.h index 68524c879..5894fb1d5 100644 --- a/include/parpeoptimization/optimizationProblem.h +++ b/include/parpeoptimization/optimizationProblem.h @@ -1,15 +1,15 @@ #ifndef OPTIMIZATION_PROBLEM_H #define OPTIMIZATION_PROBLEM_H -#include -#include #include #include +#include +#include #include -#include #include +#include namespace parpe { @@ -26,20 +26,23 @@ class OptimizationReporter; * evaluation is added to allow caching previous cost function values. */ -class OptimizationReporter: public GradientFunction { -public: - OptimizationReporter(GradientFunction *gradFun, - std::unique_ptr logger); +class OptimizationReporter : public GradientFunction { + public: + OptimizationReporter( + GradientFunction* gradFun, + std::unique_ptr logger); - OptimizationReporter(GradientFunction *gradFun, - std::unique_ptr rw, - std::unique_ptr logger); + OptimizationReporter( + GradientFunction* gradFun, + std::unique_ptr rw, + std::unique_ptr logger); - FunctionEvaluationStatus evaluate(gsl::span parameters, - double &fval, - gsl::span gradient, - Logger *logger = nullptr, - double *cpuTime = nullptr) const override; + FunctionEvaluationStatus evaluate( + gsl::span parameters, + double& fval, + gsl::span gradient, + Logger* logger = nullptr, + double* cpuTime = nullptr) const override; int numParameters() const override; @@ -49,7 +52,7 @@ class OptimizationReporter: public GradientFunction { * @param initialParameters * @return Quit optimization? */ - virtual bool starting(gsl::span initialParameters) const; + virtual bool starting(gsl::span initialParameters) const; /** * @brief Is called after each iteration except for the last one @@ -58,29 +61,33 @@ class OptimizationReporter: public GradientFunction { * @param objectiveFunctionGradient * @return Quit optimization? 
*/ - virtual bool iterationFinished(gsl::span parameters, - double objectiveFunctionValue, - gsl::span objectiveFunctionGradient) const; + virtual bool iterationFinished( + gsl::span parameters, + double objectiveFunctionValue, + gsl::span objectiveFunctionGradient) const; - virtual bool beforeCostFunctionCall(gsl::span parameters) const; + virtual bool + beforeCostFunctionCall(gsl::span parameters) const; - virtual bool afterCostFunctionCall(gsl::span parameters, - double objectiveFunctionValue, - gsl::span objectiveFunctionGradient) const; + virtual bool afterCostFunctionCall( + gsl::span parameters, + double objectiveFunctionValue, + gsl::span objectiveFunctionGradient) const; /** * @brief Is called after optimization finished */ - virtual void finished(double optimalCost, - gsl::span parameters, - int exitStatus) const; + virtual void finished( + double optimalCost, + gsl::span parameters, + int exitStatus) const; // TODO how to pass optimizer-specific info? pass OptimizerStatus class ? // virtual int intermediateFunction(int alg_mod, int iter_count, // double obj_value, double inf_pr, - // double inf_du, double mu, double d_norm, - // double regularization_size, + // double inf_du, double mu, double + // d_norm, double regularization_size, // double alpha_du, double alpha_pr, // int ls_trials); @@ -88,7 +95,7 @@ class OptimizationReporter: public GradientFunction { virtual std::vector const& getFinalParameters() const; - void setGradientFunction(GradientFunction *gradFun) const; + void setGradientFunction(GradientFunction* gradFun) const; std::vector getParameterIds() const override; @@ -98,7 +105,7 @@ class OptimizationReporter: public GradientFunction { mutable double cpu_time_iteration_sec_ = 0.0; std::unique_ptr logger_; -protected: + protected: void printObjectiveFunctionFailureMessage() const; // data members are mutable, because we inherit from GradientFunction, @@ -113,7 +120,7 @@ class OptimizationReporter: public GradientFunction { mutable bool started_ = false; // non-owning - mutable GradientFunction *grad_fun_ = nullptr; + mutable GradientFunction* grad_fun_ = nullptr; // for caching mutable bool have_cached_cost_ = false; @@ -130,7 +137,6 @@ class OptimizationReporter: public GradientFunction { std::string default_logger_prefix_; }; - /** * @brief The OptimizationProblem class describes an optimization problem. * @@ -144,10 +150,11 @@ class OptimizationReporter: public GradientFunction { class OptimizationProblem { -public: + public: OptimizationProblem() = default; - OptimizationProblem(std::unique_ptr costFun, - std::unique_ptr logger); + OptimizationProblem( + std::unique_ptr costFun, + std::unique_ptr logger); OptimizationProblem(OptimizationProblem const& other) = delete; virtual ~OptimizationProblem() = default; @@ -173,17 +180,16 @@ class OptimizationProblem { std::unique_ptr logger_; -private: + private: OptimizationOptions optimization_options_; }; - /** * @brief Mixin class for handling parameter bounds */ -class OptimizationProblemImpl: public OptimizationProblem { +class OptimizationProblemImpl : public OptimizationProblem { -public: + public: using OptimizationProblem::OptimizationProblem; /** lower bound of parameter values */ @@ -200,37 +206,38 @@ class OptimizationProblemImpl: public OptimizationProblem { void fillInitialParameters(gsl::span buffer) const override; -private: + private: std::vector parametersMin; std::vector parametersMax; std::vector parametersStart; - }; - /** * @brief getLocalOptimum * @param problem * @return int indicating status. 
0: success, != 0: failure */ -int getLocalOptimum(OptimizationProblem *problem); - +int getLocalOptimum(OptimizationProblem* problem); -void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, - int numParameterIndicesToCheck); +void optimizationProblemGradientCheckMultiEps( + OptimizationProblem* problem, + int numParameterIndicesToCheck); -void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, - gsl::span parameterIndices, - gsl::span multi_eps); +void optimizationProblemGradientCheckMultiEps( + OptimizationProblem* problem, + gsl::span parameterIndices, + gsl::span multi_eps); -void optimizationProblemGradientCheck(OptimizationProblem *problem, - int numParameterIndicesToCheck, - double epsilon); +void optimizationProblemGradientCheck( + OptimizationProblem* problem, + int numParameterIndicesToCheck, + double epsilon); -void optimizationProblemGradientCheck(OptimizationProblem *problem, - gsl::span parameterIndices, - double epsilon); +void optimizationProblemGradientCheck( + OptimizationProblem* problem, + gsl::span parameterIndices, + double epsilon); } // namespace parpe diff --git a/include/parpeoptimization/optimizationResultWriter.h b/include/parpeoptimization/optimizationResultWriter.h index abb5c26f6..ca20a6fe3 100644 --- a/include/parpeoptimization/optimizationResultWriter.h +++ b/include/parpeoptimization/optimizationResultWriter.h @@ -16,14 +16,13 @@ namespace parpe { */ class OptimizationResultWriter { -public: + public: /** * @brief Write to pre-opened HDF5 file (will be re-opened) * @param file * @param rootPath */ - OptimizationResultWriter(const H5::H5File &file, - std::string rootPath); + OptimizationResultWriter(const H5::H5File& file, std::string rootPath); /** * @brief Open HDF5 file and write there @@ -31,11 +30,13 @@ class OptimizationResultWriter { * @param overwrite Overwrite output file if already exists * @param rootPath */ - OptimizationResultWriter(const std::string &filename, - bool overwrite, - std::string rootPath); + OptimizationResultWriter( + std::string const& filename, + bool overwrite, + std::string rootPath); - OptimizationResultWriter& operator=(const OptimizationResultWriter& other) = delete; + OptimizationResultWriter& + operator=(OptimizationResultWriter const& other) = delete; OptimizationResultWriter(OptimizationResultWriter const& other); @@ -53,12 +54,12 @@ class OptimizationResultWriter { * evaluation (wall time) */ virtual void logObjectiveFunctionEvaluation( - gsl::span parameters, - double objectiveFunctionValue, - gsl::span objectiveFunctionGradient, - int numIterations, - int numFunctionCalls, - double timeElapsedInSeconds); + gsl::span parameters, + double objectiveFunctionValue, + gsl::span objectiveFunctionGradient, + int numIterations, + int numFunctionCalls, + double timeElapsedInSeconds); /** * @brief Function to be called after each optimizer iteration. 
(For @@ -78,23 +79,23 @@ class OptimizationResultWriter { * @param alpha_pr * @param ls_trials */ - virtual void logOptimizerIteration(int numIterations, - gsl::span parameters, - double objectiveFunctionValue, - gsl::span gradient, - double wallSeconds, - double cpuSeconds); + virtual void logOptimizerIteration( + int numIterations, + gsl::span parameters, + double objectiveFunctionValue, + gsl::span gradient, + double wallSeconds, + double cpuSeconds); void setLoggingEachIteration(bool logGradient); - void setLoggingEachFunctionEvaluation(bool logGradient, - bool logParameters); + void setLoggingEachFunctionEvaluation(bool logGradient, bool logParameters); /** * @brief Log optimizer start * @param initialParameters */ - virtual void starting(gsl::span initialParameters); + virtual void starting(gsl::span initialParameters); /** * @brief Function to be called when local optimization is finished. @@ -103,11 +104,12 @@ class OptimizationResultWriter { * @param masterTime Wall time for this optimization * @param exitStatus Exit status (cause of optimizer termination) */ - virtual void saveOptimizerResults(double finalNegLogLikelihood, - gsl::span optimalParameters, - double wallSec, - double cpuSec, - int exitStatus) const; + virtual void saveOptimizerResults( + double finalNegLogLikelihood, + gsl::span optimalParameters, + double wallSec, + double cpuSec, + int exitStatus) const; H5::H5File const& getH5File() const; @@ -125,20 +127,19 @@ class OptimizationResultWriter { */ void setRootPath(std::string const& path); -protected: + protected: /** * @brief Write buffered output to file */ virtual void flushResultWriter() const; -private: + private: virtual std::string getIterationPath(int iterationIdx) const; H5::H5File file = 0; /** Root path within HDF5 file */ std::string rootPath = "/"; - }; } // namespace parpe diff --git a/include/parpeoptimization/optimizer.h b/include/parpeoptimization/optimizer.h index 16d5c6437..53e57218f 100644 --- a/include/parpeoptimization/optimizer.h +++ b/include/parpeoptimization/optimizer.h @@ -14,7 +14,8 @@ class OptimizationOptions; */ class Optimizer { public: - virtual std::tuple > optimize(OptimizationProblem *) = 0; + virtual std::tuple> + optimize(OptimizationProblem*) = 0; virtual ~Optimizer() = default; }; diff --git a/src/parpeamici/amiciMisc.cpp b/src/parpeamici/amiciMisc.cpp index 0869c2963..91bfea1cd 100644 --- a/src/parpeamici/amiciMisc.cpp +++ b/src/parpeamici/amiciMisc.cpp @@ -1,8 +1,8 @@ -#include #include -#include #include #include +#include +#include #include #include @@ -12,11 +12,11 @@ namespace parpe { using amici::ReturnData; std::unique_ptr run_amici_simulation( - amici::Solver &solver, - const amici::ExpData *edata, - amici::Model &model, + amici::Solver& solver, + amici::ExpData const* edata, + amici::Model& model, bool rethrow, - Logger *logger) { + Logger* logger) { auto rdata = amici::runAmiciSimulation(solver, edata, model, rethrow); @@ -24,7 +24,7 @@ std::unique_ptr run_amici_simulation( // TODO: subclass amici::Logger to print messages without delay // for now, print collected messages after simulation - for(auto const& log_item: rdata->messages) { + for (auto const& log_item : rdata->messages) { auto lvl = loglevel::debug; switch (log_item.severity) { case amici::LogSeverity::debug: @@ -37,16 +37,15 @@ std::unique_ptr run_amici_simulation( lvl = loglevel::error; break; } - if(!log_item.identifier.empty()) { - logger->logmessage(lvl, "[" + log_item.identifier + "] " + log_item.message); + if (!log_item.identifier.empty()) { + 
logger->logmessage( + lvl, "[" + log_item.identifier + "] " + log_item.message); } else { logger->logmessage(loglevel::warning, log_item.message); } - } } return rdata; } } // namespace parpe - diff --git a/src/parpeamici/amiciSimulationRunner.cpp b/src/parpeamici/amiciSimulationRunner.cpp index 0b44347f9..376329f1c 100644 --- a/src/parpeamici/amiciSimulationRunner.cpp +++ b/src/parpeamici/amiciSimulationRunner.cpp @@ -6,37 +6,35 @@ #include #endif -#include -#include -#include -#include #include #include +#include +#include +#include +#include // #define PARPE_SIMULATION_RUNNER_DEBUG namespace parpe { AmiciSimulationRunner::AmiciSimulationRunner( - std::vector const& optimizationParameters, - amici::SensitivityOrder sensitivityOrder, - std::vector const& conditionIndices, - AmiciSimulationRunner::callbackJobFinishedType callbackJobFinished, - AmiciSimulationRunner::callbackAllFinishedType aggregate, - std::string logPrefix) - : optimization_parameters_(optimizationParameters) - , sensitivity_order_(sensitivityOrder) - , condition_indices_(conditionIndices) - , callback_job_finished_(std::move(std::move(callbackJobFinished))) - , aggregate_(std::move(std::move(aggregate))) - , log_prefix_(std::move(logPrefix)) -{} + std::vector const& optimizationParameters, + amici::SensitivityOrder sensitivityOrder, + std::vector const& conditionIndices, + AmiciSimulationRunner::callbackJobFinishedType callbackJobFinished, + AmiciSimulationRunner::callbackAllFinishedType aggregate, + std::string logPrefix) + : optimization_parameters_(optimizationParameters) + , sensitivity_order_(sensitivityOrder) + , condition_indices_(conditionIndices) + , callback_job_finished_(std::move(std::move(callbackJobFinished))) + , aggregate_(std::move(std::move(aggregate))) + , log_prefix_(std::move(logPrefix)) {} #ifdef PARPE_ENABLE_MPI -int -AmiciSimulationRunner::runDistributedMemory(LoadBalancerMaster* loadBalancer, - const int maxSimulationsPerPackage) -{ +int AmiciSimulationRunner::runDistributedMemory( + LoadBalancerMaster* loadBalancer, + const int maxSimulationsPerPackage) { #ifdef PARPE_SIMULATION_RUNNER_DEBUG printf("runDistributedMemory\n"); #endif @@ -46,34 +44,35 @@ AmiciSimulationRunner::runDistributedMemory(LoadBalancerMaster* loadBalancer, std::mutex simulationsMutex; // multiple simulations may be grouped into one work package - auto numJobsTotal = static_cast( - std::ceil(static_cast(condition_indices_.size()) / - maxSimulationsPerPackage)); - std::vector jobs{ static_cast( - numJobsTotal) }; + auto numJobsTotal = static_cast(std::ceil( + static_cast(condition_indices_.size()) / + maxSimulationsPerPackage)); + std::vector jobs{ + static_cast(numJobsTotal)}; int numJobsFinished = 0; int numConditionsSent = 0; // prepare and queue work package for (int jobIdx = 0; jobIdx < numJobsTotal; ++jobIdx) { int const simulationsLeft = - static_cast(condition_indices_.size()) - numConditionsSent; + static_cast(condition_indices_.size()) - numConditionsSent; int const simulationsCurrentPackage = - std::min(simulationsLeft, maxSimulationsPerPackage); + std::min(simulationsLeft, maxSimulationsPerPackage); auto currentConditions = std::vector( - &condition_indices_[static_cast::size_type>( - numConditionsSent)], - &condition_indices_[numConditionsSent + simulationsCurrentPackage]); - queueSimulation(loadBalancer, - &jobs[jobIdx], - &numJobsFinished, - &simulationsCond, - &simulationsMutex, - jobIdx, - optimization_parameters_, - sensitivity_order_, - currentConditions); + &condition_indices_[static_cast::size_type>( + 
numConditionsSent)], + &condition_indices_[numConditionsSent + simulationsCurrentPackage]); + queueSimulation( + loadBalancer, + &jobs[jobIdx], + &numJobsFinished, + &simulationsCond, + &simulationsMutex, + jobIdx, + optimization_parameters_, + sensitivity_order_, + currentConditions); numConditionsSent += simulationsCurrentPackage; // printf("Queued work: "); printDatapath(path); @@ -82,8 +81,9 @@ AmiciSimulationRunner::runDistributedMemory(LoadBalancerMaster* loadBalancer, // wait for simulations to finish // TODO don't wait for all to complete; stop early if errors occurred std::unique_lock lock(simulationsMutex); - simulationsCond.wait(lock, [&numJobsFinished, &numJobsTotal]{ - return numJobsFinished == numJobsTotal;}); + simulationsCond.wait(lock, [&numJobsFinished, &numJobsTotal] { + return numJobsFinished == numJobsTotal; + }); // unpack if (aggregate_) @@ -93,16 +93,15 @@ AmiciSimulationRunner::runDistributedMemory(LoadBalancerMaster* loadBalancer, } #endif -int -AmiciSimulationRunner::runSharedMemory(const messageHandlerFunc& messageHandler, - [[maybe_unused]] bool sequential) -{ +int AmiciSimulationRunner::runSharedMemory( + const messageHandlerFunc& messageHandler, + [[maybe_unused]] bool sequential) { #ifdef PARPE_SIMULATION_RUNNER_DEBUG printf("runSharedMemory\n"); #endif - std::vector jobs{ static_cast( - condition_indices_.size()) }; + std::vector jobs{ + static_cast(condition_indices_.size())}; #if defined(_OPENMP) if (sequential) @@ -115,11 +114,12 @@ AmiciSimulationRunner::runSharedMemory(const messageHandlerFunc& messageHandler, ++simulationIdx) { // to reuse the parallel code and for debugging we still serialize the // job data here - auto curConditionIndices = std::vector{ simulationIdx }; - AmiciWorkPackageSimple work{ optimization_parameters_, - sensitivity_order_, - curConditionIndices, - log_prefix_ }; + auto curConditionIndices = std::vector{simulationIdx}; + AmiciWorkPackageSimple work{ + optimization_parameters_, + sensitivity_order_, + curConditionIndices, + log_prefix_}; auto buffer = amici::serializeToStdVec(work); messageHandler(buffer, simulationIdx); @@ -137,8 +137,7 @@ AmiciSimulationRunner::runSharedMemory(const messageHandlerFunc& messageHandler, } #ifdef PARPE_ENABLE_MPI -void -AmiciSimulationRunner::queueSimulation( +void AmiciSimulationRunner::queueSimulation( LoadBalancerMaster* loadBalancer, JobData* d, int* jobDone, @@ -147,29 +146,30 @@ AmiciSimulationRunner::queueSimulation( int jobIdx, std::vector const& optimizationParameters, amici::SensitivityOrder sensitivityOrder, - std::vector const& conditionIndices) const -{ + std::vector const& conditionIndices) const { // TODO avoid copy optimizationParameters; reuse;; for const& in work // package need to split into(de)serialize *d = JobData(jobDone, jobDoneChangedCondition, jobDoneChangedMutex); AmiciWorkPackageSimple work{ - optimizationParameters, sensitivityOrder, conditionIndices, log_prefix_ - }; + optimizationParameters, + sensitivityOrder, + conditionIndices, + log_prefix_}; d->sendBuffer = amici::serializeToStdVec(work); // TODO: must ignore 2nd argument for SimulationRunnerSimple if (callback_job_finished_) - d->callbackJobFinished = std::bind(callback_job_finished_, std::placeholders::_1, jobIdx); + d->callbackJobFinished = + std::bind(callback_job_finished_, std::placeholders::_1, jobIdx); loadBalancer->queueJob(d); } #endif -void -swap(AmiciSimulationRunner::AmiciResultPackageSimple& first, - AmiciSimulationRunner::AmiciResultPackageSimple& second) noexcept -{ +void swap( + 
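For orientation, a small self-contained example (values are made up, not taken from the patch) of the work-package sizing performed in runDistributedMemory above: the conditions are split into ceil(n / maxSimulationsPerPackage) packages, with the final package holding the remainder.

#include <algorithm>
#include <cassert>
#include <cmath>

int main() {
    int const numConditions = 10;           // example value
    int const maxSimulationsPerPackage = 4; // example value
    auto const numJobsTotal = static_cast<int>(std::ceil(
        static_cast<double>(numConditions) / maxSimulationsPerPackage));
    assert(numJobsTotal == 3);
    int numConditionsSent = 0;
    for (int jobIdx = 0; jobIdx < numJobsTotal; ++jobIdx) {
        int const simulationsCurrentPackage = std::min(
            numConditions - numConditionsSent, maxSimulationsPerPackage);
        // package sizes: 4, 4, 2
        numConditionsSent += simulationsCurrentPackage;
    }
    assert(numConditionsSent == numConditions);
    return 0;
}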
AmiciSimulationRunner::AmiciResultPackageSimple& first, + AmiciSimulationRunner::AmiciResultPackageSimple& second) noexcept { using std::swap; swap(first.llh, second.llh); swap(first.simulationTimeSeconds, second.simulationTimeSeconds); @@ -180,10 +180,9 @@ swap(AmiciSimulationRunner::AmiciResultPackageSimple& first, swap(first.status, second.status); } -bool -operator==(const AmiciSimulationRunner::AmiciResultPackageSimple& lhs, - const AmiciSimulationRunner::AmiciResultPackageSimple& rhs) -{ +bool operator==( + AmiciSimulationRunner::AmiciResultPackageSimple const& lhs, + AmiciSimulationRunner::AmiciResultPackageSimple const& rhs) { return lhs.llh == rhs.llh && lhs.status == rhs.status && lhs.gradient == rhs.gradient && lhs.modelOutput == rhs.modelOutput && lhs.modelStates == rhs.modelStates && diff --git a/src/parpeamici/hierarchicalOptimization.cpp b/src/parpeamici/hierarchicalOptimization.cpp index 2afd76db0..d14f17274 100644 --- a/src/parpeamici/hierarchicalOptimization.cpp +++ b/src/parpeamici/hierarchicalOptimization.cpp @@ -22,9 +22,8 @@ HierarchicalOptimizationWrapper::HierarchicalOptimizationWrapper( int numConditions, int numObservables) : wrapped_function_(wrapped_function) - , numConditions(numConditions) - , numObservables(numObservables) -{ + , numConditions(numConditions) + , numObservables(numObservables) { scalingReader = std::make_unique(); offsetReader = std::make_unique(); sigmaReader = std::make_unique(); @@ -41,10 +40,9 @@ HierarchicalOptimizationWrapper::HierarchicalOptimizationWrapper( int numObservables, ErrorModel errorModel) : wrapped_function_(wrapped_function) - , numConditions(numConditions) - , numObservables(numObservables) - , errorModel(errorModel) -{ + , numConditions(numConditions) + , numObservables(numObservables) + , errorModel(errorModel) { scalingReader = std::make_unique( file, hdf5RootPath + "/scalingParameterIndices", @@ -72,19 +70,16 @@ HierarchicalOptimizationWrapper::HierarchicalOptimizationWrapper( int numObservables, ErrorModel errorModel) : wrapped_function_(wrapped_function) - , scalingReader(std::move(scalingReader)) - , offsetReader(std::move(offsetReader)) - , sigmaReader(std::move(sigmaReader)) - , numConditions(numConditions) - , numObservables(numObservables) - , errorModel(errorModel) -{ + , scalingReader(std::move(scalingReader)) + , offsetReader(std::move(offsetReader)) + , sigmaReader(std::move(sigmaReader)) + , numConditions(numConditions) + , numObservables(numObservables) + , errorModel(errorModel) { init(); } -void -HierarchicalOptimizationWrapper::init() -{ +void HierarchicalOptimizationWrapper::init() { if (errorModel != ErrorModel::normal) { throw ParPEException("Only Gaussian noise is supported so far."); } @@ -94,77 +89,79 @@ HierarchicalOptimizationWrapper::init() * scalingFactorIdx in mapping table -> difficult) */ proportionalityFactorIndices = this->scalingReader->getOptimizationParameterIndices(); - Expects(std::is_sorted(this->proportionalityFactorIndices.begin(), - this->proportionalityFactorIndices.end())); + Expects(std::is_sorted( + this->proportionalityFactorIndices.begin(), + this->proportionalityFactorIndices.end())); offsetParameterIndices = this->offsetReader->getOptimizationParameterIndices(); - Expects(std::is_sorted(this->offsetParameterIndices.begin(), - this->offsetParameterIndices.end())); + Expects(std::is_sorted( + this->offsetParameterIndices.begin(), + this->offsetParameterIndices.end())); sigmaParameterIndices = this->sigmaReader->getOptimizationParameterIndices(); - 
Expects(std::is_sorted(this->sigmaParameterIndices.begin(), - this->sigmaParameterIndices.end())); + Expects(std::is_sorted( + this->sigmaParameterIndices.begin(), + this->sigmaParameterIndices.end())); if (wrapped_function_) { std::stringstream ss; ss << "HierarchicalOptimizationWrapper parameters: " - << wrapped_function_->numParameters() << " total, " << numParameters() - << " numerical, " << proportionalityFactorIndices.size() - << " proportionality, " << offsetParameterIndices.size() - << " offset, " << sigmaParameterIndices.size() << " sigma\n"; + << wrapped_function_->numParameters() << " total, " + << numParameters() << " numerical, " + << proportionalityFactorIndices.size() << " proportionality, " + << offsetParameterIndices.size() << " offset, " + << sigmaParameterIndices.size() << " sigma\n"; Logger logger; logger.logmessage(loglevel::debug, ss.str()); } } -FunctionEvaluationStatus -HierarchicalOptimizationWrapper::evaluate(gsl::span parameters, - double& fval, - gsl::span gradient, - Logger* logger, - double* cpuTime) const -{ +FunctionEvaluationStatus HierarchicalOptimizationWrapper::evaluate( + gsl::span parameters, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const { std::vector fullParameters; std::vector fullGradient; - return evaluate(parameters, - fval, - gradient, - fullParameters, - fullGradient, - logger, - cpuTime); + return evaluate( + parameters, + fval, + gradient, + fullParameters, + fullGradient, + logger, + cpuTime); } -FunctionEvaluationStatus -HierarchicalOptimizationWrapper::evaluate( - gsl::span reducedParameters, +FunctionEvaluationStatus HierarchicalOptimizationWrapper::evaluate( + gsl::span reducedParameters, double& fval, gsl::span gradient, std::vector& fullParameters, std::vector& fullGradient, Logger* logger, - double* cpuTime) const -{ + double* cpuTime) const { WallTimer walltimer; FunctionEvaluationStatus status; if (reducedParameters.size() != (unsigned)numParameters()) { - throw ParPEException("Reduced parameter vector size " + - std::to_string(reducedParameters.size()) + - " does not match numParameters " + - std::to_string(numParameters())); + throw ParPEException( + "Reduced parameter vector size " + + std::to_string(reducedParameters.size()) + + " does not match numParameters " + std::to_string(numParameters())); } Expects(gradient.empty() || gradient.size() == reducedParameters.size()); if (numProportionalityFactors() == 0 && numOffsetParameters() == 0 && numSigmaParameters() == 0) { // nothing to do, only fill parameters / gradient and pass through - fullParameters.assign(reducedParameters.begin(), - reducedParameters.end()); + fullParameters.assign( + reducedParameters.begin(), reducedParameters.end()); fullGradient.assign(gradient.begin(), gradient.end()); // evaluate for all conditions @@ -178,8 +175,8 @@ HierarchicalOptimizationWrapper::evaluate( std::vector> modelOutputs; std::vector> modelSigmas; try { - std::tie(modelOutputs, modelSigmas) = - getUnscaledModelOutputsAndSigmas(reducedParameters, logger, cpuTime); + std::tie(modelOutputs, modelSigmas) = getUnscaledModelOutputsAndSigmas( + reducedParameters, logger, cpuTime); } catch (ParPEException const& e) { return FunctionEvaluationStatus::functionEvaluationFailure; } @@ -210,26 +207,28 @@ HierarchicalOptimizationWrapper::evaluate( } // splice parameter vector we get from optimizer with analytically // computed parameters - fullParameters = spliceParameters(reducedParameters, - proportionalityFactorIndices, - offsetParameterIndices, - 
sigmaParameterIndices, - scalings, - offsets, - sigmas); + fullParameters = spliceParameters( + reducedParameters, + proportionalityFactorIndices, + offsetParameterIndices, + sigmaParameterIndices, + scalings, + offsets, + sigmas); // evaluate with analytical scaling parameters double cpuTimeInner = 0.0; - status = evaluateWithOptimalParameters(fullParameters, - sigmas, - measurements, - modelOutputs, - modelSigmas, - fval, - gradient, - fullGradient, - logger, - &cpuTimeInner); + status = evaluateWithOptimalParameters( + fullParameters, + sigmas, + measurements, + modelOutputs, + modelSigmas, + fval, + gradient, + fullGradient, + logger, + &cpuTimeInner); if (cpuTime) *cpuTime += cpuTimeInner + walltimer.getTotal(); @@ -238,21 +237,20 @@ HierarchicalOptimizationWrapper::evaluate( } std::vector -HierarchicalOptimizationWrapper::getDefaultScalingFactors() const -{ +HierarchicalOptimizationWrapper::getDefaultScalingFactors() const { auto result = std::vector(numProportionalityFactors()); for (int i = 0; i < numProportionalityFactors(); ++i) { - result[i] = getDefaultScalingFactor( - wrapped_function_->getParameterScaling(proportionalityFactorIndices[i])); + result[i] = + getDefaultScalingFactor(wrapped_function_->getParameterScaling( + proportionalityFactorIndices[i])); } return result; } std::vector -HierarchicalOptimizationWrapper::getDefaultOffsetParameters() const -{ +HierarchicalOptimizationWrapper::getDefaultOffsetParameters() const { auto result = std::vector(numOffsetParameters()); for (int i = 0; i < numOffsetParameters(); ++i) { @@ -264,8 +262,7 @@ HierarchicalOptimizationWrapper::getDefaultOffsetParameters() const } std::vector -HierarchicalOptimizationWrapper::getDefaultSigmaParameters() const -{ +HierarchicalOptimizationWrapper::getDefaultSigmaParameters() const { auto result = std::vector(numSigmaParameters()); for (int i = 0; i < numSigmaParameters(); ++i) { @@ -277,56 +274,53 @@ HierarchicalOptimizationWrapper::getDefaultSigmaParameters() const return result; } -std::tuple>,std::vector>> +std::tuple>, std::vector>> HierarchicalOptimizationWrapper::getUnscaledModelOutputsAndSigmas( - const gsl::span reducedParameters, + gsl::span const reducedParameters, Logger* logger, - double* cpuTime) const -{ + double* cpuTime) const { // run simulations, collect outputs auto scalingDummy = getDefaultScalingFactors(); auto offsetDummy = getDefaultOffsetParameters(); auto sigmaDummy = getDefaultSigmaParameters(); // splice hidden scaling parameter and external parameters - auto fullParameters = spliceParameters(reducedParameters, - proportionalityFactorIndices, - offsetParameterIndices, - sigmaParameterIndices, - scalingDummy, - offsetDummy, - sigmaDummy); + auto fullParameters = spliceParameters( + reducedParameters, + proportionalityFactorIndices, + offsetParameterIndices, + sigmaParameterIndices, + scalingDummy, + offsetDummy, + sigmaDummy); std::vector> modelOutputs(numConditions); std::vector> modelSigmas(numConditions); - auto status = - wrapped_function_->getModelOutputsAndSigmas(fullParameters, modelOutputs, - modelSigmas, logger, cpuTime); + auto status = wrapped_function_->getModelOutputsAndSigmas( + fullParameters, modelOutputs, modelSigmas, logger, cpuTime); if (status != FunctionEvaluationStatus::functionEvaluationSuccess) throw ParPEException("Function evaluation failed."); return std::make_tuple(modelOutputs, modelSigmas); } -std::vector -HierarchicalOptimizationWrapper::computeAnalyticalScalings( +std::vector HierarchicalOptimizationWrapper::computeAnalyticalScalings( 
std::vector> const& measurements, - std::vector> const& modelOutputsUnscaled) const -{ + std::vector> const& modelOutputsUnscaled) const { int numProportionalityFactors = proportionalityFactorIndices.size(); std::vector proportionalityFactors(numProportionalityFactors); for (int scalingIdx = 0; scalingIdx < numProportionalityFactors; ++scalingIdx) { - auto proportionalityFactor = - parpe::computeAnalyticalScalings(scalingIdx, - modelOutputsUnscaled, - measurements, - *scalingReader, - numObservables); - auto scale = - wrapped_function_->getParameterScaling(proportionalityFactorIndices[scalingIdx]); + auto proportionalityFactor = parpe::computeAnalyticalScalings( + scalingIdx, + modelOutputsUnscaled, + measurements, + *scalingReader, + numObservables); + auto scale = wrapped_function_->getParameterScaling( + proportionalityFactorIndices[scalingIdx]); proportionalityFactors[scalingIdx] = getScaledParameter(proportionalityFactor, scale); } @@ -334,62 +328,61 @@ HierarchicalOptimizationWrapper::computeAnalyticalScalings( return proportionalityFactors; } -void -HierarchicalOptimizationWrapper::applyOptimalScalings( +void HierarchicalOptimizationWrapper::applyOptimalScalings( std::vector const& proportionalityFactors, - std::vector>& modelOutputs) const -{ + std::vector>& modelOutputs) const { for (int i = 0; (unsigned)i < proportionalityFactors.size(); ++i) { double scaling = getUnscaledParameter( proportionalityFactors[i], - wrapped_function_->getParameterScaling(proportionalityFactorIndices[i])); + wrapped_function_->getParameterScaling( + proportionalityFactorIndices[i])); applyOptimalScaling( i, scaling, modelOutputs, *scalingReader, numObservables); } } -std::vector -HierarchicalOptimizationWrapper::computeAnalyticalOffsets( +std::vector HierarchicalOptimizationWrapper::computeAnalyticalOffsets( std::vector> const& measurements, - std::vector>& modelOutputsUnscaled) const -{ + std::vector>& modelOutputsUnscaled) const { int numOffsetParameters = offsetParameterIndices.size(); std::vector offsetParameters(numOffsetParameters); for (int i = 0; i < numOffsetParameters; ++i) { auto offsetParameter = parpe::computeAnalyticalOffsets( - i, modelOutputsUnscaled, measurements, *offsetReader, numObservables); - auto scale = wrapped_function_->getParameterScaling(offsetParameterIndices[i]); + i, + modelOutputsUnscaled, + measurements, + *offsetReader, + numObservables); + auto scale = + wrapped_function_->getParameterScaling(offsetParameterIndices[i]); offsetParameters[i] = getScaledParameter(offsetParameter, scale); } return offsetParameters; } -std::vector -HierarchicalOptimizationWrapper::computeAnalyticalSigmas( - const std::vector>& measurements, - std::vector> const& modelOutputsScaled) const -{ +std::vector HierarchicalOptimizationWrapper::computeAnalyticalSigmas( + std::vector> const& measurements, + std::vector> const& modelOutputsScaled) const { int numSigmas = sigmaParameterIndices.size(); std::vector sigmas(numSigmas); for (int i = 0; i < numSigmas; ++i) { auto sigma = parpe::computeAnalyticalSigmas( i, modelOutputsScaled, measurements, *sigmaReader, numObservables); - auto scale = wrapped_function_->getParameterScaling(sigmaParameterIndices[i]); + auto scale = + wrapped_function_->getParameterScaling(sigmaParameterIndices[i]); sigmas[i] = getScaledParameter(sigma, scale); } return sigmas; } -void -HierarchicalOptimizationWrapper::applyOptimalOffsets( +void HierarchicalOptimizationWrapper::applyOptimalOffsets( std::vector const& offsetParameters, - std::vector>& modelOutputs) const -{ + 
std::vector>& modelOutputs) const { for (int i = 0; (unsigned)i < offsetParameters.size(); ++i) { double offset = getUnscaledParameter( @@ -400,11 +393,9 @@ HierarchicalOptimizationWrapper::applyOptimalOffsets( } } -void -HierarchicalOptimizationWrapper::fillInAnalyticalSigmas( +void HierarchicalOptimizationWrapper::fillInAnalyticalSigmas( std::vector>& allSigmas, - std::vector const& analyticalSigmas) const -{ + std::vector const& analyticalSigmas) const { for (int sigmaParameterIdx = 0; (unsigned)sigmaParameterIdx < analyticalSigmas.size(); ++sigmaParameterIdx) { @@ -413,7 +404,8 @@ HierarchicalOptimizationWrapper::fillInAnalyticalSigmas( // and not passed to AMICI -> unscale auto sigmaParameterValue = getUnscaledParameter( analyticalSigmas[sigmaParameterIdx], - wrapped_function_->getParameterScaling(sigmaParameterIndices[sigmaParameterIdx])); + wrapped_function_->getParameterScaling( + sigmaParameterIndices[sigmaParameterIdx])); auto dependentConditions = sigmaReader->getConditionsForParameter(sigmaParameterIdx); @@ -446,13 +438,12 @@ HierarchicalOptimizationWrapper::evaluateWithOptimalParameters( std::vector const& sigmas, std::vector> const& measurements, std::vector> const& modelOutputsScaled, - std::vector> & fullSigmaMatrices, + std::vector>& fullSigmaMatrices, double& fval, - const gsl::span gradient, + gsl::span const gradient, std::vector& fullGradient, Logger* logger, - double* cpuTime) const -{ + double* cpuTime) const { if (!gradient.empty()) { fval = NAN; @@ -491,71 +482,61 @@ HierarchicalOptimizationWrapper::evaluateWithOptimalParameters( : functionEvaluationFailure; } -int -HierarchicalOptimizationWrapper::numParameters() const -{ +int HierarchicalOptimizationWrapper::numParameters() const { return wrapped_function_->numParameters() - numProportionalityFactors() - numOffsetParameters() - numSigmaParameters(); } -int -HierarchicalOptimizationWrapper::numProportionalityFactors() const -{ +int HierarchicalOptimizationWrapper::numProportionalityFactors() const { return proportionalityFactorIndices.size(); } -const std::vector& -HierarchicalOptimizationWrapper::getProportionalityFactorIndices() const -{ +std::vector const& +HierarchicalOptimizationWrapper::getProportionalityFactorIndices() const { return proportionalityFactorIndices; } -int -HierarchicalOptimizationWrapper::numOffsetParameters() const -{ +int HierarchicalOptimizationWrapper::numOffsetParameters() const { return offsetParameterIndices.size(); } -int -HierarchicalOptimizationWrapper::numSigmaParameters() const -{ +int HierarchicalOptimizationWrapper::numSigmaParameters() const { return sigmaParameterIndices.size(); } -const std::vector& -HierarchicalOptimizationWrapper::getOffsetParameterIndices() const -{ +std::vector const& +HierarchicalOptimizationWrapper::getOffsetParameterIndices() const { return offsetParameterIndices; } -const std::vector& -HierarchicalOptimizationWrapper::getSigmaParameterIndices() const -{ +std::vector const& +HierarchicalOptimizationWrapper::getSigmaParameterIndices() const { return sigmaParameterIndices; } std::vector -HierarchicalOptimizationWrapper::getAnalyticalParameterIndices() const -{ +HierarchicalOptimizationWrapper::getAnalyticalParameterIndices() const { auto combinedIndices = proportionalityFactorIndices; - combinedIndices.insert(combinedIndices.end(), - offsetParameterIndices.begin(), - offsetParameterIndices.end()); - combinedIndices.insert(combinedIndices.end(), - sigmaParameterIndices.begin(), - sigmaParameterIndices.end()); + combinedIndices.insert( + 
combinedIndices.end(), + offsetParameterIndices.begin(), + offsetParameterIndices.end()); + combinedIndices.insert( + combinedIndices.end(), + sigmaParameterIndices.begin(), + sigmaParameterIndices.end()); std::sort(combinedIndices.begin(), combinedIndices.end()); return combinedIndices; } -AmiciSummedGradientFunction *HierarchicalOptimizationWrapper::getWrappedFunction() const -{ +AmiciSummedGradientFunction* +HierarchicalOptimizationWrapper::getWrappedFunction() const { return wrapped_function_; } -std::vector HierarchicalOptimizationWrapper::getParameterIds() const -{ +std::vector +HierarchicalOptimizationWrapper::getParameterIds() const { return removeInnerParameters( wrapped_function_->getParameterIds(), proportionalityFactorIndices, @@ -565,9 +546,8 @@ std::vector HierarchicalOptimizationWrapper::getParameterIds() cons HierarchicalOptimizationProblemWrapper::HierarchicalOptimizationProblemWrapper( std::unique_ptr problemToWrap, - const MultiConditionDataProviderHDF5* dataProvider) - : wrapped_problem_(std::move(problemToWrap)) -{ + MultiConditionDataProviderHDF5 const* dataProvider) + : wrapped_problem_(std::move(problemToWrap)) { logger_ = std::make_unique(*wrapped_problem_->logger_); auto wrappedFun = dynamic_cast*>( @@ -576,15 +556,14 @@ HierarchicalOptimizationProblemWrapper::HierarchicalOptimizationProblemWrapper( auto model = dataProvider->getModel(); [[maybe_unused]] auto lock = hdf5MutexGetLock(); - cost_fun_.reset( - new HierarchicalOptimizationWrapper( - dynamic_cast( - wrappedFun->getWrappedFunction()), - dataProvider->getHdf5File(), - "/", - dataProvider->getNumberOfSimulationConditions(), - model->nytrue, - ErrorModel::normal)); + cost_fun_.reset(new HierarchicalOptimizationWrapper( + dynamic_cast( + wrappedFun->getWrappedFunction()), + dataProvider->getHdf5File(), + "/", + dataProvider->getNumberOfSimulationConditions(), + model->nytrue, + ErrorModel::normal)); } HierarchicalOptimizationProblemWrapper::HierarchicalOptimizationProblemWrapper( @@ -592,41 +571,32 @@ HierarchicalOptimizationProblemWrapper::HierarchicalOptimizationProblemWrapper( std::unique_ptr costFun, std::unique_ptr logger) : OptimizationProblem(std::move(costFun), std::move(logger)) - , wrapped_problem_(std::move(problemToWrap)) -{} + , wrapped_problem_(std::move(problemToWrap)) {} -void -HierarchicalOptimizationProblemWrapper::fillInitialParameters( - gsl::span buffer) const -{ +void HierarchicalOptimizationProblemWrapper::fillInitialParameters( + gsl::span buffer) const { std::vector full(wrapped_problem_->cost_fun_->numParameters()); wrapped_problem_->fillInitialParameters(full); fillFilteredParams(full, buffer); } -void -HierarchicalOptimizationProblemWrapper::fillParametersMax( - gsl::span buffer) const -{ +void HierarchicalOptimizationProblemWrapper::fillParametersMax( + gsl::span buffer) const { std::vector full(wrapped_problem_->cost_fun_->numParameters()); wrapped_problem_->fillParametersMax(full); fillFilteredParams(full, buffer); } -void -HierarchicalOptimizationProblemWrapper::fillParametersMin( - gsl::span buffer) const -{ +void HierarchicalOptimizationProblemWrapper::fillParametersMin( + gsl::span buffer) const { std::vector full(wrapped_problem_->cost_fun_->numParameters()); wrapped_problem_->fillParametersMin(full); fillFilteredParams(full, buffer); } -void -HierarchicalOptimizationProblemWrapper::fillFilteredParams( - const std::vector& fullParams, - gsl::span buffer) const -{ +void HierarchicalOptimizationProblemWrapper::fillFilteredParams( + std::vector const& fullParams, + gsl::span 
buffer) const { auto hierarchical = dynamic_cast(cost_fun_.get()); auto combinedIndices = hierarchical->getAnalyticalParameterIndices(); @@ -634,22 +604,19 @@ HierarchicalOptimizationProblemWrapper::fillFilteredParams( } std::unique_ptr -HierarchicalOptimizationProblemWrapper::getReporter() const -{ +HierarchicalOptimizationProblemWrapper::getReporter() const { auto innerReporter = wrapped_problem_->getReporter(); auto outerReporter = std::make_unique( dynamic_cast(cost_fun_.get()), std::move(innerReporter->result_writer_), - std::make_unique(*logger_) - ); + std::make_unique(*logger_)); return outerReporter; } -void -fillFilteredParams(std::vector const& valuesToFilter, - std::vector const& sortedIndicesToExclude, - gsl::span result) -{ +void fillFilteredParams( + std::vector const& valuesToFilter, + std::vector const& sortedIndicesToExclude, + gsl::span result) { // adapt to offsets unsigned int nextFilterIdx = 0; unsigned int resultIdx = 0; @@ -665,13 +632,12 @@ fillFilteredParams(std::vector const& valuesToFilter, } } Ensures(nextFilterIdx == sortedIndicesToExclude.size()); - Ensures(resultIdx == - (unsigned)valuesToFilter.size() - sortedIndicesToExclude.size()); + Ensures( + resultIdx == + (unsigned)valuesToFilter.size() - sortedIndicesToExclude.size()); } -double -getDefaultScalingFactor(amici::ParameterScaling scaling) -{ +double getDefaultScalingFactor(amici::ParameterScaling scaling) { switch (scaling) { case amici::ParameterScaling::none: return 1.0; @@ -684,9 +650,7 @@ getDefaultScalingFactor(amici::ParameterScaling scaling) } } -double -getDefaultOffsetParameter(amici::ParameterScaling scaling) -{ +double getDefaultOffsetParameter(amici::ParameterScaling scaling) { switch (scaling) { case amici::ParameterScaling::none: return 0.0; @@ -699,14 +663,12 @@ getDefaultOffsetParameter(amici::ParameterScaling scaling) } } -double -computeAnalyticalScalings( +double computeAnalyticalScalings( int scalingIdx, - const std::vector>& modelOutputsUnscaled, - const std::vector>& measurements, + std::vector> const& modelOutputsUnscaled, + std::vector> const& measurements, AnalyticalParameterProvider const& scalingReader, - int numObservables) -{ + int numObservables) { auto dependentConditions = scalingReader.getConditionsForParameter(scalingIdx); @@ -733,28 +695,30 @@ computeAnalyticalScalings( timeIdx * numObservables]; // std::cout< -1e-18) { // negative values due to numerical errors // TODO: some outputs may be validly < 0 - logmessage(loglevel::warning, - "In computeAnalyticalScalings %d: " - "Simulation is %g < 0 for condition %d " - "observable %d timepoint %d. " - "Setting to 0.0.", - scalingIdx, - sim, - conditionIdx, - observableIdx, - timeIdx); + logmessage( + loglevel::warning, + "In computeAnalyticalScalings %d: " + "Simulation is %g < 0 for condition %d " + "observable %d timepoint %d. " + "Setting to 0.0.", + scalingIdx, + sim, + conditionIdx, + observableIdx, + timeIdx); sim = 0.0; } @@ -766,12 +730,13 @@ computeAnalyticalScalings( } if (denominator == 0.0) { - logmessage(loglevel::warning, - "In computeAnalyticalScalings: denominator is 0.0 for " - "scaling parameter " + - std::to_string(scalingIdx) + - ". Probably model output is always 0.0 and scaling, " - "thus, not used. Setting scaling parameter to 1.0."); + logmessage( + loglevel::warning, + "In computeAnalyticalScalings: denominator is 0.0 for " + "scaling parameter " + + std::to_string(scalingIdx) + + ". Probably model output is always 0.0 and scaling, " + "thus, not used. 
Setting scaling parameter to 1.0."); return 1.0; } double scaling = enumerator / denominator; @@ -793,14 +758,12 @@ computeAnalyticalScalings( // return upper_bound; } -double -computeAnalyticalOffsets( +double computeAnalyticalOffsets( int offsetIdx, std::vector> const& modelOutputsUnscaled, std::vector> const& measurements, AnalyticalParameterProvider& offsetReader, - int numObservables) -{ + int numObservables) { auto dependentConditions = offsetReader.getConditionsForParameter(offsetIdx); @@ -821,14 +784,15 @@ computeAnalyticalOffsets( [observableIdx + timeIdx * numObservables]; if (std::isnan(sim)) { - logmessage(loglevel::warning, - "In computeAnalyticalOffsets %d: " - "Simulation is NaN for condition %d " - "observable %d timepoint %d", - offsetIdx, - conditionIdx, - observableIdx, - timeIdx); + logmessage( + loglevel::warning, + "In computeAnalyticalOffsets %d: " + "Simulation is NaN for condition %d " + "observable %d timepoint %d", + offsetIdx, + conditionIdx, + observableIdx, + timeIdx); } enumerator += mes - sim; denominator += 1.0; @@ -838,28 +802,27 @@ computeAnalyticalOffsets( } if (denominator == 0.0) { - logmessage(loglevel::warning, - "In computeAnalyticalOffsets: denominator is 0.0 " - "for offset parameter " + - std::to_string(offsetIdx) + - ". This probably means that there exists no measurement " - "using this parameter. Setting offset to 0.0."); + logmessage( + loglevel::warning, + "In computeAnalyticalOffsets: denominator is 0.0 " + "for offset parameter " + + std::to_string(offsetIdx) + + ". This probably means that there exists no measurement " + "using this parameter. Setting offset to 0.0."); return 0.0; } return enumerator / denominator; } -double -computeAnalyticalSigmas( +double computeAnalyticalSigmas( int sigmaIdx, - const std::vector>& modelOutputsScaled, - const std::vector>& measurements, + std::vector> const& modelOutputsScaled, + std::vector> const& measurements, AnalyticalParameterProvider const& sigmaReader, int numObservables, double epsilonAbs, - double epsilonRel) -{ + double epsilonRel) { auto dependentConditions = sigmaReader.getConditionsForParameter(sigmaIdx); double enumerator = 0.0; @@ -878,21 +841,22 @@ computeAnalyticalSigmas( } for (int timeIdx = 0; timeIdx < numTimepoints; ++timeIdx) { - const int flat_index = observableIdx + timeIdx * numObservables; + int const flat_index = observableIdx + timeIdx * numObservables; double mes = measurements[conditionIdx][flat_index]; if (!std::isnan(mes)) { double scaledSim = modelOutputsScaled[conditionIdx][flat_index]; if (std::isnan(scaledSim)) { - logmessage(loglevel::warning, - "In computeAnalyticalSigmas %d: " - "Simulation is NaN for condition %d " - "observable %d timepoint %d", - sigmaIdx, - conditionIdx, - observableIdx, - timeIdx); + logmessage( + loglevel::warning, + "In computeAnalyticalSigmas %d: " + "Simulation is NaN for condition %d " + "observable %d timepoint %d", + sigmaIdx, + conditionIdx, + observableIdx, + timeIdx); } enumerator += (mes - scaledSim) * (mes - scaledSim); denominator += 1.0; @@ -905,12 +869,13 @@ computeAnalyticalSigmas( } if (denominator == 0.0) { - logmessage(loglevel::warning, - "In computeAnalyticalSigmas: Denominator is 0.0 for sigma " - "parameter " + - std::to_string(sigmaIdx) + - ". This probably means that there exists no measurement " - "using this parameter."); + logmessage( + loglevel::warning, + "In computeAnalyticalSigmas: Denominator is 0.0 for sigma " + "parameter " + + std::to_string(sigmaIdx) + + ". 
This probably means that there exists no measurement " + "using this parameter."); } double sigma = std::sqrt(enumerator / denominator); @@ -919,23 +884,23 @@ computeAnalyticalSigmas( if (sigma < epsilonAbs) { // Must not return sigma = 0.0 - logmessage(loglevel::warning, - "In computeAnalyticalSigmas " + std::to_string(sigmaIdx) + - ": Computed sigma < epsilon. Setting to " + - std::to_string(epsilonAbs)); + logmessage( + loglevel::warning, + "In computeAnalyticalSigmas " + std::to_string(sigmaIdx) + + ": Computed sigma < epsilon. Setting to " + + std::to_string(epsilonAbs)); return epsilonAbs; } return sigma; } -void -applyOptimalScaling(int scalingIdx, - double scalingLin, - std::vector>& modelOutputs, - AnalyticalParameterProvider const& scalingReader, - int numObservables) -{ +void applyOptimalScaling( + int scalingIdx, + double scalingLin, + std::vector>& modelOutputs, + AnalyticalParameterProvider const& scalingReader, + int numObservables) { auto dependentConditions = scalingReader.getConditionsForParameter(scalingIdx); for (auto const conditionIdx : dependentConditions) { @@ -959,13 +924,12 @@ applyOptimalScaling(int scalingIdx, } } -void -applyOptimalOffset(int offsetIdx, - double offsetLin, - std::vector>& modelOutputs, - const AnalyticalParameterProvider& offsetReader, - int numObservables) -{ +void applyOptimalOffset( + int offsetIdx, + double offsetLin, + std::vector>& modelOutputs, + AnalyticalParameterProvider const& offsetReader, + int numObservables) { auto dependentConditions = offsetReader.getConditionsForParameter(offsetIdx); for (auto const conditionIdx : dependentConditions) { @@ -986,15 +950,14 @@ applyOptimalOffset(int offsetIdx, } } -std::vector -spliceParameters(const gsl::span reducedParameters, - const std::vector& proportionalityFactorIndices, - const std::vector& offsetParameterIndices, - const std::vector& sigmaParameterIndices, - const std::vector& scalingFactors, - const std::vector& offsetParameters, - const std::vector& sigmaParameters) -{ +std::vector spliceParameters( + gsl::span const reducedParameters, + std::vector const& proportionalityFactorIndices, + std::vector const& offsetParameterIndices, + std::vector const& sigmaParameterIndices, + std::vector const& scalingFactors, + std::vector const& offsetParameters, + std::vector const& sigmaParameters) { std::vector fullParameters( reducedParameters.size() + scalingFactors.size() + @@ -1008,11 +971,13 @@ spliceParameters(const gsl::span reducedParameters, if ((unsigned)idxScaling < proportionalityFactorIndices.size() && proportionalityFactorIndices[idxScaling] == i) fullParameters[i] = scalingFactors.at(idxScaling++); - else if ((unsigned)idxOffset < offsetParameterIndices.size() && - offsetParameterIndices[idxOffset] == i) + else if ( + (unsigned)idxOffset < offsetParameterIndices.size() && + offsetParameterIndices[idxOffset] == i) fullParameters[i] = offsetParameters.at(idxOffset++); - else if ((unsigned)idxSigma < sigmaParameterIndices.size() && - sigmaParameterIndices[idxSigma] == i) + else if ( + (unsigned)idxSigma < sigmaParameterIndices.size() && + sigmaParameterIndices[idxSigma] == i) fullParameters[i] = sigmaParameters.at(idxSigma++); else if ((unsigned)idxRegular < reducedParameters.size()) fullParameters[i] = reducedParameters[idxRegular++]; @@ -1020,15 +985,14 @@ spliceParameters(const gsl::span reducedParameters, throw std::exception(); } - Ensures((unsigned) idxScaling == proportionalityFactorIndices.size()); - Ensures((unsigned) idxOffset == offsetParameterIndices.size()); - 
Ensures((unsigned) idxSigma == sigmaParameterIndices.size()); - Ensures((unsigned) idxRegular == reducedParameters.size()); + Ensures((unsigned)idxScaling == proportionalityFactorIndices.size()); + Ensures((unsigned)idxOffset == offsetParameterIndices.size()); + Ensures((unsigned)idxSigma == sigmaParameterIndices.size()); + Ensures((unsigned)idxRegular == reducedParameters.size()); return fullParameters; } - double computeNegLogLikelihood( std::vector> const& measurements, std::vector> const& modelOutputsScaled, @@ -1039,9 +1003,10 @@ double computeNegLogLikelihood( for (int conditionIdx = 0; (unsigned)conditionIdx < measurements.size(); ++conditionIdx) { - nllh += computeNegLogLikelihood(measurements[conditionIdx], - modelOutputsScaled[conditionIdx], - sigmas[conditionIdx]); + nllh += computeNegLogLikelihood( + measurements[conditionIdx], + modelOutputsScaled[conditionIdx], + sigmas[conditionIdx]); if (std::isnan(nllh)) return nllh; } @@ -1049,12 +1014,10 @@ double computeNegLogLikelihood( return nllh; } - -std::vector -getOuterParameters(const std::vector& fullParameters, - const H5::H5File& parameterFile, - const std::string& parameterPath) -{ +std::vector getOuterParameters( + std::vector const& fullParameters, + const H5::H5File& parameterFile, + std::string const& parameterPath) { // auto options = OptimizationOptions::fromHDF5(parameterFile.getId(), // parameterPath + "/optimizationOptions"); AnalyticalParameterHdf5Reader hierarchicalScalingReader( @@ -1078,12 +1041,14 @@ getOuterParameters(const std::vector& fullParameters, hierarchicalSigmaReader.getOptimizationParameterIndices(); auto combinedIndices = proportionalityFactorIndices; - combinedIndices.insert(combinedIndices.end(), - offsetParameterIndices.begin(), - offsetParameterIndices.end()); - combinedIndices.insert(combinedIndices.end(), - sigmaParameterIndices.begin(), - sigmaParameterIndices.end()); + combinedIndices.insert( + combinedIndices.end(), + offsetParameterIndices.begin(), + offsetParameterIndices.end()); + combinedIndices.insert( + combinedIndices.end(), + sigmaParameterIndices.begin(), + sigmaParameterIndices.end()); std::sort(combinedIndices.begin(), combinedIndices.end()); std::vector result(fullParameters.size() - combinedIndices.size()); @@ -1092,11 +1057,10 @@ getOuterParameters(const std::vector& fullParameters, return result; } -double -computeNegLogLikelihood(std::vector const& measurements, - std::vector const& modelOutputsScaled, - std::vector const& sigmas) -{ +double computeNegLogLikelihood( + std::vector const& measurements, + std::vector const& modelOutputsScaled, + std::vector const& sigmas) { // measurement/simulation output dimension mismatch Expects(measurements.size() == modelOutputsScaled.size()); @@ -1110,11 +1074,14 @@ computeNegLogLikelihood(std::vector const& measurements, double sigmaSquared = sigmas[i] * sigmas[i]; if (std::isnan(sim)) { logmessage( - loglevel::warning, "Simulation is NaN for data point %d", i); + loglevel::warning, + "Simulation is NaN for data point %d", + i); return std::numeric_limits::quiet_NaN(); } if (std::isnan(sigmaSquared)) { - logmessage(loglevel::warning, "Sigma is NaN for data point %d", i); + logmessage( + loglevel::warning, "Sigma is NaN for data point %d", i); return std::numeric_limits::quiet_NaN(); } if (sigmaSquared < 0.0) { @@ -1137,18 +1104,15 @@ HierarchicalOptimizationReporter::HierarchicalOptimizationReporter( HierarchicalOptimizationWrapper* gradFun, std::unique_ptr rw, std::unique_ptr logger) - : OptimizationReporter(gradFun, std::move(rw), 
std::move(logger)), - hierarchical_wrapper_(gradFun) -{ -} + : OptimizationReporter(gradFun, std::move(rw), std::move(logger)) + , hierarchical_wrapper_(gradFun) {} -FunctionEvaluationStatus -HierarchicalOptimizationReporter::evaluate(gsl::span parameters, - double& fval, - gsl::span gradient, - Logger* logger, - double* cpuTime) const -{ +FunctionEvaluationStatus HierarchicalOptimizationReporter::evaluate( + gsl::span parameters, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const { double myCpuTimeSec = 0.0; if (cpuTime) *cpuTime = 0.0; @@ -1157,9 +1121,10 @@ HierarchicalOptimizationReporter::evaluate(gsl::span parameters, return functionEvaluationFailure; if (gradient.data()) { - if (!have_cached_gradient_ || !std::equal(parameters.begin(), - parameters.end(), - cached_parameters_.begin())) { + if (!have_cached_gradient_ || !std::equal( + parameters.begin(), + parameters.end(), + cached_parameters_.begin())) { // Have to compute anew cached_status_ = hierarchical_wrapper_->evaluate( parameters, @@ -1177,9 +1142,10 @@ HierarchicalOptimizationReporter::evaluate(gsl::span parameters, cached_gradient_.begin(), cached_gradient_.end(), gradient.begin()); fval = cached_cost_; } else { - if (!have_cached_cost_ || !std::equal(parameters.begin(), - parameters.end(), - cached_parameters_.begin())) { + if (!have_cached_cost_ || !std::equal( + parameters.begin(), + parameters.end(), + cached_parameters_.begin())) { // Have to compute anew cached_status_ = hierarchical_wrapper_->evaluate( parameters, @@ -1204,20 +1170,19 @@ HierarchicalOptimizationReporter::evaluate(gsl::span parameters, if (cpuTime) *cpuTime = myCpuTimeSec; - if (afterCostFunctionCall(parameters, - cached_cost_, - gradient.data() ? cached_full_gradient_ - : gsl::span()) != 0) + if (afterCostFunctionCall( + parameters, + cached_cost_, + gradient.data() ? 
cached_full_gradient_ : gsl::span()) != 0) return functionEvaluationFailure; return cached_status_; } -void -HierarchicalOptimizationReporter::finished(double optimalCost, - gsl::span parameters, - int exitStatus) const -{ +void HierarchicalOptimizationReporter::finished( + double optimalCost, + gsl::span parameters, + int exitStatus) const { double timeElapsed = wall_timer_.getTotal(); if (cached_cost_ > optimalCost) { @@ -1244,39 +1209,38 @@ HierarchicalOptimizationReporter::finished(double optimalCost, cpu_time_total_sec_); if (result_writer_) - result_writer_->saveOptimizerResults(cached_cost_, - cached_full_parameters_, - timeElapsed, - cpu_time_total_sec_, - exitStatus); + result_writer_->saveOptimizerResults( + cached_cost_, + cached_full_parameters_, + timeElapsed, + cpu_time_total_sec_, + exitStatus); } -const std::vector& -HierarchicalOptimizationReporter::getFinalParameters() const -{ +std::vector const& +HierarchicalOptimizationReporter::getFinalParameters() const { return cached_full_parameters_; } -bool -HierarchicalOptimizationReporter::iterationFinished( - gsl::span parameters, +bool HierarchicalOptimizationReporter::iterationFinished( + gsl::span parameters, double objectiveFunctionValue, - gsl::span /*objectiveFunctionGradient*/) const -{ + gsl::span /*objectiveFunctionGradient*/) const { double wallTimeIter = wall_timer_.getRound(); double wallTimeOptim = wall_timer_.getTotal(); if (logger_) - logger_->logmessage(loglevel::info, - "iter: %d cost: %g " - "time_iter: wall: %gs cpu: %gs " - "time_optim: wall: %gs cpu: %gs", - num_iterations_, - objectiveFunctionValue, - wallTimeIter, - cpu_time_iteration_sec_, - wallTimeOptim, - cpu_time_total_sec_); + logger_->logmessage( + loglevel::info, + "iter: %d cost: %g " + "time_iter: wall: %gs cpu: %gs " + "time_optim: wall: %gs cpu: %gs", + num_iterations_, + objectiveFunctionValue, + wallTimeIter, + cpu_time_iteration_sec_, + wallTimeOptim, + cpu_time_total_sec_); if (result_writer_) { /* check if the optimizer-reported cost matches the last function @@ -1287,9 +1251,10 @@ HierarchicalOptimizationReporter::iterationFinished( * have. */ if (almostEqual(objectiveFunctionValue, cached_cost_) && - (parameters.empty() || std::equal(parameters.begin(), - parameters.end(), - cached_parameters_.begin()))) { + (parameters.empty() || std::equal( + parameters.begin(), + parameters.end(), + cached_parameters_.begin()))) { result_writer_->logOptimizerIteration( num_iterations_, cached_full_parameters_, @@ -1305,32 +1270,31 @@ HierarchicalOptimizationReporter::iterationFinished( // parameters so we can't append them due to different dimension // TODO: save both, outer + combined? 
can easily save outer + inner // separately - std::vector nanParameters(cached_full_parameters_.size(), - NAN); - - result_writer_->logOptimizerIteration(num_iterations_, - nanParameters, - objectiveFunctionValue, - nanParameters, - wallTimeIter, - cpu_time_iteration_sec_); + std::vector nanParameters( + cached_full_parameters_.size(), NAN); + + result_writer_->logOptimizerIteration( + num_iterations_, + nanParameters, + objectiveFunctionValue, + nanParameters, + wallTimeIter, + cpu_time_iteration_sec_); } } ++num_iterations_; - logger_->setPrefix(default_logger_prefix_ + "i" + - std::to_string(num_iterations_)); + logger_->setPrefix( + default_logger_prefix_ + "i" + std::to_string(num_iterations_)); cpu_time_iteration_sec_ = 0.0; return false; } -bool -HierarchicalOptimizationReporter::afterCostFunctionCall( - gsl::span /*parameters*/, +bool HierarchicalOptimizationReporter::afterCostFunctionCall( + gsl::span /*parameters*/, double objectiveFunctionValue, - gsl::span objectiveFunctionGradient) const -{ + gsl::span objectiveFunctionGradient) const { double wallTime = wall_timer_.getTotal(); if (!std::isfinite(objectiveFunctionValue)) @@ -1348,21 +1312,21 @@ HierarchicalOptimizationReporter::afterCostFunctionCall( return false; } -void -checkGradientForAnalyticalParameters(const std::vector& gradient, - const std::vector& analyticalIndices, - double threshold) -{ +void checkGradientForAnalyticalParameters( + std::vector const& gradient, + std::vector const& analyticalIndices, + double threshold) { for (auto const idx : analyticalIndices) { auto curGradient = gradient[idx]; // std::cout<<" : "< threshold) - logmessage(loglevel::warning, - "Gradient w.r.t. analytically computed parameter " - "%d is %f, exceeding threshold %g", - idx, - curGradient, - threshold); + logmessage( + loglevel::warning, + "Gradient w.r.t. analytically computed parameter " + "%d is %f, exceeding threshold %g", + idx, + curGradient, + threshold); } } diff --git a/src/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.cpp b/src/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.cpp index 94bfe6a8e..af577a731 100644 --- a/src/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.cpp +++ b/src/parpeamici/hierarchicalOptimizationAnalyticalParameterProvider.cpp @@ -5,25 +5,20 @@ namespace parpe { - -std::vector -AnalyticalParameterProviderDefault::getConditionsForParameter( - int parameterIndex) const -{ +std::vector AnalyticalParameterProviderDefault::getConditionsForParameter( + int parameterIndex) const { return conditionsForParameter[parameterIndex]; } -const std::vector& +std::vector const& AnalyticalParameterProviderDefault::getObservablesForParameter( int parameterIndex, - int conditionIdx) const -{ + int conditionIdx) const { return mapping[parameterIndex].at(conditionIdx); } std::vector -AnalyticalParameterProviderDefault::getOptimizationParameterIndices() const -{ +AnalyticalParameterProviderDefault::getOptimizationParameterIndices() const { return optimizationParameterIndices; } @@ -32,18 +27,15 @@ AnalyticalParameterHdf5Reader::AnalyticalParameterHdf5Reader( std::string analyticalParameterIndicesPath, std::string mapPath) : mapPath(std::move(mapPath)) - , analyticalParameterIndicesPath(std::move(analyticalParameterIndicesPath)) -{ + , analyticalParameterIndicesPath( + std::move(analyticalParameterIndicesPath)) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); this->file = file; // copy while mutex is locked! 
readParameterConditionObservableMappingFromFile(); } - -std::vector -AnalyticalParameterHdf5Reader::getConditionsForParameter( - int parameterIndex) const -{ +std::vector AnalyticalParameterHdf5Reader::getConditionsForParameter( + int parameterIndex) const { std::vector result; result.reserve(mapping[parameterIndex].size()); for (auto const& [k, v] : mapping[parameterIndex]) @@ -51,17 +43,15 @@ AnalyticalParameterHdf5Reader::getConditionsForParameter( return result; } -const std::vector& +std::vector const& AnalyticalParameterHdf5Reader::getObservablesForParameter( int parameterIndex, - int conditionIdx) const -{ + int conditionIdx) const { return mapping[parameterIndex].at(conditionIdx); } std::vector -AnalyticalParameterHdf5Reader::getOptimizationParameterIndices() const -{ +AnalyticalParameterHdf5Reader::getOptimizationParameterIndices() const { [[maybe_unused]] auto lock = hdf5MutexGetLock(); std::vector analyticalParameterIndices; @@ -76,21 +66,18 @@ AnalyticalParameterHdf5Reader::getOptimizationParameterIndices() const dataspace.getSimpleExtentDims(&numScalings); analyticalParameterIndices.resize(numScalings); - dataset.read(analyticalParameterIndices.data(), - H5::PredType::NATIVE_INT); + dataset.read( + analyticalParameterIndices.data(), H5::PredType::NATIVE_INT); } return analyticalParameterIndices; } -AnalyticalParameterHdf5Reader::~AnalyticalParameterHdf5Reader() -{ +AnalyticalParameterHdf5Reader::~AnalyticalParameterHdf5Reader() { [[maybe_unused]] auto lock = hdf5MutexGetLock(); file.close(); } -int -AnalyticalParameterHdf5Reader::getNumAnalyticalParameters() const -{ +int AnalyticalParameterHdf5Reader::getNumAnalyticalParameters() const { hsize_t numAnalyticalParameters = 0; [[maybe_unused]] auto lock = hdf5MutexGetLock(); @@ -106,9 +93,8 @@ AnalyticalParameterHdf5Reader::getNumAnalyticalParameters() const return numAnalyticalParameters; } -void -AnalyticalParameterHdf5Reader::readParameterConditionObservableMappingFromFile() -{ +void AnalyticalParameterHdf5Reader:: + readParameterConditionObservableMappingFromFile() { [[maybe_unused]] auto lock = hdf5MutexGetLock(); H5_SAVE_ERROR_HANDLER; try { @@ -133,17 +119,14 @@ AnalyticalParameterHdf5Reader::readParameterConditionObservableMappingFromFile() int observableIdx = rawMap[i * nCols + observableCol]; mapping[scalingIdx][conditionIdx].push_back(observableIdx); } - } catch (H5::FileIException const&) { - return; - } + } catch (H5::FileIException const&) { return; } H5_RESTORE_ERROR_HANDLER; } -std::vector -AnalyticalParameterHdf5Reader::readRawMap(H5::DataSet const& dataset, - hsize_t& nRows, - hsize_t& nCols) const -{ +std::vector AnalyticalParameterHdf5Reader::readRawMap( + H5::DataSet const& dataset, + hsize_t& nRows, + hsize_t& nCols) const { [[maybe_unused]] auto lock = hdf5MutexGetLock(); auto dataspace = dataset.getSpace(); diff --git a/src/parpeamici/multiConditionDataProvider.cpp b/src/parpeamici/multiConditionDataProvider.cpp index 542678e96..143d8db58 100644 --- a/src/parpeamici/multiConditionDataProvider.cpp +++ b/src/parpeamici/multiConditionDataProvider.cpp @@ -16,10 +16,9 @@ namespace parpe { MultiConditionDataProviderHDF5::MultiConditionDataProviderHDF5( - std::unique_ptr model, - std::string const& hdf5Filename) - : MultiConditionDataProviderHDF5(std::move(model), hdf5Filename, "") -{} + std::unique_ptr model, + std::string const& hdf5Filename) + : MultiConditionDataProviderHDF5(std::move(model), hdf5Filename, "") {} MultiConditionDataProviderHDF5::~MultiConditionDataProviderHDF5() { [[maybe_unused]] auto lock = 
hdf5MutexGetLock(); @@ -28,11 +27,10 @@ MultiConditionDataProviderHDF5::~MultiConditionDataProviderHDF5() { MultiConditionDataProviderHDF5::MultiConditionDataProviderHDF5( std::unique_ptr model, - std::string const& hdf5Filename, - std::string const& rootPath) - : model_(std::move(model)) - , root_path_(rootPath) -{ + std::string const& hdf5Filename, + std::string const& rootPath) + : model_(std::move(model)) + , root_path_(rootPath) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); file_ = hdf5OpenForReading(hdf5Filename); @@ -43,17 +41,17 @@ MultiConditionDataProviderHDF5::MultiConditionDataProviderHDF5( hdf5_measurement_sigma_path_ = rootPath + "/measurements/ysigma"; hdf5_condition_path_ = rootPath + "/fixedParameters/k"; hdf5_reference_condition_path_ = - rootPath + "/fixedParameters/simulationConditions"; + rootPath + "/fixedParameters/simulationConditions"; hdf5_amici_options_path_ = rootPath + "/amiciOptions"; hdf5_parameter_path_ = rootPath + "/parameters"; hdf5_parameter_min_path_ = hdf5_parameter_path_ + "/lowerBound"; hdf5_parameter_max_path_ = hdf5_parameter_path_ + "/upperBound"; hdf5_parameter_scale_simulation_path_ = - hdf5_parameter_path_ + "/pscaleSimulation"; + hdf5_parameter_path_ + "/pscaleSimulation"; hdf5_parameter_scale_optimization_path_ = - hdf5_parameter_path_ + "/pscaleOptimization"; + hdf5_parameter_path_ + "/pscaleOptimization"; hdf5_simulation_to_optimization_parameter_mapping_path_ = - rootPath + "/parameters/optimizationSimulationMapping"; + rootPath + "/parameters/optimizationSimulationMapping"; hdf5_parameter_overrides_path = rootPath + "/parameters/parameterOverrides"; hdf5_parameter_ids_path_ = rootPath + "/parameters/parameterNames"; hdf5_reinitialization_idxs_path_ = @@ -61,12 +59,10 @@ MultiConditionDataProviderHDF5::MultiConditionDataProviderHDF5( checkDataIntegrity(); amici::hdf5::readModelDataFromHDF5( - file_, *this->model_, hdf5_amici_options_path_); + file_, *this->model_, hdf5_amici_options_path_); } -int -MultiConditionDataProviderHDF5::getNumberOfSimulationConditions() const -{ +int MultiConditionDataProviderHDF5::getNumberOfSimulationConditions() const { // TODO: add additional layer for selection of condition indices (for // testing and later for mini-batch) // -> won't need different file for testing/validation splits @@ -75,23 +71,22 @@ MultiConditionDataProviderHDF5::getNumberOfSimulationConditions() const [[maybe_unused]] auto lock = hdf5MutexGetLock(); int d1, d2; - hdf5GetDatasetDimensions(file_, hdf5_reference_condition_path_, - 2, &d1, &d2); + hdf5GetDatasetDimensions( + file_, hdf5_reference_condition_path_, 2, &d1, &d2); return d1; } std::vector MultiConditionDataProviderHDF5::getSimulationToOptimizationParameterMapping( - int conditionIdx) const -{ + int conditionIdx) const { std::string path = hdf5_simulation_to_optimization_parameter_mapping_path_; [[maybe_unused]] auto lock = hdf5MutexGetLock(); if (file_.nameExists(path)) { return hdf5Read2DIntegerHyperslab( - file_, path, model_->np(), 1, 0, conditionIdx); + file_, path, model_->np(), 1, 0, conditionIdx); } // return trivial default mapping @@ -101,36 +96,37 @@ MultiConditionDataProviderHDF5::getSimulationToOptimizationParameterMapping( return defaultMap; } -void -MultiConditionDataProviderHDF5::mapSimulationToOptimizationGradientAddMultiply( - int conditionIdx, - gsl::span simulation, - gsl::span optimization, - gsl::span parameters, - double coefficient) const -{ +void MultiConditionDataProviderHDF5:: + mapSimulationToOptimizationGradientAddMultiply( + int conditionIdx, 
+ gsl::span simulation, + gsl::span optimization, + gsl::span parameters, + double coefficient) const { auto mapping = getSimulationToOptimizationParameterMapping(conditionIdx); auto plist = getSensitivityParameterList(mapping); // Need to consider varying scaling auto scaleOpt = getParameterScaleOpt(); auto scaleSim = getParameterScaleSim(conditionIdx); - for(std::vector::size_type i_plist = 0; i_plist < plist.size(); ++i_plist) { + for (std::vector::size_type i_plist = 0; i_plist < plist.size(); + ++i_plist) { auto i_p = plist[i_plist]; - double newGrad = applyChainRule(simulation[i_plist], parameters[i_p], - scaleSim[i_p], scaleOpt[mapping[i_p]]); + double newGrad = applyChainRule( + simulation[i_plist], + parameters[i_p], + scaleSim[i_p], + scaleOpt[mapping[i_p]]); optimization[mapping[i_p]] += coefficient * newGrad; } } -void -MultiConditionDataProviderHDF5::mapAndSetOptimizationToSimulationVariables( - int conditionIdx, - gsl::span optimization, - gsl::span simulation, - gsl::span optimizationScale, - gsl::span simulationScale) const -{ +void MultiConditionDataProviderHDF5::mapAndSetOptimizationToSimulationVariables( + int conditionIdx, + gsl::span optimization, + gsl::span simulation, + gsl::span optimizationScale, + gsl::span simulationScale) const { auto mapping = getSimulationToOptimizationParameterMapping(conditionIdx); std::vector overrides; @@ -140,13 +136,14 @@ MultiConditionDataProviderHDF5::mapAndSetOptimizationToSimulationVariables( if (file_.nameExists(hdf5_parameter_overrides_path)) { overrides.resize(model_->np()); - hdf5Read2DDoubleHyperslab(file_, - hdf5_parameter_overrides_path, - model_->np(), - 1, - 0, - conditionIdx, - overrides); + hdf5Read2DDoubleHyperslab( + file_, + hdf5_parameter_overrides_path, + model_->np(), + 1, + 0, + conditionIdx, + overrides); } } @@ -156,9 +153,9 @@ MultiConditionDataProviderHDF5::mapAndSetOptimizationToSimulationVariables( if (mapping[i] >= 0) { // map from optimization parameters simulation[i] = getScaledParameter( - getUnscaledParameter(optimization[mapping[i]], - optimizationScale[mapping[i]]), - simulationScale[i]); + getUnscaledParameter( + optimization[mapping[i]], optimizationScale[mapping[i]]), + simulationScale[i]); } else if (!overrides.empty()) { // TODO do we need to rescale here? or done in PEtab? 
simulation[i] = overrides[i]; @@ -169,11 +166,10 @@ MultiConditionDataProviderHDF5::mapAndSetOptimizationToSimulationVariables( } std::vector -MultiConditionDataProviderHDF5::getParameterScaleOpt() const -{ +MultiConditionDataProviderHDF5::getParameterScaleOpt() const { [[maybe_unused]] auto lock = hdf5MutexGetLock(); auto resInt = amici::hdf5::getIntDataset1D( - file_, hdf5_parameter_scale_optimization_path_); + file_, hdf5_parameter_scale_optimization_path_); std::vector res(resInt.size()); for (unsigned int i = 0; i < resInt.size(); ++i) res[i] = static_cast(resInt[i]); @@ -181,90 +177,80 @@ MultiConditionDataProviderHDF5::getParameterScaleOpt() const } amici::ParameterScaling -MultiConditionDataProviderHDF5::getParameterScaleOpt(int parameterIdx) const -{ +MultiConditionDataProviderHDF5::getParameterScaleOpt(int parameterIdx) const { auto res = - hdf5Read1DIntegerHyperslab( - file_, hdf5_parameter_scale_optimization_path_, 1, parameterIdx) - .at(0); + hdf5Read1DIntegerHyperslab( + file_, hdf5_parameter_scale_optimization_path_, 1, parameterIdx) + .at(0); return static_cast(res); } std::vector -MultiConditionDataProviderHDF5::getParameterScaleSim(int simulationIdx) const -{ - auto resInt = - hdf5Read2DIntegerHyperslab(file_, - hdf5_parameter_scale_simulation_path_, - 1, - model_->np(), - simulationIdx, - 0); +MultiConditionDataProviderHDF5::getParameterScaleSim(int simulationIdx) const { + auto resInt = hdf5Read2DIntegerHyperslab( + file_, + hdf5_parameter_scale_simulation_path_, + 1, + model_->np(), + simulationIdx, + 0); std::vector res(resInt.size()); for (unsigned int i = 0; i < resInt.size(); ++i) res[i] = static_cast(resInt[i]); return res; } -amici::ParameterScaling -MultiConditionDataProviderHDF5::getParameterScaleSim( - int simulationIdx, - int modelParameterIdx) const -{ - auto res = hdf5Read2DIntegerHyperslab(file_, - hdf5_parameter_scale_simulation_path_, - 1, - 1, - simulationIdx, - modelParameterIdx) - .at(0); +amici::ParameterScaling MultiConditionDataProviderHDF5::getParameterScaleSim( + int simulationIdx, + int modelParameterIdx) const { + auto res = hdf5Read2DIntegerHyperslab( + file_, + hdf5_parameter_scale_simulation_path_, + 1, + 1, + simulationIdx, + modelParameterIdx) + .at(0); return static_cast(res); } -void -MultiConditionDataProviderHDF5::updateFixedSimulationParameters( - int simulationIdx, - amici::ExpData& edata) const -{ +void MultiConditionDataProviderHDF5::updateFixedSimulationParameters( + int simulationIdx, + amici::ExpData& edata) const { edata.fixedParameters.resize(model_->nk()); // TODO cache int conditionIdxPreeq, conditionIdxSim; - getSimAndPreeqConditions(simulationIdx, - conditionIdxPreeq, - conditionIdxSim); + getSimAndPreeqConditions(simulationIdx, conditionIdxPreeq, conditionIdxSim); if (conditionIdxPreeq >= 0) { // -1 means no preequilibration edata.fixedParametersPreequilibration.resize(model_->nk()); - readFixedSimulationParameters(conditionIdxPreeq, - edata.fixedParametersPreequilibration); + readFixedSimulationParameters( + conditionIdxPreeq, edata.fixedParametersPreequilibration); edata.reinitialization_state_idxs_sim = getReinitializationIndices(simulationIdx); } else { edata.fixedParametersPreequilibration.resize(0); - edata.reinitialization_state_idxs_sim .clear(); + edata.reinitialization_state_idxs_sim.clear(); } readFixedSimulationParameters(conditionIdxSim, edata.fixedParameters); } -void MultiConditionDataProviderHDF5::setModel(std::unique_ptr model) -{ +void MultiConditionDataProviderHDF5::setModel( + std::unique_ptr model) { 
model_ = std::move(model); } -std::vector MultiConditionDataProviderHDF5::getProblemParameterIds() const -{ +std::vector +MultiConditionDataProviderHDF5::getProblemParameterIds() const { return hdf5Read1dStringDataset(file_, hdf5_parameter_ids_path_); - } -void -MultiConditionDataProviderHDF5::readFixedSimulationParameters( - int conditionIdx, - gsl::span buffer) const -{ +void MultiConditionDataProviderHDF5::readFixedSimulationParameters( + int conditionIdx, + gsl::span buffer) const { if (!model_->nk()) return; @@ -272,22 +258,18 @@ MultiConditionDataProviderHDF5::readFixedSimulationParameters( H5_SAVE_ERROR_HANDLER; - hdf5Read2DDoubleHyperslab(file_, - hdf5_condition_path_, - model_->nk(), - 1, - 0, - conditionIdx, - buffer); + hdf5Read2DDoubleHyperslab( + file_, hdf5_condition_path_, model_->nk(), 1, 0, conditionIdx, buffer); if (H5Eget_num(H5E_DEFAULT)) { - logmessage(loglevel::critical, - "Problem in readFixedParameters (row %d, nk %d)\n", - conditionIdx, - model_->nk()); + logmessage( + loglevel::critical, + "Problem in readFixedParameters (row %d, nk %d)\n", + conditionIdx, + model_->nk()); printBacktrace(20); H5Ewalk2( - H5E_DEFAULT, H5E_WALK_DOWNWARD, hdf5ErrorStackWalker_cb, nullptr); + H5E_DEFAULT, H5E_WALK_DOWNWARD, hdf5ErrorStackWalker_cb, nullptr); abort(); } @@ -299,15 +281,15 @@ MultiConditionDataProviderHDF5::readFixedSimulationParameters( "unable to read data"); } -std::unique_ptr MultiConditionDataProviderHDF5::getExperimentalDataForCondition( - int simulationIdx) const { +std::unique_ptr +MultiConditionDataProviderHDF5::getExperimentalDataForCondition( + int simulationIdx) const { auto edata = std::make_unique(*model_); [[maybe_unused]] auto lock = hdf5MutexGetLock(); - edata->setTimepoints( - amici::hdf5::getDoubleDataset1D( - file_, root_path_ + "/measurements/t/" - + std::to_string(simulationIdx))); + edata->setTimepoints(amici::hdf5::getDoubleDataset1D( + file_, + root_path_ + "/measurements/t/" + std::to_string(simulationIdx))); edata->setObservedData(getMeasurementForSimulationIndex(simulationIdx)); edata->setObservedDataStdDev(getSigmaForSimulationIndex(simulationIdx)); // fixed parameters and state variable indices for reinitialization @@ -320,13 +302,14 @@ std::unique_ptr MultiConditionDataProviderHDF5::getExperimentalD } std::vector MultiConditionDataProviderHDF5::getSensitivityParameterList( - const std::vector &mapping) const { + std::vector const& mapping) const { // We can construct plist simply from the parameter mapping: // If a model parameter does not map to a problem parameter (w.r.t. all // of which we need to compute sensitivities), there is a negative index // in the mapping. That means, plist is the list of indices for which // the mapping is non-negative. 
- int nplist = std::count_if(mapping.begin(), mapping.end(), [](int i) { return i >= 0; }); + int nplist = std::count_if( + mapping.begin(), mapping.end(), [](int i) { return i >= 0; }); int i_plist = 0; std::vector plist(nplist); for (int i_p = 0; i_p < model_->np(); ++i_p) { @@ -338,8 +321,7 @@ std::vector MultiConditionDataProviderHDF5::getSensitivityParameterList( } std::vector> -MultiConditionDataProviderHDF5::getAllMeasurements() const -{ +MultiConditionDataProviderHDF5::getAllMeasurements() const { std::vector> result(getNumberOfSimulationConditions()); for (int conditionIdx = 0; (unsigned)conditionIdx < result.size(); ++conditionIdx) { @@ -349,8 +331,7 @@ MultiConditionDataProviderHDF5::getAllMeasurements() const } std::vector> -MultiConditionDataProviderHDF5::getAllSigmas() const -{ +MultiConditionDataProviderHDF5::getAllSigmas() const { std::vector> result(getNumberOfSimulationConditions()); for (int conditionIdx = 0; (unsigned)conditionIdx < result.size(); ++conditionIdx) { @@ -359,36 +340,31 @@ MultiConditionDataProviderHDF5::getAllSigmas() const return result; } -std::vector -MultiConditionDataProviderHDF5::getSigmaForSimulationIndex( - int simulationIdx) const -{ +std::vector MultiConditionDataProviderHDF5::getSigmaForSimulationIndex( + int simulationIdx) const { hsize_t dim1, dim2; [[maybe_unused]] auto lock = hdf5MutexGetLock(); - return amici::hdf5::getDoubleDataset2D(file_, - hdf5_measurement_sigma_path_ + "/" + - std::to_string(simulationIdx), - dim1, - dim2); + return amici::hdf5::getDoubleDataset2D( + file_, + hdf5_measurement_sigma_path_ + "/" + std::to_string(simulationIdx), + dim1, + dim2); } std::vector MultiConditionDataProviderHDF5::getMeasurementForSimulationIndex( - int simulationIdx) const -{ + int simulationIdx) const { hsize_t dim1, dim2; [[maybe_unused]] auto lock = hdf5MutexGetLock(); - return amici::hdf5::getDoubleDataset2D(file_, - hdf5_measurement_path_ + "/" + - std::to_string(simulationIdx), - dim1, - dim2); + return amici::hdf5::getDoubleDataset2D( + file_, + hdf5_measurement_path_ + "/" + std::to_string(simulationIdx), + dim1, + dim2); } -void -MultiConditionDataProviderHDF5::getOptimizationParametersLowerBounds( - gsl::span buffer) const -{ +void MultiConditionDataProviderHDF5::getOptimizationParametersLowerBounds( + gsl::span buffer) const { [[maybe_unused]] auto lock = hdf5MutexGetLock(); auto dataset = file_.openDataSet(hdf5_parameter_min_path_); @@ -403,10 +379,8 @@ MultiConditionDataProviderHDF5::getOptimizationParametersLowerBounds( dataset.read(buffer.data(), H5::PredType::NATIVE_DOUBLE); } -void -MultiConditionDataProviderHDF5::getOptimizationParametersUpperBounds( - gsl::span buffer) const -{ +void MultiConditionDataProviderHDF5::getOptimizationParametersUpperBounds( + gsl::span buffer) const { [[maybe_unused]] auto lock = hdf5MutexGetLock(); auto dataset = file_.openDataSet(hdf5_parameter_max_path_); @@ -421,38 +395,31 @@ MultiConditionDataProviderHDF5::getOptimizationParametersUpperBounds( dataset.read(buffer.data(), H5::PredType::NATIVE_DOUBLE); } -int -MultiConditionDataProviderHDF5::getNumOptimizationParameters() const -{ +int MultiConditionDataProviderHDF5::getNumOptimizationParameters() const { std::string path = root_path_ + "/parameters/parameterNames"; int size = 0; hdf5GetDatasetDimensions(file_, path, 1, &size); return size; } -std::unique_ptr -MultiConditionDataProviderHDF5::getModel() const -{ +std::unique_ptr MultiConditionDataProviderHDF5::getModel() const { return std::unique_ptr(model_->clone()); } std::unique_ptr 
-MultiConditionDataProviderHDF5::getSolver() const -{ +MultiConditionDataProviderHDF5::getSolver() const { auto solver = model_->getSolver(); [[maybe_unused]] auto lock = hdf5MutexGetLock(); amici::hdf5::readSolverSettingsFromHDF5( - file_, *solver, hdf5_amici_options_path_); + file_, *solver, hdf5_amici_options_path_); return solver; } -void -MultiConditionDataProviderHDF5::updateSimulationParametersAndScale( - int simulationIdx, - gsl::span optimizationParams, - amici::Model& model) const -{ +void MultiConditionDataProviderHDF5::updateSimulationParametersAndScale( + int simulationIdx, + gsl::span optimizationParams, + amici::Model& model) const { // int conditionIdxPreeq, conditionIdxSim; // getSimAndPreeqConditions(simulationIdx, conditionIdxPreeq, // conditionIdxSim); @@ -463,38 +430,35 @@ MultiConditionDataProviderHDF5::updateSimulationParametersAndScale( model.setParameterScale(scaleSim); mapAndSetOptimizationToSimulationVariables( - simulationIdx, optimizationParams, p, scaleOpt, scaleSim); + simulationIdx, optimizationParams, p, scaleOpt, scaleSim); model.setParameters(p); } -void -MultiConditionDataProviderHDF5::copyInputData(H5::H5File const& target) -{ +void MultiConditionDataProviderHDF5::copyInputData(H5::H5File const& target) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); - H5Ocopy(file_.getId(), - "/", - target.getId(), - "/inputData", - H5P_DEFAULT, - H5P_DEFAULT); + H5Ocopy( + file_.getId(), + "/", + target.getId(), + "/inputData", + H5P_DEFAULT, + H5P_DEFAULT); H5Fflush(target.getId(), H5F_SCOPE_LOCAL); } -void -MultiConditionDataProviderHDF5::getSimAndPreeqConditions( - const int simulationIdx, - int& preequilibrationConditionIdx, - int& simulationConditionIdx) const -{ +void MultiConditionDataProviderHDF5::getSimAndPreeqConditions( + int const simulationIdx, + int& preequilibrationConditionIdx, + int& simulationConditionIdx) const { auto tmp = hdf5Read2DIntegerHyperslab( - file_, hdf5_reference_condition_path_, 1, 2, simulationIdx, 0); + file_, hdf5_reference_condition_path_, 1, 2, simulationIdx, 0); preequilibrationConditionIdx = tmp[0]; simulationConditionIdx = tmp[1]; } std::vector MultiConditionDataProviderHDF5::getReinitializationIndices( - const int simulationIdx) const { + int const simulationIdx) const { [[maybe_unused]] auto lock = hdf5MutexGetLock(); auto dataset = file_.openDataSet(hdf5_reinitialization_idxs_path_); @@ -502,12 +466,13 @@ std::vector MultiConditionDataProviderHDF5::getReinitializationIndices( Expects(filespace.getSimpleExtentNdims() == 1); hsize_t num_simulation_conditions; filespace.getSimpleExtentDims(&num_simulation_conditions); - Expects(simulationIdx >= 0 - && (hsize_t) simulationIdx < num_simulation_conditions); + Expects( + simulationIdx >= 0 && + (hsize_t)simulationIdx < num_simulation_conditions); // read only for one condition - const hsize_t len = 1; - const hsize_t offset = simulationIdx; + hsize_t const len = 1; + hsize_t const offset = simulationIdx; filespace.selectHyperslab(H5S_SELECT_SET, &len, &offset); H5::DataSpace memspace(1, &len); @@ -517,13 +482,12 @@ std::vector MultiConditionDataProviderHDF5::getReinitializationIndices( dataset.read(&buffer, memtype, memspace, filespace); Expects(buffer.len == 0 || buffer.p); - auto int_ptr = static_cast(buffer.p); + auto int_ptr = static_cast(buffer.p); return std::vector(&int_ptr[0], &int_ptr[buffer.len]); } -H5::H5File MultiConditionDataProviderHDF5::getHdf5File() const -{ +H5::H5File MultiConditionDataProviderHDF5::getHdf5File() const { [[maybe_unused]] auto lock = 
hdf5MutexGetLock(); H5::H5File result(file_); return result; @@ -547,32 +511,32 @@ H5::H5File MultiConditionDataProviderHDF5::getHdf5File() const // getMaxIter()); //} -void -MultiConditionDataProviderHDF5::checkDataIntegrity() const -{ +void MultiConditionDataProviderHDF5::checkDataIntegrity() const { // check matching IDs std::string modelParameterIdsPath = root_path_ + "/model/parameterIds"; auto dataParameterIds = - hdf5Read1dStringDataset(file_, modelParameterIdsPath); + hdf5Read1dStringDataset(file_, modelParameterIdsPath); auto modelParameterIds = this->model_->getParameterIds(); - RELEASE_ASSERT(dataParameterIds == modelParameterIds, - "Parameter IDs do not match."); + RELEASE_ASSERT( + dataParameterIds == modelParameterIds, "Parameter IDs do not match."); std::string speciesIdsPath = root_path_ + "/model/stateIds"; auto dataSpeciesIds = hdf5Read1dStringDataset(file_, speciesIdsPath); - RELEASE_ASSERT(dataSpeciesIds == model_->getStateIds(), - "State IDs do not match."); + RELEASE_ASSERT( + dataSpeciesIds == model_->getStateIds(), "State IDs do not match."); std::string fixedParIdsPath = root_path_ + "/model/fixedParameterIds"; auto fixedParIds = hdf5Read1dStringDataset(file_, fixedParIdsPath); - RELEASE_ASSERT(fixedParIds == model_->getFixedParameterIds(), - "Fixed parameter IDs do not match."); + RELEASE_ASSERT( + fixedParIds == model_->getFixedParameterIds(), + "Fixed parameter IDs do not match."); std::string observableIdsPath = root_path_ + "/model/observableIds"; auto observableIds = hdf5Read1dStringDataset(file_, observableIdsPath); - RELEASE_ASSERT(observableIds == model_->getObservableIds(), - "Observable IDs do not match."); + RELEASE_ASSERT( + observableIds == model_->getObservableIds(), + "Observable IDs do not match."); // int numConditions = getNumberOfSimulationConditions(); @@ -581,9 +545,9 @@ MultiConditionDataProviderHDF5::checkDataIntegrity() const [[maybe_unused]] auto lock = hdf5MutexGetLock(); Ensures( - H5Lexists(file_.getId(), hdf5_measurement_path_.c_str(), H5P_DEFAULT)); + H5Lexists(file_.getId(), hdf5_measurement_path_.c_str(), H5P_DEFAULT)); Ensures(H5Lexists( - file_.getId(), hdf5_measurement_sigma_path_.c_str(), H5P_DEFAULT)); + file_.getId(), hdf5_measurement_sigma_path_.c_str(), H5P_DEFAULT)); // parpe::hdf5GetDatasetDimensions(file.getId(), // hdf5MeasurementPath.c_str(), @@ -601,42 +565,36 @@ MultiConditionDataProviderHDF5::checkDataIntegrity() const if (model_->nk()) { parpe::hdf5GetDatasetDimensions( - file_, hdf5_condition_path_, 2, &d1, &d2); + file_, hdf5_condition_path_, 2, &d1, &d2); Expects(d1 == model_->nk()); } } MultiConditionDataProviderDefault::MultiConditionDataProviderDefault( - std::unique_ptr model, - std::unique_ptr solver) - : model_(std::move(model)) - , solver_(std::move(solver)) -{} - -int -MultiConditionDataProviderDefault::getNumberOfSimulationConditions() const -{ + std::unique_ptr model, + std::unique_ptr solver) + : model_(std::move(model)) + , solver_(std::move(solver)) {} + +int MultiConditionDataProviderDefault::getNumberOfSimulationConditions() const { return edata_.size(); } std::vector MultiConditionDataProviderDefault::getSimulationToOptimizationParameterMapping( - int /*conditionIdx*/) const -{ + int /*conditionIdx*/) const { std::vector mapping(model_->np()); std::iota(mapping.begin(), mapping.end(), 0); return mapping; } -void -MultiConditionDataProviderDefault :: - mapSimulationToOptimizationGradientAddMultiply( - int conditionIdx, - gsl::span simulation, - gsl::span optimization, - gsl::span /*parameters*/, - 
double coefficient) const -{ +void MultiConditionDataProviderDefault :: + mapSimulationToOptimizationGradientAddMultiply( + int conditionIdx, + gsl::span simulation, + gsl::span optimization, + gsl::span /*parameters*/, + double coefficient) const { // TODO redundant auto mapping = getSimulationToOptimizationParameterMapping(conditionIdx); @@ -646,14 +604,13 @@ MultiConditionDataProviderDefault :: } } -void -MultiConditionDataProviderDefault::mapAndSetOptimizationToSimulationVariables( - int conditionIdx, - gsl::span optimization, - gsl::span simulation, - gsl::span /*optimizationScale*/, - gsl::span /*simulationScale*/) const -{ +void MultiConditionDataProviderDefault:: + mapAndSetOptimizationToSimulationVariables( + int conditionIdx, + gsl::span optimization, + gsl::span simulation, + gsl::span /*optimizationScale*/, + gsl::span /*simulationScale*/) const { // TODO redundant auto mapping = getSimulationToOptimizationParameterMapping(conditionIdx); @@ -663,132 +620,117 @@ MultiConditionDataProviderDefault::mapAndSetOptimizationToSimulationVariables( } std::vector -MultiConditionDataProviderDefault::getParameterScaleOpt() const -{ +MultiConditionDataProviderDefault::getParameterScaleOpt() const { return model_->getParameterScale(); } -amici::ParameterScaling -MultiConditionDataProviderDefault::getParameterScaleOpt( - int optimizationParameterIndex) const -{ +amici::ParameterScaling MultiConditionDataProviderDefault::getParameterScaleOpt( + int optimizationParameterIndex) const { return getParameterScaleSim(0, optimizationParameterIndex); } -amici::ParameterScaling -MultiConditionDataProviderDefault::getParameterScaleSim( - int /*simulationIdx*/, - int optimizationParameterIndex) const -{ +amici::ParameterScaling MultiConditionDataProviderDefault::getParameterScaleSim( + int /*simulationIdx*/, + int optimizationParameterIndex) const { // TODO assumes no extra optimization parameters return model_->getParameterScale()[optimizationParameterIndex]; } std::vector MultiConditionDataProviderDefault::getParameterScaleSim( - int /*simulationIdx*/) const -{ + int /*simulationIdx*/) const { return model_->getParameterScale(); } -void -MultiConditionDataProviderDefault::updateSimulationParametersAndScale( - int /*conditionIndex*/, - gsl::span optimizationParams, - amici::Model& model) const -{ - logmessage(loglevel::warning, - "MultiConditionDataProviderDefault::updateSimulationParameters: " - "No proper mapping implemented. Ensure this is correct."); - model.setParameters(std::vector(optimizationParams.begin(), - optimizationParams.end())); +void MultiConditionDataProviderDefault::updateSimulationParametersAndScale( + int /*conditionIndex*/, + gsl::span optimizationParams, + amici::Model& model) const { + logmessage( + loglevel::warning, + "MultiConditionDataProviderDefault::updateSimulationParameters: " + "No proper mapping implemented. 
Ensure this is correct."); + model.setParameters(std::vector( + optimizationParams.begin(), optimizationParams.end())); } std::unique_ptr MultiConditionDataProviderDefault::getExperimentalDataForCondition( - int conditionIdx) const -{ + int conditionIdx) const { return std::make_unique(edata_[conditionIdx]); } std::vector> -MultiConditionDataProviderDefault::getAllMeasurements() const -{ +MultiConditionDataProviderDefault::getAllMeasurements() const { std::vector> measurements; measurements.reserve(edata_.size()); - for (const auto& e : edata_) { + for (auto const& e : edata_) { measurements.push_back(e.getObservedData()); } return measurements; } std::vector> -MultiConditionDataProviderDefault::getAllSigmas() const -{ +MultiConditionDataProviderDefault::getAllSigmas() const { std::vector> sigmas; sigmas.reserve(edata_.size()); - for (const auto& e : edata_) { + for (auto const& e : edata_) { sigmas.push_back(e.getObservedDataStdDev()); } return sigmas; } -int -MultiConditionDataProviderDefault::getNumOptimizationParameters() const -{ +int MultiConditionDataProviderDefault::getNumOptimizationParameters() const { return model_->np(); } std::unique_ptr -MultiConditionDataProviderDefault::getModel() const -{ +MultiConditionDataProviderDefault::getModel() const { return std::unique_ptr(model_->clone()); } std::unique_ptr -MultiConditionDataProviderDefault::getSolver() const -{ +MultiConditionDataProviderDefault::getSolver() const { return std::unique_ptr(solver_->clone()); } -std::vector MultiConditionDataProviderDefault::getProblemParameterIds() const -{ +std::vector +MultiConditionDataProviderDefault::getProblemParameterIds() const { // not implemented std::terminate(); } -double -applyChainRule(double gradient, - double parameter, - amici::ParameterScaling oldScale, - amici::ParameterScaling newScale) -{ +double applyChainRule( + double gradient, + double parameter, + amici::ParameterScaling oldScale, + amici::ParameterScaling newScale) { if (oldScale == newScale) return gradient; // unapply old switch (oldScale) { - case amici::ParameterScaling::log10: - gradient /= getUnscaledParameter(parameter, oldScale) * log(10); - break; - case amici::ParameterScaling::ln: - gradient /= getUnscaledParameter(parameter, oldScale); - break; - case amici::ParameterScaling::none: - break; + case amici::ParameterScaling::log10: + gradient /= getUnscaledParameter(parameter, oldScale) * log(10); + break; + case amici::ParameterScaling::ln: + gradient /= getUnscaledParameter(parameter, oldScale); + break; + case amici::ParameterScaling::none: + break; } // apply switch (newScale) { - case amici::ParameterScaling::log10: - gradient *= getUnscaledParameter(parameter, oldScale) * log(10); - break; - case amici::ParameterScaling::ln: - gradient *= getUnscaledParameter(parameter, oldScale); - break; - case amici::ParameterScaling::none: - break; + case amici::ParameterScaling::log10: + gradient *= getUnscaledParameter(parameter, oldScale) * log(10); + break; + case amici::ParameterScaling::ln: + gradient *= getUnscaledParameter(parameter, oldScale); + break; + case amici::ParameterScaling::none: + break; } return gradient; diff --git a/src/parpeamici/multiConditionProblem.cpp b/src/parpeamici/multiConditionProblem.cpp index 8f1599dce..30ec83126 100644 --- a/src/parpeamici/multiConditionProblem.cpp +++ b/src/parpeamici/multiConditionProblem.cpp @@ -6,9 +6,9 @@ #include #include +#include #include #include -#include #include @@ -32,56 +32,52 @@ namespace parpe { // For debugging: // skip objective function 
evaluation completely -//#define NO_OBJ_FUN_EVAL +// #define NO_OBJ_FUN_EVAL -MultiConditionProblem::MultiConditionProblem(MultiConditionDataProvider *dp) +MultiConditionProblem::MultiConditionProblem(MultiConditionDataProvider* dp) : MultiConditionProblem(dp, nullptr, nullptr, nullptr) {} MultiConditionProblem::MultiConditionProblem( - MultiConditionDataProvider *dp, - LoadBalancerMaster *loadBalancer, - std::unique_ptr logger, - std::unique_ptr resultWriter) - : dataProvider(dp), - resultWriter(std::move(resultWriter)) -{ + MultiConditionDataProvider* dp, + LoadBalancerMaster* loadBalancer, + std::unique_ptr logger, + std::unique_ptr resultWriter) + : dataProvider(dp) + , resultWriter(std::move(resultWriter)) { this->logger_ = std::move(logger); // run on all data - std::vector dataIndices(dataProvider->getNumberOfSimulationConditions()); + std::vector dataIndices( + dataProvider->getNumberOfSimulationConditions()); std::iota(dataIndices.begin(), dataIndices.end(), 0); - cost_fun_ = std::make_unique< - SummedGradientFunctionGradientFunctionAdapter - > ( - std::make_unique( - dataProvider, loadBalancer, this->resultWriter.get()), - dataIndices); + cost_fun_ = + std::make_unique>( + std::make_unique( + dataProvider, loadBalancer, this->resultWriter.get()), + dataIndices); - if(auto hdp = dynamic_cast(dp)) { + if (auto hdp = dynamic_cast(dp)) { parametersMin.resize(dp->getNumOptimizationParameters()); hdp->getOptimizationParametersLowerBounds(parametersMin); parametersMax.resize(dp->getNumOptimizationParameters()); hdp->getOptimizationParametersUpperBounds(parametersMax); } - } -void MultiConditionProblem::fillParametersMin(gsl::span buffer) const -{ +void MultiConditionProblem::fillParametersMin(gsl::span buffer) const { Expects(buffer.size() == parametersMin.size()); std::copy(parametersMin.begin(), parametersMin.end(), buffer.begin()); } -void MultiConditionProblem::fillParametersMax(gsl::span buffer) const -{ +void MultiConditionProblem::fillParametersMax(gsl::span buffer) const { Expects(buffer.size() == parametersMax.size()); std::copy(parametersMax.begin(), parametersMax.end(), buffer.begin()); } -void MultiConditionProblem::fillInitialParameters(gsl::span buffer) const -{ - if(!startingPoint.empty()) { +void MultiConditionProblem::fillInitialParameters( + gsl::span buffer) const { + if (!startingPoint.empty()) { Expects(buffer.size() == startingPoint.size()); std::copy(startingPoint.begin(), startingPoint.end(), buffer.begin()); } else { @@ -89,7 +85,6 @@ void MultiConditionProblem::fillInitialParameters(gsl::span buffer) cons } } - int MultiConditionProblem::earlyStopping() { bool stop = false; @@ -109,67 +104,71 @@ int MultiConditionProblem::earlyStopping() { return static_cast(stop); } - void MultiConditionProblem::setInitialParameters( - std::vector const& startingPoint) -{ + std::vector const& startingPoint) { this->startingPoint = startingPoint; } void MultiConditionProblem::setParametersMin( - std::vector const& lowerBounds) -{ + std::vector const& lowerBounds) { parametersMin = lowerBounds; } void MultiConditionProblem::setParametersMax( - std::vector const& upperBounds) -{ + std::vector const& upperBounds) { parametersMax = upperBounds; } -std::unique_ptr MultiConditionProblem::getReporter() const -{ +std::unique_ptr +MultiConditionProblem::getReporter() const { return std::make_unique( - cost_fun_.get(), - std::make_unique(*resultWriter), - std::make_unique(*logger_)); + cost_fun_.get(), + std::make_unique(*resultWriter), + std::make_unique(*logger_)); } -std::vector 
MultiConditionProblem::getTrainingData() const -{ +std::vector MultiConditionProblem::getTrainingData() const { std::vector dataIndices( - dataProvider->getNumberOfSimulationConditions()); + dataProvider->getNumberOfSimulationConditions()); std::iota(dataIndices.begin(), dataIndices.end(), 0); return dataIndices; } -MultiConditionDataProvider *MultiConditionProblem::getDataProvider() { +MultiConditionDataProvider* MultiConditionProblem::getDataProvider() { return dataProvider; } -OptimizationResultWriter *MultiConditionProblem::getResultWriter() { +OptimizationResultWriter* MultiConditionProblem::getResultWriter() { return resultWriter.get(); } -MultiConditionProblemMultiStartOptimizationProblem -::MultiConditionProblemMultiStartOptimizationProblem( - MultiConditionDataProviderHDF5 *dp, +MultiConditionProblemMultiStartOptimizationProblem :: + MultiConditionProblemMultiStartOptimizationProblem( + MultiConditionDataProviderHDF5* dp, OptimizationOptions options, - OptimizationResultWriter *resultWriter, - LoadBalancerMaster *loadBalancer, std::unique_ptr logger) - : data_provider_(dp), options_(std::move(options)), - result_writer_(resultWriter), load_balancer_(loadBalancer), - logger_(std::move(logger)) -{} - -int MultiConditionProblemMultiStartOptimizationProblem::getNumberOfStarts() const { return options_.numStarts; } + OptimizationResultWriter* resultWriter, + LoadBalancerMaster* loadBalancer, + std::unique_ptr logger) + : data_provider_(dp) + , options_(std::move(options)) + , result_writer_(resultWriter) + , load_balancer_(loadBalancer) + , logger_(std::move(logger)) {} + +int MultiConditionProblemMultiStartOptimizationProblem::getNumberOfStarts() + const { + return options_.numStarts; +} -bool MultiConditionProblemMultiStartOptimizationProblem::restartOnFailure() const { return options_.retryOptimization; } +bool MultiConditionProblemMultiStartOptimizationProblem::restartOnFailure() + const { + return options_.retryOptimization; +} -std::unique_ptr MultiConditionProblemMultiStartOptimizationProblem::getLocalProblem( - int multiStartIndex) const { +std::unique_ptr +MultiConditionProblemMultiStartOptimizationProblem::getLocalProblem( + int multiStartIndex) const { // generate new OptimizationProblem with data from dp Expects(data_provider_ != nullptr); @@ -178,114 +177,135 @@ std::unique_ptr MultiConditionProblemMultiStartOptimization if (result_writer_) { problem = std::make_unique( - data_provider_, load_balancer_, - logger_->getChild(std::string("o") - + std::to_string(multiStartIndex)), - std::make_unique(*result_writer_)); + data_provider_, + load_balancer_, + logger_->getChild( + std::string("o") + std::to_string(multiStartIndex)), + std::make_unique(*result_writer_)); problem->getResultWriter()->setRootPath( - "/multistarts/" + std::to_string(multiStartIndex)); + "/multistarts/" + std::to_string(multiStartIndex)); } else { problem = std::make_unique( - data_provider_, load_balancer_, - logger_->getChild( - std::string("o") + std::to_string(multiStartIndex)), - nullptr); + data_provider_, + load_balancer_, + logger_->getChild( + std::string("o") + std::to_string(multiStartIndex)), + nullptr); } problem->setOptimizationOptions(options_); - problem->setInitialParameters( - parpe::OptimizationOptions::getStartingPoint( - data_provider_->getHdf5File(), multiStartIndex)); + problem->setInitialParameters(parpe::OptimizationOptions::getStartingPoint( + data_provider_->getHdf5File(), multiStartIndex)); - if(options_.hierarchicalOptimization) + if (options_.hierarchicalOptimization) return 
std::make_unique( std::move(problem), data_provider_); return problem; } -void printSimulationResult(Logger *logger, int jobId, - amici::ReturnData const* rdata, double timeSeconds) { - if(!rdata) { +void printSimulationResult( + Logger* logger, + int jobId, + amici::ReturnData const* rdata, + double timeSeconds) { + if (!rdata) { // This should not happen, but apparently we can't rely on AMICI always // returning some result object - logger->logmessage(loglevel::error, - "AMICI simulation failed unexpectedly."); + logger->logmessage( + loglevel::error, "AMICI simulation failed unexpectedly."); return; } char sensi_mode = '-'; - if(rdata->sensi >= amici::SensitivityOrder::first - && rdata->sensi_meth == amici::SensitivityMethod::adjoint) { + if (rdata->sensi >= amici::SensitivityOrder::first && + rdata->sensi_meth == amici::SensitivityMethod::adjoint) { sensi_mode = 'A'; - } else if(rdata->sensi >= amici::SensitivityOrder::first - && rdata->sensi_meth == amici::SensitivityMethod::forward) { + } else if ( + rdata->sensi >= amici::SensitivityOrder::first && + rdata->sensi_meth == amici::SensitivityMethod::forward) { sensi_mode = 'F'; } - bool with_sensi = rdata->sensi >= amici::SensitivityOrder::first && rdata->sensi_meth != amici::SensitivityMethod::none; - - logger->logmessage(loglevel::debug, "Result for %d: %g (%d) (%d/%d/%.4fs/%c%d)", - jobId, rdata->llh, rdata->status, - rdata->numsteps.empty()?-1:rdata->numsteps[rdata->numsteps.size() - 1], - rdata->numstepsB.empty()?-1:rdata->numstepsB[0], - timeSeconds, - sensi_mode, - rdata->nplist); + bool with_sensi = rdata->sensi >= amici::SensitivityOrder::first && + rdata->sensi_meth != amici::SensitivityMethod::none; + + logger->logmessage( + loglevel::debug, + "Result for %d: %g (%d) (%d/%d/%.4fs/%c%d)", + jobId, + rdata->llh, + rdata->status, + rdata->numsteps.empty() ? -1 + : rdata->numsteps[rdata->numsteps.size() - 1], + rdata->numstepsB.empty() ? 
-1 : rdata->numstepsB[0], + timeSeconds, + sensi_mode, + rdata->nplist); // check for NaNs, only report first if (with_sensi) { - for (std::vector::size_type i = 0; i < rdata->sllh.size(); ++i) { + for (std::vector::size_type i = 0; + i < rdata->sllh.size(); + ++i) { if (std::isnan(rdata->sllh[i])) { - logger->logmessage(loglevel::debug, - "Gradient contains NaN at %d", i); + logger->logmessage( + loglevel::debug, "Gradient contains NaN at %d", i); break; } if (std::isinf(rdata->sllh[i])) { - logger->logmessage(loglevel::debug, - "Gradient contains Inf at %d", i); + logger->logmessage( + loglevel::debug, "Gradient contains Inf at %d", i); break; } } } } - -void saveSimulation(const H5::H5File &file, std::string const& pathStr, - std::vector const& parameters, - double llh, gsl::span gradient, - double timeElapsedInSeconds, - gsl::span /*states*/, - gsl::span /*stateSensi*/, - gsl::span /*outputs*/, int jobId, - int status, std::string const& label) -{ +void saveSimulation( + const H5::H5File& file, + std::string const& pathStr, + std::vector const& parameters, + double llh, + gsl::span gradient, + double timeElapsedInSeconds, + gsl::span /*states*/, + gsl::span /*stateSensi*/, + gsl::span /*outputs*/, + int jobId, + int status, + std::string const& label) { // TODO replace by SimulationResultWriter - const char *fullGroupPath = pathStr.c_str(); + char const* fullGroupPath = pathStr.c_str(); [[maybe_unused]] auto lock = hdf5MutexGetLock(); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "simulationLogLikelihood", - gsl::make_span(&llh, 1)); + file, + fullGroupPath, + "simulationLogLikelihood", + gsl::make_span(&llh, 1)); hdf5CreateOrExtendAndWriteToInt2DArray( - file, fullGroupPath, "jobId", - gsl::make_span(&jobId, 1)); + file, fullGroupPath, "jobId", gsl::make_span(&jobId, 1)); // save sllh; but sllh may vary depending on the condition-specific plist if (gradient.size() == parameters.size()) { hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "simulationLogLikelihoodGradient", - gradient); - } else if(!parameters.empty()) { + file, fullGroupPath, "simulationLogLikelihoodGradient", gradient); + } else if (!parameters.empty()) { double dummyGradient[parameters.size()]; std::fill_n(dummyGradient, parameters.size(), NAN); - if(!gradient.empty()) { - std::copy_n(gradient.begin(), std::min(gradient.size(), parameters.size()), dummyGradient); + if (!gradient.empty()) { + std::copy_n( + gradient.begin(), + std::min(gradient.size(), parameters.size()), + dummyGradient); } hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "simulationLogLikelihoodGradient", - gsl::make_span(dummyGradient, parameters.size())); + file, + fullGroupPath, + "simulationLogLikelihoodGradient", + gsl::make_span(dummyGradient, parameters.size())); } if (!parameters.empty()) @@ -293,8 +313,10 @@ void saveSimulation(const H5::H5File &file, std::string const& pathStr, file, fullGroupPath, "simulationParameters", parameters); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "simulationWallTimeInSec", - gsl::make_span(&timeElapsedInSeconds, 1)); + file, + fullGroupPath, + "simulationWallTimeInSec", + gsl::make_span(&timeElapsedInSeconds, 1)); // TODO: This was broken by allowing different numbers of timepoints // for different simulation conditions. 
Vector lengths now may differ and @@ -305,35 +327,37 @@ void saveSimulation(const H5::H5File &file, std::string const& pathStr, // if (!outputs.empty()) // hdf5CreateOrExtendAndWriteToDouble2DArray( - // file.getId(), fullGroupPath, "simulationObservables", outputs); + // file.getId(), fullGroupPath, "simulationObservables", + // outputs); // if (!stateSensi.empty()) // hdf5CreateOrExtendAndWriteToDouble3DArray( - // file.getId(), fullGroupPath, "simulationStateSensitivities", stateSensi, - // stateSensi.size() / parameters.size(), parameters.size()); + // file.getId(), fullGroupPath, "simulationStateSensitivities", + // stateSensi, stateSensi.size() / parameters.size(), + // parameters.size()); hdf5CreateOrExtendAndWriteToInt2DArray( - file, fullGroupPath, "simulationStatus", - gsl::make_span(&status, 1)); + file, + fullGroupPath, + "simulationStatus", + gsl::make_span(&status, 1)); - hdf5CreateOrExtendAndWriteToString1DArray(file, fullGroupPath, - "simulationLabel", label); + hdf5CreateOrExtendAndWriteToString1DArray( + file, fullGroupPath, "simulationLabel", label); file.flush(H5F_SCOPE_LOCAL); - } AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation( - amici::Solver const &solverTemplate, - amici::Model &model, - int conditionIdx, - int jobId, - MultiConditionDataProvider const *dataProvider, - OptimizationResultWriter *resultWriter, - bool logLineSearch, - Logger* logger, - bool sendStates) -{ + amici::Solver const& solverTemplate, + amici::Model& model, + int conditionIdx, + int jobId, + MultiConditionDataProvider const* dataProvider, + OptimizationResultWriter* resultWriter, + bool logLineSearch, + Logger* logger, + bool sendStates) { // wall time on worker for current simulation WallTimer simulationTimer; @@ -355,17 +379,17 @@ AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation( double errorRelaxation = defaultErrorRelaxation; // Set via environment variables? - if(auto env = std::getenv("PARPE_NUM_SIMULATION_TRIALS")) { + if (auto env = std::getenv("PARPE_NUM_SIMULATION_TRIALS")) { maxNumTrials = std::stoi(env); } - if(auto env = + if (auto env = std::getenv("PARPE_INTEGRATION_TOLERANCE_RELAXATION_FACTOR")) { errorRelaxation = std::stod(env); } std::unique_ptr rdata; - for(int trial = 1; trial <= maxNumTrials; ++trial) { + for (int trial = 1; trial <= maxNumTrials; ++trial) { /* It is currently not safe to reuse solver if an exception has * occurred,so clone every time */ auto solver = std::unique_ptr(solverTemplate.clone()); @@ -378,196 +402,227 @@ AmiciSimulationRunner::AmiciResultPackageSimple runAndLogSimulation( * and RDataReporting::residuals otherwise */ - if(solver->getSensitivityOrder() >= amici::SensitivityOrder::first - && solver->getSensitivityMethod() - != amici::SensitivityMethod::none) { - solver->setReturnDataReportingMode(amici::RDataReporting::likelihood); + if (solver->getSensitivityOrder() >= + amici::SensitivityOrder::first && + solver->getSensitivityMethod() != + amici::SensitivityMethod::none) { + solver->setReturnDataReportingMode( + amici::RDataReporting::likelihood); } else { // unset sensitivity method, because `residuals` is not allowed // with `adjoint`, independent of sensitivity order solver->setSensitivityMethod(amici::SensitivityMethod::none); - solver->setReturnDataReportingMode(amici::RDataReporting::residuals); + solver->setReturnDataReportingMode( + amici::RDataReporting::residuals); } - } - if(trial - 1 == maxNumTrials) { - logger->logmessage(loglevel::error, - "Simulation trial %d/%d failed. 
Giving up.", - trial, maxNumTrials); + if (trial - 1 == maxNumTrials) { + logger->logmessage( + loglevel::error, + "Simulation trial %d/%d failed. Giving up.", + trial, + maxNumTrials); break; } - if(rdata) { + if (rdata) { /* something went wrong in the previous simulation. until we have * better exception handling, we check those fields to deduce where * the error occurred */ bool forwardFailed = std::isnan(rdata->llh); - bool backwardFailed = solver->getSensitivityOrder() >= amici::SensitivityOrder::first - && solver->getSensitivityMethod() == amici::SensitivityMethod::adjoint - && !rdata->sllh.empty() && std::isnan(rdata->sllh[0]); + bool backwardFailed = solver->getSensitivityOrder() >= + amici::SensitivityOrder::first && + solver->getSensitivityMethod() == + amici::SensitivityMethod::adjoint && + !rdata->sllh.empty() && + std::isnan(rdata->sllh[0]); // relax respective tolerances - if(forwardFailed) { + if (forwardFailed) { solver->setAbsoluteTolerance( - std::pow(errorRelaxation, trial - 1) - * solver->getAbsoluteTolerance()); + std::pow(errorRelaxation, trial - 1) * + solver->getAbsoluteTolerance()); solver->setRelativeTolerance( - std::pow(errorRelaxation, trial - 1) - * solver->getRelativeTolerance()); + std::pow(errorRelaxation, trial - 1) * + solver->getRelativeTolerance()); } else if (backwardFailed) { solver->setAbsoluteToleranceQuadratures( - std::pow(errorRelaxation, trial - 1) - * solver->getAbsoluteToleranceQuadratures()); + std::pow(errorRelaxation, trial - 1) * + solver->getAbsoluteToleranceQuadratures()); solver->setRelativeToleranceQuadratures( - std::pow(errorRelaxation, trial - 1) - * solver->getRelativeToleranceQuadratures()); + std::pow(errorRelaxation, trial - 1) * + solver->getRelativeToleranceQuadratures()); solver->setAbsoluteToleranceB( - std::pow(errorRelaxation, trial - 1) - * solver->getAbsoluteToleranceB()); + std::pow(errorRelaxation, trial - 1) * + solver->getAbsoluteToleranceB()); solver->setRelativeToleranceB( - std::pow(errorRelaxation, trial - 1) - * solver->getRelativeToleranceB()); + std::pow(errorRelaxation, trial - 1) * + solver->getRelativeToleranceB()); } logger->logmessage( - loglevel::warning, - "Error during simulation (try %d/%d), " - "retrying with relaxed error tolerances (*= %g): " - "abs: %g rel: %g quadAbs: %g quadRel: %g " - "abs_asa: %g, rel_asa: %g", - trial - 1, maxNumTrials, errorRelaxation, - solver->getAbsoluteTolerance(), - solver->getRelativeTolerance(), - solver->getAbsoluteToleranceQuadratures(), - solver->getRelativeToleranceQuadratures(), - solver->getAbsoluteToleranceB(), - solver->getRelativeToleranceB()); + loglevel::warning, + "Error during simulation (try %d/%d), " + "retrying with relaxed error tolerances (*= %g): " + "abs: %g rel: %g quadAbs: %g quadRel: %g " + "abs_asa: %g, rel_asa: %g", + trial - 1, + maxNumTrials, + errorRelaxation, + solver->getAbsoluteTolerance(), + solver->getRelativeTolerance(), + solver->getAbsoluteToleranceQuadratures(), + solver->getRelativeToleranceQuadratures(), + solver->getAbsoluteToleranceB(), + solver->getRelativeToleranceB()); } try { - rdata = run_amici_simulation(*solver, edata.get(), model, false, logger); + rdata = run_amici_simulation( + *solver, edata.get(), model, false, logger); } catch (std::exception const& e) { - std::cerr<status); } logger->logmessage( - loglevel::warning, "Error during simulation: %s (%s)", - e.what(), status.c_str()); + loglevel::warning, + "Error during simulation: %s (%s)", + e.what(), + status.c_str()); } - if(rdata && rdata->status == 
amici::AMICI_SUCCESS) + if (rdata && rdata->status == amici::AMICI_SUCCESS) break; } double timeSeconds = simulationTimer.getTotal(); printSimulationResult(logger, jobId, rdata.get(), timeSeconds); - if (resultWriter && rdata && (solverTemplate.getSensitivityOrder() - > amici::SensitivityOrder::none || logLineSearch)) { - saveSimulation(resultWriter->getH5File(), resultWriter->getRootPath(), - model.getParameters(), rdata->llh, rdata->sllh, - timeSeconds, rdata->x, rdata->sx, rdata->y, - jobId, rdata->status, logger->getPrefix()); + if (resultWriter && rdata && + (solverTemplate.getSensitivityOrder() > amici::SensitivityOrder::none || + logLineSearch)) { + saveSimulation( + resultWriter->getH5File(), + resultWriter->getRootPath(), + model.getParameters(), + rdata->llh, + rdata->sllh, + timeSeconds, + rdata->x, + rdata->sx, + rdata->y, + jobId, + rdata->status, + logger->getPrefix()); } - if(rdata) { - return AmiciSimulationRunner::AmiciResultPackageSimple { + if (rdata) { + return AmiciSimulationRunner::AmiciResultPackageSimple{ rdata->llh, timeSeconds, - (solverTemplate.getSensitivityOrder() - > amici::SensitivityOrder::none - && solverTemplate.getSensitivityMethod() != amici::SensitivityMethod::none) - ? rdata->sllh : std::vector(), - rdata->y, rdata->sigmay, + (solverTemplate.getSensitivityOrder() > + amici::SensitivityOrder::none && + solverTemplate.getSensitivityMethod() != + amici::SensitivityMethod::none) + ? rdata->sllh + : std::vector(), + rdata->y, + rdata->sigmay, sendStates ? rdata->x : std::vector(), - rdata->status - }; + rdata->status}; } // AMICI failed expectedly and did not return anything - return AmiciSimulationRunner::AmiciResultPackageSimple { + return AmiciSimulationRunner::AmiciResultPackageSimple{ NAN, timeSeconds, - (solverTemplate.getSensitivityOrder() - > amici::SensitivityOrder::none) - ? std::vector(model.nplist(), NAN) : std::vector(), + (solverTemplate.getSensitivityOrder() > amici::SensitivityOrder::none) + ? std::vector(model.nplist(), NAN) + : std::vector(), std::vector(model.nytrue, NAN), std::vector(model.nytrue, NAN), - sendStates ? std::vector(model.nx_rdata, NAN) : std::vector(), - amici::AMICI_UNRECOVERABLE_ERROR - }; - + sendStates ? 
std::vector(model.nx_rdata, NAN) + : std::vector(), + amici::AMICI_UNRECOVERABLE_ERROR}; } FunctionEvaluationStatus getModelOutputsAndSigmas( - MultiConditionDataProvider *dataProvider, - LoadBalancerMaster *loadBalancer, - int maxSimulationsPerPackage, - OptimizationResultWriter *resultWriter, - bool logLineSearch, - gsl::span parameters, - std::vector > &modelOutputs, - std::vector > &modelSigmas, - Logger *logger, double * /*cpuTime*/, bool sendStates) -{ + MultiConditionDataProvider* dataProvider, + LoadBalancerMaster* loadBalancer, + int maxSimulationsPerPackage, + OptimizationResultWriter* resultWriter, + bool logLineSearch, + gsl::span parameters, + std::vector>& modelOutputs, + std::vector>& modelSigmas, + Logger* logger, + double* /*cpuTime*/, + bool sendStates) { int errors = 0; - std::vector dataIndices(dataProvider->getNumberOfSimulationConditions()); + std::vector dataIndices( + dataProvider->getNumberOfSimulationConditions()); std::iota(dataIndices.begin(), dataIndices.end(), 0); modelOutputs.resize(dataIndices.size()); - auto parameterVector = std::vector(parameters.begin(), - parameters.end()); - auto jobFinished = [&errors, &modelOutputs, &modelSigmas](JobData *job, int /*dataIdx*/) { + auto parameterVector = + std::vector(parameters.begin(), parameters.end()); + auto jobFinished = [&errors, &modelOutputs, &modelSigmas]( + JobData* job, int /*dataIdx*/) { // deserialize auto results = - amici::deserializeFromChar ( - job->recvBuffer.data(), job->recvBuffer.size()); + amici::deserializeFromChar( + job->recvBuffer.data(), job->recvBuffer.size()); std::vector().swap(job->recvBuffer); // free buffer - for (auto const& [condition_idx, result]: results) { + for (auto const& [condition_idx, result] : results) { errors += result.status; modelOutputs[condition_idx] = result.modelOutput; modelSigmas[condition_idx] = result.modelSigmas; } }; - AmiciSimulationRunner simRunner(parameterVector, - amici::SensitivityOrder::none, - dataIndices, - jobFinished, - nullptr /* aggregate */, - logger?logger->getPrefix():""); - + AmiciSimulationRunner simRunner( + parameterVector, + amici::SensitivityOrder::none, + dataIndices, + jobFinished, + nullptr /* aggregate */, + logger ? logger->getPrefix() : ""); #ifdef PARPE_ENABLE_MPI if (loadBalancer && loadBalancer->isRunning()) { - errors += simRunner.runDistributedMemory(loadBalancer, - maxSimulationsPerPackage); + errors += simRunner.runDistributedMemory( + loadBalancer, maxSimulationsPerPackage); } else { #endif errors += simRunner.runSharedMemory( - [&dataProvider, &resultWriter, logLineSearch, &sendStates](std::vector &buffer, int jobId) { - messageHandler(dataProvider, resultWriter, logLineSearch, - buffer, jobId, sendStates); - }); + [&dataProvider, &resultWriter, logLineSearch, &sendStates]( + std::vector& buffer, int jobId) { + messageHandler( + dataProvider, + resultWriter, + logLineSearch, + buffer, + jobId, + sendStates); + }); #ifdef PARPE_ENABLE_MPI } #endif - return errors == 0 ? functionEvaluationSuccess - : functionEvaluationFailure; + return errors == 0 ? 
functionEvaluationSuccess : functionEvaluationFailure; } - - -void messageHandler(MultiConditionDataProvider *dataProvider, - OptimizationResultWriter *resultWriter, - bool logLineSearch, - std::vector &buffer, int jobId, - bool sendStates) { +void messageHandler( + MultiConditionDataProvider* dataProvider, + OptimizationResultWriter* resultWriter, + bool logLineSearch, + std::vector& buffer, + int jobId, + bool sendStates) { #if QUEUE_WORKER_H_VERBOSE >= 2 int mpiRank; @@ -580,23 +635,29 @@ void messageHandler(MultiConditionDataProvider *dataProvider, auto model = dataProvider->getModel(); // unpack simulation job data - auto workPackage = amici::deserializeFromChar( - buffer.data(), buffer.size()); + auto workPackage = + amici::deserializeFromChar( + buffer.data(), buffer.size()); solver->setSensitivityOrder(workPackage.sensitivityOrder); AmiciSummedGradientFunction::ResultMap results; // run simulations for all condition indices - for(auto conditionIdx: workPackage.conditionIndices) { + for (auto conditionIdx : workPackage.conditionIndices) { dataProvider->updateSimulationParametersAndScale( - conditionIdx, - workPackage.optimizationParameters, - *model); - Logger logger(workPackage.logPrefix - + "c" + std::to_string(conditionIdx)); + conditionIdx, workPackage.optimizationParameters, *model); + Logger logger( + workPackage.logPrefix + "c" + std::to_string(conditionIdx)); auto result = runAndLogSimulation( - *solver, *model, conditionIdx, jobId, dataProvider, - resultWriter, logLineSearch, &logger, sendStates); + *solver, + *model, + conditionIdx, + jobId, + dataProvider, + resultWriter, + logLineSearch, + &logger, + sendStates); results[conditionIdx] = result; } @@ -609,50 +670,53 @@ void messageHandler(MultiConditionDataProvider *dataProvider, } AmiciSummedGradientFunction::AmiciSummedGradientFunction( - MultiConditionDataProvider *dataProvider, - LoadBalancerMaster *loadBalancer, - OptimizationResultWriter *resultWriter) - : dataProvider(dataProvider), - loadBalancer(loadBalancer), - model(dataProvider->getModel()), - solver(dataProvider->getSolver()), - solverOriginal(solver->clone()), - resultWriter(resultWriter) -{ - if(auto env = std::getenv("PARPE_LOG_SIMULATIONS")) { + MultiConditionDataProvider* dataProvider, + LoadBalancerMaster* loadBalancer, + OptimizationResultWriter* resultWriter) + : dataProvider(dataProvider) + , loadBalancer(loadBalancer) + , model(dataProvider->getModel()) + , solver(dataProvider->getSolver()) + , solverOriginal(solver->clone()) + , resultWriter(resultWriter) { + if (auto env = std::getenv("PARPE_LOG_SIMULATIONS")) { logLineSearch = env[0] == '1'; } - if(auto env = std::getenv("PARPE_MAX_SIMULATIONS_PER_PACKAGE")) { + if (auto env = std::getenv("PARPE_MAX_SIMULATIONS_PER_PACKAGE")) { maxSimulationsPerPackage = std::stoi(env); } - if(auto env = - std::getenv("PARPE_MAX_GRADIENT_SIMULATIONS_PER_PACKAGE")) { + if (auto env = std::getenv("PARPE_MAX_GRADIENT_SIMULATIONS_PER_PACKAGE")) { maxGradientSimulationsPerPackage = std::stoi(env); } } FunctionEvaluationStatus AmiciSummedGradientFunction::evaluate( - gsl::span parameters, int dataset, - double &fval, gsl::span gradient, Logger *logger, - double *cpuTime) const -{ + gsl::span parameters, + int dataset, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const { std::vector datasets(1); datasets.at(0) = dataset; return evaluate(parameters, datasets, fval, gradient, logger, cpuTime); } FunctionEvaluationStatus AmiciSummedGradientFunction::evaluate( - gsl::span parameters, 
std::vector datasets, - double &fval, gsl::span gradient, Logger *logger, - double *cpuTime) const -{ + gsl::span parameters, + std::vector datasets, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const { #ifdef NO_OBJ_FUN_EVAL if (objectiveFunctionGradient) - std::fill(objectiveFunctionGradient, - objectiveFunctionGradient + numOptimizationParameters_, - 0); + std::fill( + objectiveFunctionGradient, + objectiveFunctionGradient + numOptimizationParameters_, + 0); *objectiveFunctionValue = 1; return 0; #endif @@ -662,8 +726,8 @@ FunctionEvaluationStatus AmiciSummedGradientFunction::evaluate( if (!gradient.empty()) std::fill(gradient.begin(), gradient.end(), 0.0); - int errors = runSimulations(parameters, fval, gradient, datasets, - logger, cpuTime); + int errors = + runSimulations(parameters, fval, gradient, datasets, logger, cpuTime); if (errors || !std::isfinite(fval)) { fval = std::numeric_limits::infinity(); @@ -673,72 +737,87 @@ FunctionEvaluationStatus AmiciSummedGradientFunction::evaluate( return functionEvaluationSuccess; } -int AmiciSummedGradientFunction::numParameters() const -{ +int AmiciSummedGradientFunction::numParameters() const { return dataProvider->getNumOptimizationParameters(); } -std::vector AmiciSummedGradientFunction::getParameterIds() const -{ +std::vector AmiciSummedGradientFunction::getParameterIds() const { return dataProvider->getProblemParameterIds(); } FunctionEvaluationStatus AmiciSummedGradientFunction::getModelOutputsAndSigmas( - gsl::span parameters, - std::vector > &modelOutputs, - std::vector > &modelSigmas, - Logger *logger, double *cpuTime) const -{ + gsl::span parameters, + std::vector>& modelOutputs, + std::vector>& modelSigmas, + Logger* logger, + double* cpuTime) const { return parpe::getModelOutputsAndSigmas( - dataProvider, loadBalancer, maxSimulationsPerPackage, resultWriter, - logLineSearch, parameters, modelOutputs, modelSigmas, - logger, cpuTime, sendStates); + dataProvider, + loadBalancer, + maxSimulationsPerPackage, + resultWriter, + logLineSearch, + parameters, + modelOutputs, + modelSigmas, + logger, + cpuTime, + sendStates); } - -std::vector > AmiciSummedGradientFunction::getAllMeasurements() const { +std::vector> +AmiciSummedGradientFunction::getAllMeasurements() const { return dataProvider->getAllMeasurements(); } -void AmiciSummedGradientFunction::messageHandler(std::vector &buffer, int jobId) const { - parpe::messageHandler(dataProvider, resultWriter, logLineSearch, buffer, - jobId, sendStates); +void AmiciSummedGradientFunction::messageHandler( + std::vector& buffer, + int jobId) const { + parpe::messageHandler( + dataProvider, resultWriter, logLineSearch, buffer, jobId, sendStates); } -amici::ParameterScaling AmiciSummedGradientFunction::getParameterScaling( - int parameterIndex) const -{ +amici::ParameterScaling +AmiciSummedGradientFunction::getParameterScaling(int parameterIndex) const { // parameterIndex is optimization parameter index, // not necessarily model parameter index! 
return dataProvider->getParameterScaleOpt(parameterIndex); } int AmiciSummedGradientFunction::runSimulations( - gsl::span optimizationParameters, - double &nllh, gsl::span objectiveFunctionGradient, - std::vector const& dataIndices, Logger *logger, double *cpuTime) const { + gsl::span optimizationParameters, + double& nllh, + gsl::span objectiveFunctionGradient, + std::vector const& dataIndices, + Logger* logger, + double* cpuTime) const { int errors = 0; auto parameterVector = std::vector( - optimizationParameters.begin(), - optimizationParameters.end()); + optimizationParameters.begin(), optimizationParameters.end()); double simulationTimeSec = 0.0; AmiciSimulationRunner simRunner( - parameterVector, - !objectiveFunctionGradient.empty() - ? amici::SensitivityOrder::first - : amici::SensitivityOrder::none, - dataIndices, - [&nllh, &objectiveFunctionGradient, &simulationTimeSec, - &optimizationParameters, &errors, this](JobData *job, int /*jobIdx*/) { - errors += this->aggregateLikelihood(*job, - nllh, - objectiveFunctionGradient, - simulationTimeSec, - optimizationParameters); - }, nullptr, logger?logger->getPrefix():""); + parameterVector, + !objectiveFunctionGradient.empty() ? amici::SensitivityOrder::first + : amici::SensitivityOrder::none, + dataIndices, + [&nllh, + &objectiveFunctionGradient, + &simulationTimeSec, + &optimizationParameters, + &errors, + this](JobData* job, int /*jobIdx*/) { + errors += this->aggregateLikelihood( + *job, + nllh, + objectiveFunctionGradient, + simulationTimeSec, + optimizationParameters); + }, + nullptr, + logger ? logger->getPrefix() : ""); #ifdef PARPE_ENABLE_MPI if (loadBalancer && loadBalancer->isRunning()) { @@ -746,40 +825,38 @@ int AmiciSummedGradientFunction::runSimulations( // send more simulations to each worker // to reduce communication overhead errors += simRunner.runDistributedMemory( - loadBalancer, - !objectiveFunctionGradient.empty() - ? maxGradientSimulationsPerPackage - : maxSimulationsPerPackage); + loadBalancer, + !objectiveFunctionGradient.empty() + ? 
maxGradientSimulationsPerPackage + : maxSimulationsPerPackage); } else { #endif errors += simRunner.runSharedMemory( - [this](std::vector &buffer, int jobId) { + [this](std::vector& buffer, int jobId) { this->messageHandler(buffer, jobId); - }); + }); #ifdef PARPE_ENABLE_MPI } #endif - if(cpuTime) + if (cpuTime) *cpuTime = simulationTimeSec; return errors; } int AmiciSummedGradientFunction::aggregateLikelihood( - JobData &data, double &negLogLikelihood, - gsl::span negLogLikelihoodGradient, - double &simulationTimeInS, - gsl::span optimizationParameters) const -{ + JobData& data, + double& negLogLikelihood, + gsl::span negLogLikelihoodGradient, + double& simulationTimeInS, + gsl::span optimizationParameters) const { int errors = 0; // deserialize - auto results = - amici::deserializeFromChar ( - data.recvBuffer.data(), data.recvBuffer.size()); + auto results = amici::deserializeFromChar( + data.recvBuffer.data(), data.recvBuffer.size()); std::vector().swap(data.recvBuffer); // free buffer - for (auto const& result : results) { int conditionIdx; ResultPackage resultPackage; @@ -797,26 +874,33 @@ int AmiciSummedGradientFunction::aggregateLikelihood( auto scaleOpt = dataProvider->getParameterScaleOpt(); dataProvider->mapAndSetOptimizationToSimulationVariables( - conditionIdx, optimizationParameters, p, scaleOpt, - scaleSim); + conditionIdx, optimizationParameters, p, scaleOpt, scaleSim); addSimulationGradientToObjectiveFunctionGradient( - conditionIdx, resultPackage.gradient, - negLogLikelihoodGradient, p); + conditionIdx, + resultPackage.gradient, + negLogLikelihoodGradient, + p); } } return errors; } -void AmiciSummedGradientFunction::addSimulationGradientToObjectiveFunctionGradient( - int conditionIdx, gsl::span simulationGradient, +void AmiciSummedGradientFunction:: + addSimulationGradientToObjectiveFunctionGradient( + int conditionIdx, + gsl::span simulationGradient, gsl::span objectiveFunctionGradient, - gsl::span simulationParameters) const { + gsl::span simulationParameters) const { dataProvider->mapSimulationToOptimizationGradientAddMultiply( - conditionIdx, simulationGradient, - objectiveFunctionGradient, simulationParameters, -1.0); + conditionIdx, + simulationGradient, + objectiveFunctionGradient, + simulationParameters, + -1.0); } -void AmiciSummedGradientFunction::setSensitivityOptions(bool sensiRequired) const { +void AmiciSummedGradientFunction::setSensitivityOptions( + bool sensiRequired) const { // sensitivities requested? 
if (sensiRequired) { solver->setSensitivityOrder(solverOriginal->getSensitivityOrder()); @@ -827,5 +911,4 @@ void AmiciSummedGradientFunction::setSensitivityOptions(bool sensiRequired) cons } } - } // namespace parpe diff --git a/src/parpeamici/optimizationApplication.cpp b/src/parpeamici/optimizationApplication.cpp index 100d0fa0b..921c6517d 100644 --- a/src/parpeamici/optimizationApplication.cpp +++ b/src/parpeamici/optimizationApplication.cpp @@ -1,23 +1,23 @@ #include +#include #include #include #include -#include #include -#include +#include #ifdef PARPE_ENABLE_MPI #include #endif +#include +#include +#include #include #include #include -#include #include -#include -#include namespace parpe { @@ -33,19 +33,19 @@ void signalHandler(int sig) { (*oldact.sa_sigaction)(sig, nullptr, nullptr); } -int OptimizationApplication::init(int argc, char **argv) { +int OptimizationApplication::init(int argc, char** argv) { // reduce verbosity - if(std::getenv("PARPE_NO_DEBUG")) + if (std::getenv("PARPE_NO_DEBUG")) minimumLogLevel = loglevel::info; - if(auto status = parseCliOptionsPreMpiInit(argc, argv)) + if (auto status = parseCliOptionsPreMpiInit(argc, argv)) return status; // install signal handler for backtrace on error sigaction(SIGSEGV, &act, &oldact); sigaction(SIGHUP, &act, nullptr); - if(withMPI && !getMpiActive()) + if (withMPI && !getMpiActive()) initMPI(&argc, &argv); printMPIInfo(); @@ -54,19 +54,18 @@ int OptimizationApplication::init(int argc, char **argv) { return parseCliOptionsPostMpiInit(argc, argv); } -void OptimizationApplication::runMultiStarts() const -{ +void OptimizationApplication::runMultiStarts() const { // TODO: use uniqe_ptr, not ref - MultiStartOptimization optimizer(*multiStartOptimizationProblem, true, - first_start_idx); + MultiStartOptimization optimizer( + *multiStartOptimizationProblem, true, first_start_idx); optimizer.run(); } -int OptimizationApplication::parseCliOptionsPreMpiInit(int argc, char **argv) -{ +int OptimizationApplication::parseCliOptionsPreMpiInit(int argc, char** argv) { while (true) { int optionIndex = 0; - auto c = getopt_long(argc, argv, shortOptions, longOptions, &optionIndex); + auto c = + getopt_long(argc, argv, shortOptions, longOptions, &optionIndex); if (c == -1) break; // no more options @@ -83,13 +82,14 @@ int OptimizationApplication::parseCliOptionsPreMpiInit(int argc, char **argv) return 0; } -int OptimizationApplication::parseCliOptionsPostMpiInit(int argc, char **argv) { +int OptimizationApplication::parseCliOptionsPostMpiInit(int argc, char** argv) { // restart from first argument optind = 1; while (true) { int optionIndex = 0; - int c = getopt_long(argc, argv, shortOptions, longOptions, &optionIndex); + int c = + getopt_long(argc, argv, shortOptions, longOptions, &optionIndex); if (c == -1) break; // no more options @@ -123,42 +123,42 @@ int OptimizationApplication::parseCliOptionsPostMpiInit(int argc, char **argv) { if (optind < argc) { dataFileName = argv[optind++]; } else { - logmessage(loglevel::critical, - "Must provide input file as first and only argument to %s.", - argv[0]); + logmessage( + loglevel::critical, + "Must provide input file as first and only argument to %s.", + argv[0]); return 1; } return 0; } -void OptimizationApplication::printUsage(char * const argZero) -{ +void OptimizationApplication::printUsage(char* const argZero) { printf("Usage: %s [OPTION]... 
FILE\n\n", argZero); printf("FILE: HDF5 data file\n\n"); - printf("Options: \n" - " -o, --outfile-prefix Prefix for result files (path + " - "filename)\n" - " -t, --task What to do? Parameter estimation (default) " - "or check gradient ('gradient_check')\n" - " -s, --first-start-idx Starting point index for first optimization\n" - " -m, --mpi Enable MPI (default: off)\n" - " -h, --help Print this help text\n" - " -v, --version Print version info\n"); + printf( + "Options: \n" + " -o, --outfile-prefix Prefix for result files (path + " + "filename)\n" + " -t, --task What to do? Parameter estimation (default) " + "or check gradient ('gradient_check')\n" + " -s, --first-start-idx Starting point index for first optimization\n" + " -m, --mpi Enable MPI (default: off)\n" + " -h, --help Print this help text\n" + " -v, --version Print version info\n"); printf("\nSupported optimizers:\n"); printAvailableOptimizers(" "); } -void OptimizationApplication::logParPEVersion(H5::H5File const& file) const -{ +void OptimizationApplication::logParPEVersion(H5::H5File const& file) const { hdf5WriteStringAttribute(file, "/", "PARPE_VERSION", PARPE_VERSION); } -void OptimizationApplication::initMPI(int *argc, char ***argv) { +void OptimizationApplication::initMPI(int* argc, char*** argv) { #ifdef PARPE_ENABLE_MPI int thread_support_provided = 0; - int mpiErr = MPI_Init_thread(argc, argv, MPI_THREAD_MULTIPLE, - &thread_support_provided); + int mpiErr = MPI_Init_thread( + argc, argv, MPI_THREAD_MULTIPLE, &thread_support_provided); if (thread_support_provided != MPI_THREAD_MULTIPLE) throw std::runtime_error("MPI_THREAD_MULTIPLE not supported?"); @@ -177,20 +177,21 @@ void OptimizationApplication::initMPI(int *argc, char ***argv) { #endif } -int OptimizationApplication::run(int argc, char **argv) { +int OptimizationApplication::run(int argc, char** argv) { // start Timers WallTimer wallTimer; CpuTimer cpuTimer; int status = init(argc, argv); - if(status) + if (status) return status; if (dataFileName.empty()) { - logmessage(loglevel::critical, - "No input file provided. Must provide input file as first " - "and only argument or set " - "OptimizationApplication::inputFileName manually."); + logmessage( + loglevel::critical, + "No input file provided. 
Must provide input file as first " + "and only argument or set " + "OptimizationApplication::inputFileName manually."); return 1; } @@ -225,9 +226,9 @@ int OptimizationApplication::run(int argc, char **argv) { void OptimizationApplication::runMaster() { switch (operationType) { case OperationType::gradientCheck: { - const int numParameterIndicesToCheck = 10000; + int const numParameterIndicesToCheck = 10000; optimizationProblemGradientCheckMultiEps( - problem.get(), numParameterIndicesToCheck); + problem.get(), numParameterIndicesToCheck); break; } case OperationType::parameterEstimation: @@ -240,15 +241,20 @@ void OptimizationApplication::runMaster() { void OptimizationApplication::runWorker() { // TODO: Move out of here LoadBalancerWorker lbw; - lbw.run([this](std::vector &buffer, int jobId) { + lbw.run([this](std::vector& buffer, int jobId) { // TODO: this is so damn ugly - auto sgf = dynamic_cast*>(problem->cost_fun_.get()); - if(sgf) { + auto sgf = + dynamic_cast*>( + problem->cost_fun_.get()); + if (sgf) { // non-hierarchical - dynamic_cast(sgf->getWrappedFunction())->messageHandler(buffer, jobId); + dynamic_cast( + sgf->getWrappedFunction()) + ->messageHandler(buffer, jobId); } else { // hierarchical - auto hierarch = dynamic_cast(problem->cost_fun_.get()); + auto hierarch = dynamic_cast( + problem->cost_fun_.get()); Expects(hierarch != nullptr); hierarch->getWrappedFunction()->messageHandler(buffer, jobId); } @@ -260,9 +266,9 @@ void OptimizationApplication::runSingleProcess() { // run serially switch (operationType) { case OperationType::gradientCheck: { - const int numParameterIndicesToCheck = 10000; + int const numParameterIndicesToCheck = 10000; optimizationProblemGradientCheckMultiEps( - problem.get(), numParameterIndicesToCheck); + problem.get(), numParameterIndicesToCheck); break; } case OperationType::parameterEstimation: @@ -275,33 +281,47 @@ void OptimizationApplication::runSingleProcess() { } } -void OptimizationApplication::finalizeTiming(double wallTimeSeconds, double cpuTimeSeconds) { +void OptimizationApplication::finalizeTiming( + double wallTimeSeconds, + double cpuTimeSeconds) { #ifdef PARPE_ENABLE_MPI // wall-time for current MPI process // total run-time double totalCpuTimeInSeconds = 0; - if(getMpiActive()) { - MPI_Reduce(&cpuTimeSeconds, &totalCpuTimeInSeconds, 1, MPI_DOUBLE, MPI_SUM, 0, - MPI_COMM_WORLD); + if (getMpiActive()) { + MPI_Reduce( + &cpuTimeSeconds, + &totalCpuTimeInSeconds, + 1, + MPI_DOUBLE, + MPI_SUM, + 0, + MPI_COMM_WORLD); } else { totalCpuTimeInSeconds = cpuTimeSeconds; } if (getMpiRank() < 1) { - logmessage(loglevel::info, "Walltime on master: %fs, CPU time of all processes: %fs", - wallTimeSeconds, totalCpuTimeInSeconds); + logmessage( + loglevel::info, + "Walltime on master: %fs, CPU time of all processes: %fs", + wallTimeSeconds, + totalCpuTimeInSeconds); saveTotalCpuTime(h5File, totalCpuTimeInSeconds); } #else - logmessage(LOGLVL_INFO, "Total walltime: %fs, CPU time: %fs", - wallTimeSeconds, cpuTimeSeconds); + logmessage( + LOGLVL_INFO, + "Total walltime: %fs, CPU time: %fs", + wallTimeSeconds, + cpuTimeSeconds); saveTotalCpuTime(file_id, cpuTimeSeconds); #endif } std::string OptimizationApplication::processResultFilenameCommandLineArgument( - const char *commandLineArg) { + char const* commandLineArg) { std::size_t bufSize = 1024; char tmpFileName[bufSize]; int rank = std::max(getMpiRank(), 0); @@ -343,22 +363,25 @@ OptimizationApplication::~OptimizationApplication() { h5File.close(); problem.reset(nullptr); #ifdef PARPE_ENABLE_MPI - 
if(getMpiActive()) + if (getMpiActive()) MPI_Finalize(); #endif } -void saveTotalCpuTime(H5::H5File const& file, const double timeInSeconds) -{ +void saveTotalCpuTime(H5::H5File const& file, double const timeInSeconds) { hsize_t dims[1] = {1}; [[maybe_unused]] auto lock = hdf5MutexGetLock(); - //std::string pathStr = rootPath + "/totalTimeInSec"; + // std::string pathStr = rootPath + "/totalTimeInSec"; std::string pathStr = "/totalTimeInSec"; - H5LTmake_dataset(file.getId(), pathStr.c_str(), 1, dims, H5T_NATIVE_DOUBLE, - &timeInSeconds); - + H5LTmake_dataset( + file.getId(), + pathStr.c_str(), + 1, + dims, + H5T_NATIVE_DOUBLE, + &timeInSeconds); } } // namespace parpe diff --git a/src/parpeamici/simulationResultWriter.cpp b/src/parpeamici/simulationResultWriter.cpp index 75f633aaf..c1a29d575 100644 --- a/src/parpeamici/simulationResultWriter.cpp +++ b/src/parpeamici/simulationResultWriter.cpp @@ -2,92 +2,83 @@ #include -#include #include // NAN +#include #include namespace parpe { -SimulationResultWriter::SimulationResultWriter(H5::H5File const& file, - std::string rootPath) - : rootPath(std::move(rootPath)) -{ +SimulationResultWriter::SimulationResultWriter( + H5::H5File const& file, + std::string rootPath) + : rootPath(std::move(rootPath)) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); this->file = file; updatePaths(); } -SimulationResultWriter::~SimulationResultWriter() -{ +SimulationResultWriter::~SimulationResultWriter() { [[maybe_unused]] auto lock = hdf5MutexGetLock(); file.close(); } - -SimulationResultWriter::SimulationResultWriter(const std::string &hdf5FileName, - std::string rootPath) - : rootPath(std::move(rootPath)) -{ +SimulationResultWriter::SimulationResultWriter( + std::string const& hdf5FileName, + std::string rootPath) + : rootPath(std::move(rootPath)) { file = hdf5OpenForAppending(hdf5FileName); updatePaths(); } - -void SimulationResultWriter::createDatasets(hsize_t numSimulations) -{ +void SimulationResultWriter::createDatasets(hsize_t numSimulations) { [[maybe_unused]] auto lock = parpe::hdf5MutexGetLock(); - - double fillValueDbl = NAN; /* Fill value for the dataset */ + double fillValueDbl = NAN; /* Fill value for the dataset */ H5::DSetCreatPropList propList; propList.setFillValue(H5::PredType::NATIVE_DOUBLE, &fillValueDbl); hdf5EnsureGroupExists(file, rootPath); - if(saveX || saveYMes || saveYSim) + if (saveX || saveYMes || saveYSim) hdf5EnsureGroupExists(file, timePath); - if(saveX) + if (saveX) hdf5EnsureGroupExists(file, xPath); - if(saveYMes) + if (saveYMes) hdf5EnsureGroupExists(file, yMesPath); - if(saveYSim) + if (saveYSim) hdf5EnsureGroupExists(file, ySimPath); - if(save_parameters_) + if (save_parameters_) hdf5EnsureGroupExists(file, parametersPath); // Individual datasets will be created per condition, since we need // condition-specific number of timepoints // Can only create llh dataset in advance - if(saveLlh && !file.nameExists(llhPath)) { + if (saveLlh && !file.nameExists(llhPath)) { hsize_t dims[] = {numSimulations}; H5::DataSpace dataspace(1, dims); hsize_t one = 1; propList.setChunk(1, &one); - file.createDataSet(llhPath, H5::PredType::NATIVE_DOUBLE, - dataspace, propList); + file.createDataSet( + llhPath, H5::PredType::NATIVE_DOUBLE, dataspace, propList); } file.flush(H5F_SCOPE_LOCAL); } - -void SimulationResultWriter::createDatasets(int numberOfSimulations) -{ +void SimulationResultWriter::createDatasets(int numberOfSimulations) { createDatasets(static_cast(numberOfSimulations)); } - void SimulationResultWriter::saveSimulationResults( - 
const amici::ExpData *edata, - const amici::ReturnData *rdata, - int simulationIdx) -{ - saveMeasurements(edata->getObservedData(), edata->nt(), - edata->nytrue(), simulationIdx); + amici::ExpData const* edata, + amici::ReturnData const* rdata, + int simulationIdx) { + saveMeasurements( + edata->getObservedData(), edata->nt(), edata->nytrue(), simulationIdx); saveTimepoints(rdata->ts, simulationIdx); - saveModelOutputs(rdata->y, edata->nt(), edata->nytrue(), simulationIdx); + saveModelOutputs(rdata->y, edata->nt(), edata->nytrue(), simulationIdx); saveStates(rdata->x, rdata->nt, rdata->nx, simulationIdx); saveLikelihood(rdata->llh, simulationIdx); // TODO: model or edata? saveParameters(edata->parameters) @@ -96,10 +87,10 @@ void SimulationResultWriter::saveSimulationResults( file.flush(H5F_SCOPE_LOCAL); } -void SimulationResultWriter::saveTimepoints(gsl::span timepoints, - int simulationIdx) const -{ - if(timepoints.empty()) +void SimulationResultWriter::saveTimepoints( + gsl::span timepoints, + int simulationIdx) const { + if (timepoints.empty()) return; [[maybe_unused]] auto lock = parpe::hdf5MutexGetLock(); @@ -109,91 +100,99 @@ void SimulationResultWriter::saveTimepoints(gsl::span timepoints, hsize_t dims[rank] = {static_cast(timepoints.size())}; H5::DataSpace dataspace(rank, dims); auto dataset = file.createDataSet( - timePath + "/" + std::to_string(simulationIdx), - H5::PredType::NATIVE_DOUBLE, dataspace); + timePath + "/" + std::to_string(simulationIdx), + H5::PredType::NATIVE_DOUBLE, + dataspace); dataset.write(timepoints.data(), H5::PredType::NATIVE_DOUBLE); } - void SimulationResultWriter::saveMeasurements( - gsl::span measurements, int nt, - int nytrue, int simulationIdx) const -{ - if(!saveYMes || nt < 1 || nytrue < 1 || measurements.empty()) { + gsl::span measurements, + int nt, + int nytrue, + int simulationIdx) const { + if (!saveYMes || nt < 1 || nytrue < 1 || measurements.empty()) { return; } - Expects(measurements.size() == - static_cast(nt * nytrue)); + Expects( + measurements.size() == + static_cast(nt * nytrue)); [[maybe_unused]] auto lock = parpe::hdf5MutexGetLock(); // Create dataset constexpr int rank = 2; - hsize_t dims[rank] = {static_cast(nt), - static_cast(nytrue)}; + hsize_t dims[rank] = { + static_cast(nt), static_cast(nytrue)}; H5::DataSpace dataspace(rank, dims); auto dataset = file.createDataSet( - yMesPath + "/" + std::to_string(simulationIdx), - H5::PredType::NATIVE_DOUBLE, dataspace); + yMesPath + "/" + std::to_string(simulationIdx), + H5::PredType::NATIVE_DOUBLE, + dataspace); dataset.write(measurements.data(), H5::PredType::NATIVE_DOUBLE); } void SimulationResultWriter::saveModelOutputs( - gsl::span outputs, int nt, - int nytrue, int simulationIdx) const -{ - if(!saveYSim || nt < 1 || nytrue < 1 || outputs.empty()) { + gsl::span outputs, + int nt, + int nytrue, + int simulationIdx) const { + if (!saveYSim || nt < 1 || nytrue < 1 || outputs.empty()) { return; } - Expects(outputs.size() == - static_cast(nt * nytrue)); + Expects( + outputs.size() == + static_cast(nt * nytrue)); [[maybe_unused]] auto lock = parpe::hdf5MutexGetLock(); // Create dataset constexpr int rank = 2; - hsize_t dims[rank] = {static_cast(nt), - static_cast(nytrue)}; + hsize_t dims[rank] = { + static_cast(nt), static_cast(nytrue)}; H5::DataSpace dataspace(rank, dims); auto dataset = file.createDataSet( - ySimPath + "/" + std::to_string(simulationIdx), - H5::PredType::NATIVE_DOUBLE, dataspace); + ySimPath + "/" + std::to_string(simulationIdx), + H5::PredType::NATIVE_DOUBLE, + 
dataspace); dataset.write(outputs.data(), H5::PredType::NATIVE_DOUBLE); } - void SimulationResultWriter::saveStates( - gsl::span states, int nt, int nx, int simulationIdx) const -{ - if(!saveX || states.empty() || nx < 1 || nt < 1) { + gsl::span states, + int nt, + int nx, + int simulationIdx) const { + if (!saveX || states.empty() || nx < 1 || nt < 1) { return; } - Expects(states.size() == - static_cast(nt * nx)); + Expects( + states.size() == static_cast(nt * nx)); [[maybe_unused]] auto lock = parpe::hdf5MutexGetLock(); // Create dataset constexpr int rank = 2; - hsize_t dims[rank] = {static_cast(nt), - static_cast(nx)}; + hsize_t dims[rank] = {static_cast(nt), static_cast(nx)}; H5::DataSpace dataspace(rank, dims); auto dataset = file.createDataSet( - xPath + "/" + std::to_string(simulationIdx), - H5::PredType::NATIVE_DOUBLE, dataspace); + xPath + "/" + std::to_string(simulationIdx), + H5::PredType::NATIVE_DOUBLE, + dataspace); dataset.write(states.data(), H5::PredType::NATIVE_DOUBLE); } -void SimulationResultWriter::saveParameters(gsl::span parameters, int simulationIdx) const -{ - if(parameters.empty()) +void SimulationResultWriter::saveParameters( + gsl::span parameters, + int simulationIdx) const { + if (parameters.empty()) return; [[maybe_unused]] auto lock = parpe::hdf5MutexGetLock(); @@ -204,15 +203,15 @@ void SimulationResultWriter::saveParameters(gsl::span parameters, H5::DataSpace dataspace(rank, dims); auto dataset = file.createDataSet( parametersPath + "/" + std::to_string(simulationIdx), - H5::PredType::NATIVE_DOUBLE, dataspace); + H5::PredType::NATIVE_DOUBLE, + dataspace); dataset.write(parameters.data(), H5::PredType::NATIVE_DOUBLE); } - -void SimulationResultWriter::saveLikelihood(double llh, int simulationIdx) const -{ - if(!saveLlh) { +void SimulationResultWriter::saveLikelihood(double llh, int simulationIdx) + const { + if (!saveLlh) { return; } @@ -232,24 +231,18 @@ void SimulationResultWriter::saveLikelihood(double llh, int simulationIdx) const dataset.write(&llh, H5::PredType::NATIVE_DOUBLE, memspace, filespace); } - -H5::H5File SimulationResultWriter::reopenFile() -{ +H5::H5File SimulationResultWriter::reopenFile() { [[maybe_unused]] auto lock = hdf5MutexGetLock(); return H5::H5File(file); } - -void SimulationResultWriter::updatePaths() -{ +void SimulationResultWriter::updatePaths() { yMesPath = rootPath + "/yMes"; ySimPath = rootPath + "/ySim"; xPath = rootPath + "/x"; - llhPath = rootPath + "/llh"; - timePath = rootPath + "/t"; - parametersPath = rootPath + "/parameters"; - + llhPath = rootPath + "/llh"; + timePath = rootPath + "/t"; + parametersPath = rootPath + "/parameters"; } - } // namespace parpe diff --git a/src/parpeamici/standaloneSimulator.cpp b/src/parpeamici/standaloneSimulator.cpp index ae8a75295..2450f5f4a 100644 --- a/src/parpeamici/standaloneSimulator.cpp +++ b/src/parpeamici/standaloneSimulator.cpp @@ -1,10 +1,10 @@ #include +#include #include #include #include #include -#include #include #include #include @@ -21,22 +21,20 @@ namespace parpe { StandaloneSimulator::StandaloneSimulator(MultiConditionDataProvider* dp) - : dataProvider(dp) -{ + : dataProvider(dp) { if (auto env = std::getenv("PARPE_MAX_SIMULATIONS_PER_PACKAGE")) { maxSimulationsPerPackage = std::stoi(env); } } -int -StandaloneSimulator::run(const std::string& resultFile, - const std::string& resultPath, - std::map& optimizationParameters, - LoadBalancerMaster* loadBalancer, - H5::H5File const& conditionFile, - std::string conditionFilePath, - bool computeInnerParameters) -{ +int 
StandaloneSimulator::run( + std::string const& resultFile, + std::string const& resultPath, + std::map& optimizationParameters, + LoadBalancerMaster* loadBalancer, + H5::H5File const& conditionFile, + std::string conditionFilePath, + bool computeInnerParameters) { // Data to save SimulationResultWriter rw(resultFile, resultPath); rw.saveYMes = true; @@ -71,25 +69,22 @@ StandaloneSimulator::run(const std::string& resultFile, hdf5EnsureGroupExists(resultFileH5.getId(), resultPath.c_str()); { [[maybe_unused]] auto lock = hdf5MutexGetLock(); - hdf5Write1dStringDataset(resultFileH5, - resultPath, - "stateIds", - model->getStateIds()); - hdf5Write1dStringDataset(resultFileH5, - resultPath, - "observableIds", - model->getObservableIds()); - hdf5Write1dStringDataset(resultFileH5, - resultPath, - "parameterIds", - model->getParameterIds()); + hdf5Write1dStringDataset( + resultFileH5, resultPath, "stateIds", model->getStateIds()); + hdf5Write1dStringDataset( + resultFileH5, + resultPath, + "observableIds", + model->getObservableIds()); + hdf5Write1dStringDataset( + resultFileH5, resultPath, "parameterIds", model->getParameterIds()); } std::cout << "Starting simulation. Number of conditions: " << dataProvider->getNumberOfSimulationConditions() << std::endl; std::unique_ptr simRunner; - if(computeInnerParameters) { + if (computeInnerParameters) { // hierarchical optimization HierarchicalOptimizationWrapper hierarchical(nullptr, 0, 0); @@ -129,10 +124,11 @@ StandaloneSimulator::run(const std::string& resultFile, // Collect parameter values auto outerParameterNames = hierarchical.getParameterIds(); std::vector outerParameters(outerParameterNames.size()); - for(int i = 0; i < static_cast(outerParameterNames.size()); ++i) { + for (int i = 0; i < static_cast(outerParameterNames.size()); ++i) { outerParameters[i] = optimizationParameters[outerParameterNames[i]]; } - Expects(outerParameters.size() == (unsigned)hierarchical.numParameters()); + Expects( + outerParameters.size() == (unsigned)hierarchical.numParameters()); // expand parameter vector auto scalingDummy = hierarchical.getDefaultScalingFactors(); @@ -147,27 +143,29 @@ StandaloneSimulator::run(const std::string& resultFile, offsetDummy, sigmaDummy); - allFinished = [this, &dataIndices, &hierarchical, ¶meterValues, - &outerParameters, &resultFileH5, &resultPath, &model, - &rw](std::vector& jobs) - { + allFinished = [this, + &dataIndices, + &hierarchical, + ¶meterValues, + &outerParameters, + &resultFileH5, + &resultPath, + &model, + &rw](std::vector& jobs) { /* all finished */ // must wait for all jobs to finish because of hierarchical // optimization and scaling factors std::vector simulationResults(dataIndices.size()); - std::vector> modelOutputs( - dataIndices.size()); - std::vector> modelSigmas( - dataIndices.size()); - std::vector> modelStates( - dataIndices.size()); + std::vector> modelOutputs(dataIndices.size()); + std::vector> modelSigmas(dataIndices.size()); + std::vector> modelStates(dataIndices.size()); // collect all model outputs for (auto& job : jobs) { - auto results = amici::deserializeFromChar< - std::map>( + auto results = amici::deserializeFromChar>( job.recvBuffer.data(), job.recvBuffer.size()); std::vector().swap(job.recvBuffer); // free buffer for (auto& [condition_idx, result] : results) { @@ -178,7 +176,6 @@ StandaloneSimulator::run(const std::string& resultFile, simulationResults[condition_idx].modelSigmas; modelStates[condition_idx] = simulationResults[condition_idx].modelStates; - } } @@ -211,7 +208,9 @@ 
StandaloneSimulator::run(const std::string& resultFile, { [[maybe_unused]] auto lock = hdf5MutexGetLock(); amici::hdf5::createAndWriteDouble1DDataset( - resultFileH5, resultPath + "/problemParameters", parameterValues); + resultFileH5, + resultPath + "/problemParameters", + parameterValues); } // compute llh @@ -223,29 +222,32 @@ StandaloneSimulator::run(const std::string& resultFile, modelOutputs[conditionIdx], modelSigmas[conditionIdx]); - auto edata = dataProvider->getExperimentalDataForCondition( - conditionIdx); + auto edata = + dataProvider->getExperimentalDataForCondition(conditionIdx); rw.saveTimepoints(edata->getTimepoints(), conditionIdx); - if(!modelStates[conditionIdx].empty()) { - rw.saveStates(modelStates[conditionIdx], edata->nt(), - model->nx_rdata, conditionIdx); + if (!modelStates[conditionIdx].empty()) { + rw.saveStates( + modelStates[conditionIdx], + edata->nt(), + model->nx_rdata, + conditionIdx); } - rw.saveMeasurements(edata->getObservedData(), - edata->nt(), - edata->nytrue(), - conditionIdx); - rw.saveModelOutputs(modelOutputs[conditionIdx], - edata->nt(), - model->nytrue, - conditionIdx); + rw.saveMeasurements( + edata->getObservedData(), + edata->nt(), + edata->nytrue(), + conditionIdx); + rw.saveModelOutputs( + modelOutputs[conditionIdx], + edata->nt(), + model->nytrue, + conditionIdx); rw.saveLikelihood(llh, conditionIdx); // to save simulation parameters dataProvider->updateSimulationParametersAndScale( conditionIdx, parameterValues, *model); - rw.saveParameters(model->getParameters(), - conditionIdx); - + rw.saveParameters(model->getParameters(), conditionIdx); } return 0; }; @@ -263,8 +265,8 @@ StandaloneSimulator::run(const std::string& resultFile, loadBalancer, maxSimulationsPerPackage); } else { #endif - errors += - simRunner->runSharedMemory([this](std::vector& buffer, int jobId) { + errors += simRunner->runSharedMemory( + [this](std::vector& buffer, int jobId) { messageHandler(buffer, jobId); }); #ifdef PARPE_ENABLE_MPI @@ -274,54 +276,57 @@ StandaloneSimulator::run(const std::string& resultFile, // Collect parameter values auto parameterNames = wrappedFun->getParameterIds(); parameterValues.resize(parameterNames.size()); - for(int i = 0; i < static_cast(parameterNames.size()); ++i) { + for (int i = 0; i < static_cast(parameterNames.size()); ++i) { parameterValues[i] = optimizationParameters[parameterNames[i]]; } - Expects(parameterValues.size() == (unsigned)wrappedFun->numParameters()); + Expects( + parameterValues.size() == (unsigned)wrappedFun->numParameters()); { [[maybe_unused]] auto lock = hdf5MutexGetLock(); amici::hdf5::createAndWriteDouble1DDataset( - resultFileH5, resultPath + "/problemParameters", parameterValues); + resultFileH5, + resultPath + "/problemParameters", + parameterValues); } - jobFinished = - [&]( - JobData* job, - int /*dataIdx*/) { - /* job finished */ - auto results = amici::deserializeFromChar>( - job->recvBuffer.data(), job->recvBuffer.size()); - std::vector().swap(job->recvBuffer); // free buffer - - for (auto const& [condition_idx, result] : results) { - errors += result.status; - auto edata = - dataProvider->getExperimentalDataForCondition( - condition_idx); - - rw.saveTimepoints(edata->getTimepoints(), condition_idx); - if(!result.modelStates.empty()) { - rw.saveStates(result.modelStates, edata->nt(), - model->nx_rdata, condition_idx); - } - rw.saveMeasurements(edata->getObservedData(), - edata->nt(), - edata->nytrue(), - condition_idx); - rw.saveModelOutputs(result.modelOutput, - edata->nt(), - model->nytrue, - 
condition_idx); - rw.saveLikelihood(result.llh, condition_idx); - - // to save simulation parameters - dataProvider->updateSimulationParametersAndScale( - condition_idx, parameterValues, *model); - rw.saveParameters(model->getParameters(), condition_idx); + jobFinished = [&](JobData* job, int /*dataIdx*/) { + /* job finished */ + auto results = amici::deserializeFromChar< + std::map>( + job->recvBuffer.data(), job->recvBuffer.size()); + std::vector().swap(job->recvBuffer); // free buffer + + for (auto const& [condition_idx, result] : results) { + errors += result.status; + auto edata = dataProvider->getExperimentalDataForCondition( + condition_idx); + + rw.saveTimepoints(edata->getTimepoints(), condition_idx); + if (!result.modelStates.empty()) { + rw.saveStates( + result.modelStates, + edata->nt(), + model->nx_rdata, + condition_idx); } - }; + rw.saveMeasurements( + edata->getObservedData(), + edata->nt(), + edata->nytrue(), + condition_idx); + rw.saveModelOutputs( + result.modelOutput, + edata->nt(), + model->nytrue, + condition_idx); + rw.saveLikelihood(result.llh, condition_idx); + + // to save simulation parameters + dataProvider->updateSimulationParametersAndScale( + condition_idx, parameterValues, *model); + rw.saveParameters(model->getParameters(), condition_idx); + } + }; simRunner = std::make_unique( parameterValues, @@ -336,8 +341,8 @@ StandaloneSimulator::run(const std::string& resultFile, loadBalancer, maxSimulationsPerPackage); } else { #endif - errors += - simRunner->runSharedMemory([this](std::vector& buffer, int jobId) { + errors += simRunner->runSharedMemory( + [this](std::vector& buffer, int jobId) { messageHandler(buffer, jobId); }); #ifdef PARPE_ENABLE_MPI @@ -348,16 +353,16 @@ StandaloneSimulator::run(const std::string& resultFile, return errors; } -void -StandaloneSimulator::messageHandler(std::vector& buffer, int /*jobId*/) -{ +void StandaloneSimulator::messageHandler( + std::vector& buffer, + int /*jobId*/) { // TODO: pretty redundant with messageHandler in multiconditionproblem // unpack simulation job data auto model = dataProvider->getModel(); auto solver = dataProvider->getSolver(); - auto sim = - amici::deserializeFromChar( - buffer.data(), buffer.size()); + auto sim = amici::deserializeFromChar< + AmiciSimulationRunner::AmiciWorkPackageSimple>( + buffer.data(), buffer.size()); solver->setSensitivityOrder(sim.sensitivityOrder); #if QUEUE_WORKER_H_VERBOSE >= 2 @@ -385,10 +390,10 @@ StandaloneSimulator::messageHandler(std::vector& buffer, int /*jobId*/) } AmiciSimulationRunner::AmiciResultPackageSimple -StandaloneSimulator::runSimulation(int conditionIdx, - amici::Solver& solver, - amici::Model& model) -{ +StandaloneSimulator::runSimulation( + int conditionIdx, + amici::Solver& solver, + amici::Model& model) { // currently requires edata, since all condition specific parameters are set // via edata auto edata = dataProvider->getExperimentalDataForCondition(conditionIdx); @@ -396,7 +401,8 @@ StandaloneSimulator::runSimulation(int conditionIdx, // redirect AMICI output to parPE logging Logger logger("c" + std::to_string(conditionIdx)); - auto rdata = run_amici_simulation(solver, edata.get(), model, false, &logger); + auto rdata = + run_amici_simulation(solver, edata.get(), model, false, &logger); Expects(rdata != nullptr); @@ -409,13 +415,11 @@ StandaloneSimulator::runSimulation(int conditionIdx, rdata->y, rdata->sigmay, rdata->x, - rdata->status - }; + rdata->status}; } std::vector -getFinalParameters(std::string const& startIndex, H5::H5File const& file) -{ 
+getFinalParameters(std::string const& startIndex, H5::H5File const& file) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); // find last iteration /multistarts/$/iteration/$/costFunParameters @@ -424,14 +428,15 @@ getFinalParameters(std::string const& startIndex, H5::H5File const& file) int iteration = 0; while ( hdf5GroupExists(file, iterationPath + std::to_string(iteration)) && - file.nameExists(iterationPath + std::to_string(iteration) - + "/costFunParameters")) { + file.nameExists( + iterationPath + std::to_string(iteration) + "/costFunParameters")) { ++iteration; } --iteration; // last one did not exist - auto [costFunEvaluationIndexLast, costFunValLast] = getFunctionEvaluationWithMinimalCost( - iterationPath + std::to_string(iteration) + "/costFunCost", file); + auto [costFunEvaluationIndexLast, costFunValLast] = + getFunctionEvaluationWithMinimalCost( + iterationPath + std::to_string(iteration) + "/costFunCost", file); int costFunEvaluationIndex = costFunEvaluationIndexLast; if (iteration > 0) { @@ -440,7 +445,8 @@ getFinalParameters(std::string const& startIndex, H5::H5File const& file) // iteration auto [costFunEvaluationIndexSecondLast, costFunValSecondLast] = getFunctionEvaluationWithMinimalCost( - iterationPath + std::to_string(iteration - 1) + "/costFunCost", file); + iterationPath + std::to_string(iteration - 1) + "/costFunCost", + file); if (costFunValSecondLast < costFunValLast) { --iteration; costFunEvaluationIndex = costFunEvaluationIndexSecondLast; @@ -462,13 +468,14 @@ getFinalParameters(std::string const& startIndex, H5::H5File const& file) std::vector parameters(numParam); - parpe::hdf5Read2DDoubleHyperslab(file.getId(), - parameterPath.c_str(), - numParam, - 1, - 0, - costFunEvaluationIndex, - parameters); + parpe::hdf5Read2DDoubleHyperslab( + file.getId(), + parameterPath.c_str(), + numParam, + 1, + 0, + costFunEvaluationIndex, + parameters); /* // read from last iteration (last column in @@ -491,13 +498,12 @@ getFinalParameters(std::string const& startIndex, H5::H5File const& file) numParam, 1, 0, numIter - 1, parameters.data()); */ - return parameters; + return parameters; } -std::pair -getFunctionEvaluationWithMinimalCost(std::string const& datasetPath, - H5::H5File const& file) -{ +std::pair getFunctionEvaluationWithMinimalCost( + std::string const& datasetPath, + H5::H5File const& file) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); H5::DataSet dataset = file.openDataSet(datasetPath); @@ -516,12 +522,11 @@ getFunctionEvaluationWithMinimalCost(std::string const& datasetPath, parpe::hdf5Read2DDoubleHyperslab( file, datasetPath, 1, numFunctionEvalations, 0, 0, cost); int minIndex = std::min_element(cost.begin(), cost.end()) - cost.begin(); - return { minIndex, cost[minIndex] }; + return {minIndex, cost[minIndex]}; } std::vector> -getParameterTrajectory(std::string const& startIndex, H5::H5File const& file) -{ +getParameterTrajectory(std::string const& startIndex, H5::H5File const& file) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); std::string parameterPath = @@ -541,47 +546,41 @@ getParameterTrajectory(std::string const& startIndex, H5::H5File const& file) for (int iter = 0; iter < numIter; ++iter) { parameters[iter] = std::vector(numParam); - parpe::hdf5Read2DDoubleHyperslab(file, - parameterPath, - numParam, - 1, - 0, - iter, - parameters[iter]); + parpe::hdf5Read2DDoubleHyperslab( + file, parameterPath, numParam, 1, 0, iter, parameters[iter]); } return parameters; } -int -getNumStarts(H5::H5File const& file, std::string const& rootPath) -{ +int 
getNumStarts(H5::H5File const& file, std::string const& rootPath) { auto o = parpe::OptimizationOptions::fromHDF5( file, rootPath + "/optimizationOptions"); return o->numStarts; } -int -runFinalParameters(StandaloneSimulator& sim, - std::string const& conditionFileName, - std::string const& conditionFilePath, - std::string const& parameterFileName, - std::string const& parameterFilePath, - std::string const& resultFileName, - std::string const& resultPath, - LoadBalancerMaster* loadBalancer, bool computeInnerParameters) -{ +int runFinalParameters( + StandaloneSimulator& sim, + std::string const& conditionFileName, + std::string const& conditionFilePath, + std::string const& parameterFileName, + std::string const& parameterFilePath, + std::string const& resultFileName, + std::string const& resultPath, + LoadBalancerMaster* loadBalancer, + bool computeInnerParameters) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); H5::H5File parameterFile(parameterFileName, H5F_ACC_RDONLY); H5::H5File conditionFile(conditionFileName, H5F_ACC_RDONLY); std::vector parameterNames; - if(hdf5GroupExists(parameterFile, - parameterFilePath + "/parameters/parameterNames")){ + if (hdf5GroupExists( + parameterFile, parameterFilePath + "/parameters/parameterNames")) { parameterNames = hdf5Read1dStringDataset( parameterFile, parameterFilePath + "/parameters/parameterNames"); } else { parameterNames = hdf5Read1dStringDataset( - parameterFile, parameterFilePath + "/inputData/parameters/parameterNames"); + parameterFile, + parameterFilePath + "/inputData/parameters/parameterNames"); } lock.unlock(); @@ -591,23 +590,24 @@ runFinalParameters(StandaloneSimulator& sim, for (int iStart = 0; iStart < numStarts; ++iStart) { std::cout << "Running for start " << iStart << std::endl; try { - auto parameterValues = - parpe::getFinalParameters(std::to_string(iStart), parameterFile); + auto parameterValues = parpe::getFinalParameters( + std::to_string(iStart), parameterFile); Expects(parameterValues.size() == parameterNames.size()); std::map parameters; - for(int i = 0; i < static_cast(parameters.size()); ++i) + for (int i = 0; i < static_cast(parameters.size()); ++i) parameters[parameterNames[i]] = parameterValues[i]; std::string curResultPath = resultPath + "multistarts/" + std::to_string(iStart); - errors += sim.run(resultFileName, - curResultPath, - parameters, - loadBalancer, - conditionFile, - conditionFilePath, - computeInnerParameters); + errors += sim.run( + resultFileName, + curResultPath, + parameters, + loadBalancer, + conditionFile, + conditionFilePath, + computeInnerParameters); } catch (H5::FileIException const& e) { std::cerr << "Exception during start " << iStart << " " << e.getDetailMsg() << std::endl; @@ -622,27 +622,28 @@ runFinalParameters(StandaloneSimulator& sim, return errors; } -int -runAlongTrajectory(StandaloneSimulator& sim, - const std::string& conditionFileName, - const std::string& conditionFilePath, - const std::string& parameterFileName, - const std::string& parameterFilePath, - std::string const& resultFileName, - std::string const& resultPath, - LoadBalancerMaster* loadBalancer, bool computeInnerParameters) -{ +int runAlongTrajectory( + StandaloneSimulator& sim, + std::string const& conditionFileName, + std::string const& conditionFilePath, + std::string const& parameterFileName, + std::string const& parameterFilePath, + std::string const& resultFileName, + std::string const& resultPath, + LoadBalancerMaster* loadBalancer, + bool computeInnerParameters) { [[maybe_unused]] auto lock = 
hdf5MutexGetLock(); H5::H5File parameterFile(parameterFileName, H5F_ACC_RDONLY); H5::H5File conditionFile(conditionFileName, H5F_ACC_RDONLY); std::vector parameterNames; - if(hdf5GroupExists(parameterFile, - parameterFilePath + "/parameters/parameterNames")){ + if (hdf5GroupExists( + parameterFile, parameterFilePath + "/parameters/parameterNames")) { parameterNames = hdf5Read1dStringDataset( parameterFile, parameterFilePath + "/parameters/parameterNames"); } else { parameterNames = hdf5Read1dStringDataset( - parameterFile, parameterFilePath + "/inputData/parameters/parameterNames"); + parameterFile, + parameterFilePath + "/inputData/parameters/parameterNames"); } lock.unlock(); @@ -654,7 +655,8 @@ runAlongTrajectory(StandaloneSimulator& sim, auto parameterTrajectory = getParameterTrajectory(std::to_string(startIdx), parameterFile); - for (int iter = 0; (unsigned)iter < parameterTrajectory.size(); ++iter) { + for (int iter = 0; (unsigned)iter < parameterTrajectory.size(); + ++iter) { std::cout << "Running for start " << startIdx << " iter " << iter << std::endl; std::string curResultPath = resultPath + "/multistarts/" + @@ -664,16 +666,17 @@ runAlongTrajectory(StandaloneSimulator& sim, Expects(parameterValues.size() == parameterNames.size()); std::map parameters; - for(int i = 0; i < static_cast(parameters.size()); ++i) + for (int i = 0; i < static_cast(parameters.size()); ++i) parameters[parameterNames[i]] = parameterValues[i]; - errors += sim.run(resultFileName, - curResultPath, - parameters, - loadBalancer, - conditionFile, - conditionFilePath, - computeInnerParameters); + errors += sim.run( + resultFileName, + curResultPath, + parameters, + loadBalancer, + conditionFile, + conditionFilePath, + computeInnerParameters); } } catch (std::exception const& e) { std::cerr << e.what() << std::endl; @@ -687,27 +690,25 @@ runAlongTrajectory(StandaloneSimulator& sim, return errors; } - -int -runNominalParameters(StandaloneSimulator& sim, - std::string const& conditionFileName, - std::string const& conditionFilePath, - std::string const& parameterFileName, - std::string const& parameterFilePath, - std::string const& resultFileName, - std::string const& resultPath, - LoadBalancerMaster* loadBalancer, - bool computeInnerParameters) -{ +int runNominalParameters( + StandaloneSimulator& sim, + std::string const& conditionFileName, + std::string const& conditionFilePath, + std::string const& parameterFileName, + std::string const& parameterFilePath, + std::string const& resultFileName, + std::string const& resultPath, + LoadBalancerMaster* loadBalancer, + bool computeInnerParameters) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); H5::H5File parameterFile(parameterFileName, H5F_ACC_RDONLY); H5::H5File conditionFile(conditionFileName, H5F_ACC_RDONLY); int errors = 0; - std::cout << "Running for nominal parameter from " - < parameters; - for(int i = 0; i < static_cast(parameterValues.size()); ++i) + for (int i = 0; i < static_cast(parameterValues.size()); ++i) parameters[parameterNames[i]] = parameterValues[i]; std::string curResultPath = resultPath + "nominal/"; - - errors += sim.run(resultFileName, - curResultPath, - parameters, - loadBalancer, - conditionFile, - conditionFilePath, - computeInnerParameters); + errors += sim.run( + resultFileName, + curResultPath, + parameters, + loadBalancer, + conditionFile, + conditionFilePath, + computeInnerParameters); // lock for destruction of H5Files // FIXME: won't lock if an unhandled exception occurs @@ -738,96 +739,100 @@ 
runNominalParameters(StandaloneSimulator& sim, return errors; } -int -runSimulationTasks(StandaloneSimulator& sim, - std::string const& simulationMode, - std::string const& conditionFileName, - std::string const& conditionFilePath, - std::string const& parameterFileName, - std::string const& parameterFilePath, - std::string const& resultFileName, - std::string const& resultPath, - LoadBalancerMaster* loadBalancer, - bool computeInnerParameters) -{ +int runSimulationTasks( + StandaloneSimulator& sim, + std::string const& simulationMode, + std::string const& conditionFileName, + std::string const& conditionFilePath, + std::string const& parameterFileName, + std::string const& parameterFilePath, + std::string const& resultFileName, + std::string const& resultPath, + LoadBalancerMaster* loadBalancer, + bool computeInnerParameters) { { - std::cout<<"Running "< " - < " << resultFileName << ":" + << resultPath << std::endl; // copy input data [[maybe_unused]] auto lock = hdf5MutexGetLock(); H5::H5File conditionFile = hdf5OpenForReading(conditionFileName); H5::H5File resultFile = hdf5OpenForAppending(resultFileName); // TODO: this may not always be present. decide elsewhere what to copy - std::vector datasetsToCopy {"/inputData"}; + std::vector datasetsToCopy{"/inputData"}; for (auto const& datasetToCopy : datasetsToCopy) { auto source = conditionFilePath + datasetToCopy; - if(!conditionFile.exists(source)) { + if (!conditionFile.exists(source)) { continue; } auto dest = resultPath + "/" + datasetToCopy; - H5Ocopy(conditionFile.getId(), source.c_str(), - resultFile.getId(), dest.c_str(), - H5P_DEFAULT, H5P_DEFAULT); + H5Ocopy( + conditionFile.getId(), + source.c_str(), + resultFile.getId(), + dest.c_str(), + H5P_DEFAULT, + H5P_DEFAULT); } } if (simulationMode == "--at-optimum") { - return parpe::runFinalParameters(sim, - conditionFileName, - conditionFilePath, - parameterFileName, - parameterFilePath, - resultFileName, - resultPath, - loadBalancer, - computeInnerParameters); + return parpe::runFinalParameters( + sim, + conditionFileName, + conditionFilePath, + parameterFileName, + parameterFilePath, + resultFileName, + resultPath, + loadBalancer, + computeInnerParameters); } if (simulationMode == "--along-trajectory") { - return parpe::runAlongTrajectory(sim, - conditionFileName, - conditionFilePath, - parameterFileName, - parameterFilePath, - resultFileName, - resultPath, - loadBalancer, - computeInnerParameters); + return parpe::runAlongTrajectory( + sim, + conditionFileName, + conditionFilePath, + parameterFileName, + parameterFilePath, + resultFileName, + resultPath, + loadBalancer, + computeInnerParameters); } if (simulationMode == "--nominal") { - return parpe::runNominalParameters(sim, - conditionFileName, - conditionFilePath, - parameterFileName, - parameterFilePath, - resultFileName, - resultPath, - loadBalancer, - computeInnerParameters); - + return parpe::runNominalParameters( + sim, + conditionFileName, + conditionFilePath, + parameterFileName, + parameterFilePath, + resultFileName, + resultPath, + loadBalancer, + computeInnerParameters); } return EXIT_FAILURE; } -int -runSimulator(MultiConditionDataProvider& dp, - std::string const& simulationMode, - std::string const& conditionFileName, - std::string const& conditionFilePath, - std::string const& parameterFileName, - std::string const& parameterFilePath, - std::string const& resultFileName, - std::string const& resultPath, - bool computeInnerParameters) -{ +int runSimulator( + MultiConditionDataProvider& dp, + std::string const& 
simulationMode, + std::string const& conditionFileName, + std::string const& conditionFilePath, + std::string const& parameterFileName, + std::string const& parameterFilePath, + std::string const& resultFileName, + std::string const& resultPath, + bool computeInnerParameters) { parpe::StandaloneSimulator sim(&dp); int status = 0; @@ -837,16 +842,17 @@ runSimulator(MultiConditionDataProvider& dp, if (parpe::getMpiRank() == 0) { parpe::LoadBalancerMaster loadBalancer; loadBalancer.run(); - status = runSimulationTasks(sim, - simulationMode, - conditionFileName, - conditionFilePath, - parameterFileName, - parameterFilePath, - resultFileName, - resultPath, - &loadBalancer, - computeInnerParameters); + status = runSimulationTasks( + sim, + simulationMode, + conditionFileName, + conditionFilePath, + parameterFileName, + parameterFilePath, + resultFileName, + resultPath, + &loadBalancer, + computeInnerParameters); loadBalancer.terminate(); loadBalancer.sendTerminationSignalToAllWorkers(); } else { @@ -857,16 +863,17 @@ runSimulator(MultiConditionDataProvider& dp, } } else { #endif - status = runSimulationTasks(sim, - simulationMode, - conditionFileName, - conditionFilePath, - parameterFileName, - parameterFilePath, - resultFileName, - resultPath, - nullptr, - computeInnerParameters); + status = runSimulationTasks( + sim, + simulationMode, + conditionFileName, + conditionFilePath, + parameterFileName, + parameterFilePath, + resultFileName, + resultPath, + nullptr, + computeInnerParameters); #ifdef PARPE_ENABLE_MPI } #endif @@ -874,5 +881,4 @@ runSimulator(MultiConditionDataProvider& dp, return status; } - } // namespace parpe diff --git a/src/parpecommon/costFunction.cpp b/src/parpecommon/costFunction.cpp index 4beab181c..62823a5ac 100644 --- a/src/parpecommon/costFunction.cpp +++ b/src/parpecommon/costFunction.cpp @@ -1,7 +1,3 @@ #include -namespace parpe { - - - -} // namespace parpe +namespace parpe {} // namespace parpe diff --git a/src/parpecommon/functions.cpp b/src/parpecommon/functions.cpp index a9bb41675..ddd5e4fa1 100644 --- a/src/parpecommon/functions.cpp +++ b/src/parpecommon/functions.cpp @@ -3,8 +3,9 @@ namespace parpe { FunctionEvaluationStatus GradientFunction::evaluate( - gsl::span parameters, double &fval, - gsl::span gradient) const { + gsl::span parameters, + double& fval, + gsl::span gradient) const { return evaluate(parameters, fval, gradient, nullptr, nullptr); } diff --git a/src/parpecommon/hdf5Misc.cpp b/src/parpecommon/hdf5Misc.cpp index a8b1ed4cd..35ab99fad 100644 --- a/src/parpecommon/hdf5Misc.cpp +++ b/src/parpecommon/hdf5Misc.cpp @@ -15,22 +15,22 @@ #include #include #include -#include -#include #include +#include +#include #ifdef __has_include -# if __has_include() -# include -using std::filesystem::path; +#if __has_include() +#include using std::filesystem::create_directories; -# elif __has_include() -# include -using std::experimental::filesystem::path; +using std::filesystem::path; +#elif __has_include() +#include using std::experimental::filesystem::create_directories; -# else -# error "Missing " -# endif +using std::experimental::filesystem::path; +#else +#error "Missing " +#endif #else #error "Missing " #endif @@ -49,96 +49,123 @@ void initHDF5Mutex() { H5dont_atexit(); } -std::unique_lock hdf5MutexGetLock() -{ +std::unique_lock hdf5MutexGetLock() { return std::unique_lock(mutexHdf); } -herr_t hdf5ErrorStackWalker_cb(unsigned int n, const H5E_error_t *err_desc, - void* /*client_data*/) { +herr_t hdf5ErrorStackWalker_cb( + unsigned int n, + H5E_error_t const* 
err_desc, + void* /*client_data*/) { Ensures(err_desc != nullptr); constexpr int indent = 2; [[maybe_unused]] auto lock = hdf5MutexGetLock(); - std::unique_ptr - maj_str { H5Eget_major(err_desc->maj_num), &std::free }; - std::unique_ptr - min_str { H5Eget_minor(err_desc->min_num), &std::free }; - - logmessage(loglevel::critical, "%*s#%03d: %s line %u in %s(): %s", indent, "", - n, err_desc->file_name, err_desc->line, err_desc->func_name, - err_desc->desc); - logmessage(loglevel::critical, "%*smajor(%02d): %s", indent * 2, "", - err_desc->maj_num, maj_str.get()); - logmessage(loglevel::critical, "%*sminor(%02d): %s", indent * 2, "", - err_desc->min_num, min_str.get()); + std::unique_ptr maj_str{ + H5Eget_major(err_desc->maj_num), &std::free}; + std::unique_ptr min_str{ + H5Eget_minor(err_desc->min_num), &std::free}; + + logmessage( + loglevel::critical, + "%*s#%03d: %s line %u in %s(): %s", + indent, + "", + n, + err_desc->file_name, + err_desc->line, + err_desc->func_name, + err_desc->desc); + logmessage( + loglevel::critical, + "%*smajor(%02d): %s", + indent * 2, + "", + err_desc->maj_num, + maj_str.get()); + logmessage( + loglevel::critical, + "%*sminor(%02d): %s", + indent * 2, + "", + err_desc->min_num, + min_str.get()); return 0; } - -bool hdf5GroupExists(H5::H5File const& file, const std::string &groupName) -{ +bool hdf5GroupExists(H5::H5File const& file, std::string const& groupName) { std::lock_guard lock(mutexHdf); // switch off error handler, check existence and re-enable H5_SAVE_ERROR_HANDLER; - herr_t status = H5Gget_objinfo(file.getId(), groupName.c_str(), false, nullptr); + herr_t status = + H5Gget_objinfo(file.getId(), groupName.c_str(), false, nullptr); H5_RESTORE_ERROR_HANDLER; return status >= 0; } -void hdf5EnsureGroupExists(H5::H5File const& file, - const std::string &groupName) { +void hdf5EnsureGroupExists( + H5::H5File const& file, + std::string const& groupName) { if (!hdf5GroupExists(file, groupName)) { hdf5CreateGroup(file, groupName, true); } } -void hdf5CreateGroup(const H5::H5File &file, const std::string &groupPath, bool recursively) -{ +void hdf5CreateGroup( + const H5::H5File& file, + std::string const& groupPath, + bool recursively) { std::lock_guard lock(mutexHdf); // requires HDF5 >=1.10.6, so needs some C here // groupCreationPropertyList.setCreateIntermediateGroup(recursively); auto groupCreationPropertyListTmp = H5Pcreate(H5P_LINK_CREATE); H5Pset_create_intermediate_group(groupCreationPropertyListTmp, recursively); - H5::LinkCreatPropList groupCreationPropertyList(groupCreationPropertyListTmp); + H5::LinkCreatPropList groupCreationPropertyList( + groupCreationPropertyListTmp); try { - auto group = file.createGroup(groupPath.c_str(), groupCreationPropertyList); - } catch (H5::Exception const&) { - throw HDF5Exception("Failed to create group in hdf5CreateGroup:" + - groupPath); + auto group = + file.createGroup(groupPath.c_str(), groupCreationPropertyList); + } catch (H5::Exception const&) { + throw HDF5Exception( + "Failed to create group in hdf5CreateGroup:" + groupPath); } } -void hdf5CreateExtendableDouble2DArray(const H5::H5File &file, - const std::string &datasetPath, - hsize_t stride) -{ +void hdf5CreateExtendableDouble2DArray( + const H5::H5File& file, + std::string const& datasetPath, + hsize_t stride) { constexpr int rank = 2; - hsize_t initialDimensions[2] {stride, 0}; - hsize_t maximumDimensions[2] {stride, H5S_UNLIMITED}; + hsize_t initialDimensions[2]{stride, 0}; + hsize_t maximumDimensions[2]{stride, H5S_UNLIMITED}; std::lock_guard 
lock(mutexHdf); H5::DataSpace dataspace(rank, initialDimensions, maximumDimensions); // need chunking for extendable dataset - hsize_t chunkDimensions[2] {stride, 1}; + hsize_t chunkDimensions[2]{stride, 1}; H5::DSetCreatPropList dSetCreatPropList; dSetCreatPropList.setChunk(rank, chunkDimensions); - auto dataset = file.createDataSet(datasetPath.c_str(), H5::PredType::NATIVE_DOUBLE, - dataspace, dSetCreatPropList); + auto dataset = file.createDataSet( + datasetPath.c_str(), + H5::PredType::NATIVE_DOUBLE, + dataspace, + dSetCreatPropList); } -void hdf5Extend2ndDimensionAndWriteToDouble2DArray(const H5::H5File &file, const std::string &datasetPath, gsl::span buffer) -{ +void hdf5Extend2ndDimensionAndWriteToDouble2DArray( + const H5::H5File& file, + std::string const& datasetPath, + gsl::span buffer) { std::lock_guard lock(mutexHdf); auto dataset = file.openDataSet(datasetPath.c_str()); @@ -147,10 +174,12 @@ void hdf5Extend2ndDimensionAndWriteToDouble2DArray(const H5::H5File &file, const auto filespace = dataset.getSpace(); int rank = filespace.getSimpleExtentNdims(); if (rank != 2) { - throw HDF5Exception("Failed to write data in " - "hdf5Extend2ndDimensionAndWriteToDouble2DArray: " - "not of rank 2 (%d) when writing %s", - rank, datasetPath.c_str()); + throw HDF5Exception( + "Failed to write data in " + "hdf5Extend2ndDimensionAndWriteToDouble2DArray: " + "not of rank 2 (%d) when writing %s", + rank, + datasetPath.c_str()); } // extend @@ -159,23 +188,24 @@ void hdf5Extend2ndDimensionAndWriteToDouble2DArray(const H5::H5File &file, const Expects(buffer.size() == currentDimensions[0]); - hsize_t newDimensions[2] {currentDimensions[0], currentDimensions[1] + 1}; + hsize_t newDimensions[2]{currentDimensions[0], currentDimensions[1] + 1}; dataset.extend(newDimensions); filespace = dataset.getSpace(); - hsize_t offset[2] {0, currentDimensions[1]}; - hsize_t slabsize[2] {currentDimensions[0], 1}; + hsize_t offset[2]{0, currentDimensions[1]}; + hsize_t slabsize[2]{currentDimensions[0], 1}; filespace.selectHyperslab(H5S_SELECT_SET, slabsize, offset); H5::DataSpace memspace(rank, slabsize); - dataset.write(buffer.data(), H5::PredType::NATIVE_DOUBLE, memspace, filespace); + dataset.write( + buffer.data(), H5::PredType::NATIVE_DOUBLE, memspace, filespace); } -void hdf5Extend3rdDimensionAndWriteToDouble3DArray(const H5::H5File &file, - std::string const& datasetPath, - gsl::span buffer) -{ +void hdf5Extend3rdDimensionAndWriteToDouble3DArray( + const H5::H5File& file, + std::string const& datasetPath, + gsl::span buffer) { std::lock_guard lock(mutexHdf); auto dataset = file.openDataSet(datasetPath.c_str()); @@ -188,27 +218,27 @@ void hdf5Extend3rdDimensionAndWriteToDouble3DArray(const H5::H5File &file, hsize_t currentDimensions[3]; filespace.getSimpleExtentDims(currentDimensions); - hsize_t newDimensions[3] {currentDimensions[0], - currentDimensions[1], - currentDimensions[2] + 1}; + hsize_t newDimensions[3]{ + currentDimensions[0], currentDimensions[1], currentDimensions[2] + 1}; dataset.extend(newDimensions); filespace = dataset.getSpace(); - hsize_t offset[3] {0, 0, currentDimensions[2]}; - hsize_t slabsize[3] {currentDimensions[0], currentDimensions[1], 1}; + hsize_t offset[3]{0, 0, currentDimensions[2]}; + hsize_t slabsize[3]{currentDimensions[0], currentDimensions[1], 1}; filespace.selectHyperslab(H5S_SELECT_SET, slabsize, offset); H5::DataSpace memspace(rank, slabsize); - dataset.write(buffer.data(), H5::PredType::NATIVE_DOUBLE, memspace, filespace); + dataset.write( + buffer.data(), 
H5::PredType::NATIVE_DOUBLE, memspace, filespace); } -void hdf5CreateOrExtendAndWriteToDouble2DArray(const H5::H5File &file, - const std::string &parentPath, - const std::string &datasetName, - gsl::span buffer) -{ +void hdf5CreateOrExtendAndWriteToDouble2DArray( + const H5::H5File& file, + std::string const& parentPath, + std::string const& datasetName, + gsl::span buffer) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); hdf5EnsureGroupExists(file, parentPath); @@ -216,20 +246,20 @@ void hdf5CreateOrExtendAndWriteToDouble2DArray(const H5::H5File &file, auto fullDatasetPath = parentPath + "/" + datasetName; if (!file.nameExists(fullDatasetPath.c_str())) { - hdf5CreateExtendableDouble2DArray( - file, fullDatasetPath, buffer.size()); + hdf5CreateExtendableDouble2DArray(file, fullDatasetPath, buffer.size()); } hdf5Extend2ndDimensionAndWriteToDouble2DArray( - file, fullDatasetPath, buffer); + file, fullDatasetPath, buffer); } -void hdf5CreateOrExtendAndWriteToDouble3DArray(const H5::H5File &file, - const std::string &parentPath, - const std::string &datasetName, - gsl::span buffer, - hsize_t stride1, - hsize_t stride2) { +void hdf5CreateOrExtendAndWriteToDouble3DArray( + const H5::H5File& file, + std::string const& parentPath, + std::string const& datasetName, + gsl::span buffer, + hsize_t stride1, + hsize_t stride2) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); hdf5EnsureGroupExists(file, parentPath); @@ -238,20 +268,18 @@ void hdf5CreateOrExtendAndWriteToDouble3DArray(const H5::H5File &file, if (!file.nameExists(fullDatasetPath.c_str())) { hdf5CreateExtendableDouble3DArray( - file, fullDatasetPath, stride1, stride2); + file, fullDatasetPath, stride1, stride2); } - hdf5Extend3rdDimensionAndWriteToDouble3DArray(file, - fullDatasetPath, - buffer); - + hdf5Extend3rdDimensionAndWriteToDouble3DArray( + file, fullDatasetPath, buffer); } -void hdf5CreateOrExtendAndWriteToInt2DArray(const H5::H5File &file, - const std::string &parentPath, - const std::string &datasetName, - gsl::span buffer) -{ +void hdf5CreateOrExtendAndWriteToInt2DArray( + const H5::H5File& file, + std::string const& parentPath, + std::string const& datasetName, + gsl::span buffer) { std::lock_guard lock(mutexHdf); hdf5EnsureGroupExists(file, parentPath); @@ -259,18 +287,16 @@ void hdf5CreateOrExtendAndWriteToInt2DArray(const H5::H5File &file, auto fullDatasetPath = parentPath + "/" + datasetName; if (!file.nameExists(fullDatasetPath.c_str())) { - hdf5CreateExtendableInt2DArray( - file, fullDatasetPath, buffer.size()); + hdf5CreateExtendableInt2DArray(file, fullDatasetPath, buffer.size()); } - hdf5Extend2ndDimensionAndWriteToInt2DArray(file, fullDatasetPath, - buffer); + hdf5Extend2ndDimensionAndWriteToInt2DArray(file, fullDatasetPath, buffer); } -void hdf5Extend2ndDimensionAndWriteToInt2DArray(const H5::H5File &file, - const std::string &datasetPath, - gsl::span buffer) -{ +void hdf5Extend2ndDimensionAndWriteToInt2DArray( + const H5::H5File& file, + std::string const& datasetPath, + gsl::span buffer) { std::lock_guard lock(mutexHdf); auto dataset = file.openDataSet(datasetPath.c_str()); @@ -278,19 +304,19 @@ void hdf5Extend2ndDimensionAndWriteToInt2DArray(const H5::H5File &file, // extend auto filespace = dataset.getSpace(); int rank = filespace.getSimpleExtentNdims(); - if(rank != 2) + if (rank != 2) throw HDF5Exception("Only works for 2D arrays!"); hsize_t currentDimensions[2]; filespace.getSimpleExtentDims(currentDimensions); Expects(buffer.size() == currentDimensions[0]); - hsize_t newDimensions[2] 
{currentDimensions[0], currentDimensions[1] + 1}; + hsize_t newDimensions[2]{currentDimensions[0], currentDimensions[1] + 1}; dataset.extend(newDimensions); filespace = dataset.getSpace(); - hsize_t offset[2] {0, currentDimensions[1]}; - hsize_t slabsize[2] {currentDimensions[0], 1}; + hsize_t offset[2]{0, currentDimensions[1]}; + hsize_t slabsize[2]{currentDimensions[0], 1}; filespace.selectHyperslab(H5S_SELECT_SET, slabsize, offset); @@ -298,38 +324,40 @@ void hdf5Extend2ndDimensionAndWriteToInt2DArray(const H5::H5File &file, dataset.write(buffer.data(), H5::PredType::NATIVE_INT, memspace, filespace); } -void hdf5CreateExtendableInt2DArray(const H5::H5File &file, - const std::string &datasetPath, - hsize_t stride) -{ +void hdf5CreateExtendableInt2DArray( + const H5::H5File& file, + std::string const& datasetPath, + hsize_t stride) { std::lock_guard lock(mutexHdf); constexpr int rank = 2; - hsize_t initialDimensions[rank] {stride, 0}; - hsize_t maximumDimensions[rank] {stride, H5S_UNLIMITED}; + hsize_t initialDimensions[rank]{stride, 0}; + hsize_t maximumDimensions[rank]{stride, H5S_UNLIMITED}; H5::DataSpace dataspace(rank, initialDimensions, maximumDimensions); // need chunking for extendable dataset - hsize_t chunkDimensions[2] {stride, 1}; + hsize_t chunkDimensions[2]{stride, 1}; H5::DSetCreatPropList datasetCreationProperty; datasetCreationProperty.setChunk(rank, chunkDimensions); Expects(H5Tget_size(H5T_NATIVE_INT) == sizeof(int)); - auto dataset = file.createDataSet(datasetPath.c_str(), H5::PredType::NATIVE_INT, - dataspace, datasetCreationProperty); - + auto dataset = file.createDataSet( + datasetPath.c_str(), + H5::PredType::NATIVE_INT, + dataspace, + datasetCreationProperty); } -void hdf5CreateExtendableDouble3DArray(const H5::H5File &file, - const std::string &datasetPath, - hsize_t stride1, - hsize_t stride2) -{ +void hdf5CreateExtendableDouble3DArray( + const H5::H5File& file, + std::string const& datasetPath, + hsize_t stride1, + hsize_t stride2) { constexpr int rank = 3; - hsize_t initialDimensions[rank] {stride1, stride2, 0}; - hsize_t maximumDimensions[rank] {stride1, stride2, H5S_UNLIMITED}; + hsize_t initialDimensions[rank]{stride1, stride2, 0}; + hsize_t maximumDimensions[rank]{stride1, stride2, H5S_UNLIMITED}; std::lock_guard lock(mutexHdf); @@ -340,62 +368,68 @@ void hdf5CreateExtendableDouble3DArray(const H5::H5File &file, H5::DSetCreatPropList datasetCreationProperty; datasetCreationProperty.setChunk(rank, chunkDimensions); - auto dataset = file.createDataSet(datasetPath.c_str(), H5::PredType::NATIVE_DOUBLE, - dataspace, datasetCreationProperty); -} - -void hdf5Read2DDoubleHyperslab(const H5::H5File &file, - const std::string &path, - hsize_t size0, - hsize_t size1, - hsize_t offset0, - hsize_t offset1, - gsl::span buffer) -{ + auto dataset = file.createDataSet( + datasetPath.c_str(), + H5::PredType::NATIVE_DOUBLE, + dataspace, + datasetCreationProperty); +} + +void hdf5Read2DDoubleHyperslab( + const H5::H5File& file, + std::string const& path, + hsize_t size0, + hsize_t size1, + hsize_t offset0, + hsize_t offset1, + gsl::span buffer) { Expects(buffer.size() == size0 * size1); std::lock_guard lock(mutexHdf); auto dataset = file.openDataSet(path.c_str()); auto dataspace = dataset.getSpace(); - hsize_t offset[] {offset0, offset1}; - hsize_t count[] {size0, size1}; + hsize_t offset[]{offset0, offset1}; + hsize_t count[]{size0, size1}; - const int ndims = dataspace.getSimpleExtentNdims(); + int const ndims = dataspace.getSimpleExtentNdims(); RELEASE_ASSERT(ndims == 2 && 
"Only works for 2D arrays!", ""); hsize_t dims[ndims]; dataspace.getSimpleExtentDims(dims); // printf("%lld %lld, %lld %lld, %lld %lld\n", dims[0], dims[1], offset0, // offset1, size0, size1); - RELEASE_ASSERT(dims[0] >= offset0 && dims[0] >= size0, - "Offset larger than dataspace dimensions!"); - RELEASE_ASSERT(dims[1] >= offset1 && dims[1] >= size1, - "Offset larger than dataspace dimensions!"); + RELEASE_ASSERT( + dims[0] >= offset0 && dims[0] >= size0, + "Offset larger than dataspace dimensions!"); + RELEASE_ASSERT( + dims[1] >= offset1 && dims[1] >= size1, + "Offset larger than dataspace dimensions!"); dataspace.selectHyperslab(H5S_SELECT_SET, count, offset); H5::DataSpace memspace(2, count); - dataset.read(buffer.data(), H5::PredType::NATIVE_DOUBLE, memspace, dataspace); + dataset.read( + buffer.data(), H5::PredType::NATIVE_DOUBLE, memspace, dataspace); } -std::vector hdf5Read1DIntegerHyperslab(H5::H5File const& file, - std::string const& path, - hsize_t count, - hsize_t offset) -{ +std::vector hdf5Read1DIntegerHyperslab( + H5::H5File const& file, + std::string const& path, + hsize_t count, + hsize_t offset) { std::lock_guard lock(mutexHdf); H5::DataSet dataset = file.openDataSet(path.c_str()); H5::DataSpace filespace = dataset.getSpace(); - const int ndims = filespace.getSimpleExtentNdims(); + int const ndims = filespace.getSimpleExtentNdims(); RELEASE_ASSERT(ndims == 1, "Only works for 1D arrays!"); hsize_t length; filespace.getSimpleExtentDims(&length); - RELEASE_ASSERT(length >= offset, - "Offset larger than dataspace dimensions!"); + RELEASE_ASSERT( + length >= offset, "Offset larger than dataspace dimensions!"); filespace.selectHyperslab(H5S_SELECT_SET, &count, &offset); @@ -407,27 +441,27 @@ std::vector hdf5Read1DIntegerHyperslab(H5::H5File const& file, return buffer; } -std::vector hdf5Read2DIntegerHyperslab(const H5::H5File &file, - std::string const& path, - hsize_t size0, - hsize_t size1, - hsize_t offset0, - hsize_t offset1) -{ +std::vector hdf5Read2DIntegerHyperslab( + const H5::H5File& file, + std::string const& path, + hsize_t size0, + hsize_t size1, + hsize_t offset0, + hsize_t offset1) { std::lock_guard lock(mutexHdf); H5::DataSet dataset = file.openDataSet(path.c_str()); H5::DataSpace filespace = dataset.getSpace(); - const int ndims = filespace.getSimpleExtentNdims(); + int const ndims = filespace.getSimpleExtentNdims(); RELEASE_ASSERT(ndims == 2, "Only works for 2D arrays!"); hsize_t dims[ndims]; filespace.getSimpleExtentDims(dims); hsize_t offset[] = {offset0, offset1}; hsize_t count[] = {size0, size1}; - if(offset0 >= dims[0] || size0 > dims[0] || offset1 >= dims[1] - || size1 > dims[1]) { + if (offset0 >= dims[0] || size0 > dims[0] || offset1 >= dims[1] || + size1 > dims[1]) { std::stringstream ss; ss << "Offset larger than dataspace dimensions! 
" << "dims: " << dims[0] << "," << dims[1] << " offsets: " << offset0 << "," << offset1 @@ -446,59 +480,64 @@ std::vector hdf5Read2DIntegerHyperslab(const H5::H5File &file, return buffer; } -void hdf5Read3DDoubleHyperslab(H5::H5File const& file, - std::string const& path, - hsize_t size0, - hsize_t size1, - hsize_t size2, - hsize_t offset0, - hsize_t offset1, - hsize_t offset2, - gsl::span buffer) -{ +void hdf5Read3DDoubleHyperslab( + H5::H5File const& file, + std::string const& path, + hsize_t size0, + hsize_t size1, + hsize_t size2, + hsize_t offset0, + hsize_t offset1, + hsize_t offset2, + gsl::span buffer) { std::lock_guard lock(mutexHdf); - const int rank = 3; + int const rank = 3; auto dataset = file.openDataSet(path.c_str()); auto dataspace = dataset.getSpace(); hsize_t offset[] = {offset0, offset1, offset2}; hsize_t count[] = {size0, size1, size2}; - const int ndims = dataspace.getSimpleExtentNdims(); + int const ndims = dataspace.getSimpleExtentNdims(); RELEASE_ASSERT(ndims == rank, "Only works for 3D arrays!"); hsize_t dims[ndims]; dataspace.getSimpleExtentDims(dims); - RELEASE_ASSERT(dims[0] >= offset0 && dims[0] >= size0, - "Offset larger than dataspace dimensions!"); - RELEASE_ASSERT(dims[1] >= offset1 && dims[1] >= size1, - "Offset larger than dataspace dimensions!"); - RELEASE_ASSERT(dims[2] >= offset2 && dims[2] >= size2, - "Offset larger than dataspace dimensions!"); + RELEASE_ASSERT( + dims[0] >= offset0 && dims[0] >= size0, + "Offset larger than dataspace dimensions!"); + RELEASE_ASSERT( + dims[1] >= offset1 && dims[1] >= size1, + "Offset larger than dataspace dimensions!"); + RELEASE_ASSERT( + dims[2] >= offset2 && dims[2] >= size2, + "Offset larger than dataspace dimensions!"); dataspace.selectHyperslab(H5S_SELECT_SET, count, offset); H5::DataSpace memspace(rank, count); - dataset.read(buffer.data(), H5::PredType::NATIVE_DOUBLE, memspace, dataspace); -} - - -std::vector hdf5Get3DDoubleHyperslab(const H5::H5File &file, const std::string &path, - hsize_t size0, hsize_t size1, - hsize_t size2, - hsize_t offset0, hsize_t offset1, - hsize_t offset2) -{ + dataset.read( + buffer.data(), H5::PredType::NATIVE_DOUBLE, memspace, dataspace); +} + +std::vector hdf5Get3DDoubleHyperslab( + const H5::H5File& file, + std::string const& path, + hsize_t size0, + hsize_t size1, + hsize_t size2, + hsize_t offset0, + hsize_t offset1, + hsize_t offset2) { std::vector buffer(size0 * size1 * size2); - hdf5Read3DDoubleHyperslab(file, path, size0, size1, size2, - offset0, offset1, offset2, buffer); + hdf5Read3DDoubleHyperslab( + file, path, size0, size1, size2, offset0, offset1, offset2, buffer); return buffer; } - -bool hdf5AttributeExists(const H5::H5File &file, - const std::string &datasetPath, - const std::string &attributeName) -{ +bool hdf5AttributeExists( + const H5::H5File& file, + std::string const& datasetPath, + std::string const& attributeName) { std::lock_guard lock(mutexHdf); int exists = false; @@ -516,28 +555,30 @@ bool hdf5AttributeExists(const H5::H5File &file, return exists; } -void hdf5WriteStringAttribute(const H5::H5File &file, - const std::string &datasetPath, - const std::string &attributeName, - const std::string &attributeValue) -{ +void hdf5WriteStringAttribute( + const H5::H5File& file, + std::string const& datasetPath, + std::string const& attributeName, + std::string const& attributeValue) { std::lock_guard lock(mutexHdf); int ret = H5LTset_attribute_string( - file.getId(), datasetPath.c_str(), - attributeName.c_str(), attributeValue.c_str()); - if(ret < 0) - throw 
HDF5Exception("Unable to write attribute %s on %s", - datasetPath.c_str(), attributeName.c_str()); -} - -H5::H5File hdf5CreateFile(const std::string &filename, - bool overwrite) -{ + file.getId(), + datasetPath.c_str(), + attributeName.c_str(), + attributeValue.c_str()); + if (ret < 0) + throw HDF5Exception( + "Unable to write attribute %s on %s", + datasetPath.c_str(), + attributeName.c_str()); +} + +H5::H5File hdf5CreateFile(std::string const& filename, bool overwrite) { // Create parent folders path dirname(filename); dirname.remove_filename(); - if(!dirname.empty()) + if (!dirname.empty()) create_directories(dirname); std::lock_guard lock(mutexHdf); @@ -546,81 +587,81 @@ H5::H5File hdf5CreateFile(const std::string &filename, struct stat st = {}; bool fileExists = stat(filename.c_str(), &st) == 0; - if(fileExists) + if (fileExists) throw HDF5Exception("Result file exists " + filename); } try { return H5::H5File(filename.c_str(), H5F_ACC_TRUNC); - } catch (H5::Exception const& e) { + } catch (H5::Exception const& e) { printBacktrace(); - throw HDF5Exception("hdf5CreateFile: Failed to create file %s. " - "Is this file opened by another process?", - filename.c_str()); + throw HDF5Exception( + "hdf5CreateFile: Failed to create file %s. " + "Is this file opened by another process?", + filename.c_str()); } } - -void hdf5GetDatasetDimensions(const H5::H5File &file, - const std::string &path, - hsize_t nDimsExpected, - int *d1, - int *d2, - int *d3, - int *d4) -{ +void hdf5GetDatasetDimensions( + const H5::H5File& file, + std::string const& path, + hsize_t nDimsExpected, + int* d1, + int* d2, + int* d3, + int* d4) { std::lock_guard lock(mutexHdf); H5_SAVE_ERROR_HANDLER; auto dataset = file.openDataSet(path.c_str()); auto dataspace = dataset.getSpace(); - const int nDimsActual = dataspace.getSimpleExtentNdims(); - if(nDimsActual != (signed)nDimsExpected) - throw HDF5Exception("Dataset rank (" + std::to_string(nDimsActual) - + ") does not match nDims argument (" - + std::to_string(nDimsExpected) + ")"); + int const nDimsActual = dataspace.getSimpleExtentNdims(); + if (nDimsActual != (signed)nDimsExpected) + throw HDF5Exception( + "Dataset rank (" + std::to_string(nDimsActual) + + ") does not match nDims argument (" + + std::to_string(nDimsExpected) + ")"); hsize_t dims[nDimsExpected]; dataspace.getSimpleExtentDims(dims); - if(nDimsExpected > 0 && d1) + if (nDimsExpected > 0 && d1) *d1 = dims[0]; - if(nDimsExpected > 1 && d2) + if (nDimsExpected > 1 && d2) *d2 = dims[1]; - if(nDimsExpected > 2 && d3) + if (nDimsExpected > 2 && d3) *d3 = dims[2]; - if(nDimsExpected > 3 && d4) + if (nDimsExpected > 3 && d4) *d4 = dims[3]; H5_RESTORE_ERROR_HANDLER; } -HDF5Exception::HDF5Exception(std::string msg) : msg(std::move(msg)) { +HDF5Exception::HDF5Exception(std::string msg) + : msg(std::move(msg)) { stackTrace = getBacktrace(20); } -HDF5Exception::HDF5Exception(const char *format, ...) -{ +HDF5Exception::HDF5Exception(char const* format, ...) 
{ va_list argptr; - va_start(argptr,format); + va_start(argptr, format); size_t needed = vsnprintf(nullptr, 0, format, argptr) + 1; char buf[needed]; va_end(argptr); - va_start(argptr,format); + va_start(argptr, format); vsprintf(buf, format, argptr); va_end(argptr); msg = buf; } -const char *HDF5Exception::what() const noexcept { return msg.c_str(); } - - +char const* HDF5Exception::what() const noexcept { return msg.c_str(); } -void hdf5CreateExtendableString1DArray(const H5::H5File &file, const std::string &datasetPath) -{ +void hdf5CreateExtendableString1DArray( + const H5::H5File& file, + std::string const& datasetPath) { int rank = 1; hsize_t initialDimensions[1] = {0}; hsize_t maximumDimensions[1] = {H5S_UNLIMITED}; @@ -635,16 +676,18 @@ void hdf5CreateExtendableString1DArray(const H5::H5File &file, const std::string datasetCreationProperty.setChunk(rank, chunkDimensions); H5::StrType strType(0, H5T_VARIABLE); - Expects(H5T_STRING == H5Tget_class(strType.getId()) - && H5Tis_variable_str(strType.getId())); + Expects( + H5T_STRING == H5Tget_class(strType.getId()) && + H5Tis_variable_str(strType.getId())); - file.createDataSet(datasetPath.c_str(), strType, - dataspace, datasetCreationProperty); + file.createDataSet( + datasetPath.c_str(), strType, dataspace, datasetCreationProperty); } -void hdf5ExtendAndWriteToString1DArray(const H5::H5File &file, const std::string &datasetPath, - const std::string &buffer) -{ +void hdf5ExtendAndWriteToString1DArray( + const H5::H5File& file, + std::string const& datasetPath, + std::string const& buffer) { std::lock_guard lock(mutexHdf); auto dataset = file.openDataSet(datasetPath.c_str()); @@ -652,7 +695,7 @@ void hdf5ExtendAndWriteToString1DArray(const H5::H5File &file, const std::string // extend auto filespace = dataset.getSpace(); int rank = filespace.getSimpleExtentNdims(); - if(rank != 1) + if (rank != 1) throw HDF5Exception("Only works for 1D arrays!"); hsize_t currentDimensions[1]; @@ -671,11 +714,11 @@ void hdf5ExtendAndWriteToString1DArray(const H5::H5File &file, const std::string dataset.write(buffer, strType, memspace, filespace); } -void hdf5CreateOrExtendAndWriteToString1DArray(const H5::H5File &file, - const std::string &parentPath, - const std::string &datasetName, - const std::string &buffer) -{ +void hdf5CreateOrExtendAndWriteToString1DArray( + const H5::H5File& file, + std::string const& parentPath, + std::string const& datasetName, + std::string const& buffer) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); hdf5EnsureGroupExists(file, parentPath); @@ -689,8 +732,7 @@ void hdf5CreateOrExtendAndWriteToString1DArray(const H5::H5File &file, hdf5ExtendAndWriteToString1DArray(file, fullDatasetPath, buffer); } -H5::H5File hdf5OpenForReading(const std::string &hdf5Filename) -{ +H5::H5File hdf5OpenForReading(std::string const& hdf5Filename) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); H5_SAVE_ERROR_HANDLER; @@ -699,19 +741,19 @@ H5::H5File hdf5OpenForReading(const std::string &hdf5Filename) H5_RESTORE_ERROR_HANDLER; return file; } catch (...) 
{ - logmessage(loglevel::critical, - "failed to open HDF5 file '%s'.", - hdf5Filename.c_str()); + logmessage( + loglevel::critical, + "failed to open HDF5 file '%s'.", + hdf5Filename.c_str()); printBacktrace(20); - H5Ewalk2(H5E_DEFAULT, H5E_WALK_DOWNWARD, hdf5ErrorStackWalker_cb, - nullptr); + H5Ewalk2( + H5E_DEFAULT, H5E_WALK_DOWNWARD, hdf5ErrorStackWalker_cb, nullptr); H5_RESTORE_ERROR_HANDLER; throw HDF5Exception("Unable to open HDF5 file " + hdf5Filename); } } -H5::H5File hdf5OpenForAppending(const std::string &hdf5Filename) -{ +H5::H5File hdf5OpenForAppending(std::string const& hdf5Filename) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); H5::H5File file; @@ -731,19 +773,19 @@ H5::H5File hdf5OpenForAppending(const std::string &hdf5Filename) } std::vector hdf5Read1dStringDataset( - H5::H5File const& file, const std::string &datasetPath) -{ + H5::H5File const& file, + std::string const& datasetPath) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); auto dataset = file.openDataSet(datasetPath.c_str()); auto filespace = dataset.getSpace(); - const int ndims = filespace.getSimpleExtentNdims(); + int const ndims = filespace.getSimpleExtentNdims(); RELEASE_ASSERT(ndims == 1, "Only works for 1D arrays!"); auto dtype = dataset.getDataType(); auto native_type = H5Tget_native_type(dtype.getId(), H5T_DIR_DEFAULT); H5::StrType tid1(0, H5T_VARIABLE); - if(!H5Tequal(native_type, tid1.getId())) + if (!H5Tequal(native_type, tid1.getId())) throw HDF5Exception("Data type mismatch"); hsize_t length; @@ -752,33 +794,36 @@ std::vector hdf5Read1dStringDataset( dataset.read((void*)buffer.data(), dtype); std::vector strBuffer(buffer.size()); - for(int i = 0; i < (int) buffer.size(); ++i) { + for (int i = 0; i < (int)buffer.size(); ++i) { strBuffer[i] = buffer[i]; } return strBuffer; } void hdf5Write1dStringDataset( - const H5::H5File &file, const std::string &parentPath, - const std::string &datasetPath, std::vector const& buffer) -{ + const H5::H5File& file, + std::string const& parentPath, + std::string const& datasetPath, + std::vector const& buffer) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); - const int dims = 1; + int const dims = 1; hsize_t dims0 = buffer.size(); H5::DataSpace sid1(dims, &dims0); H5::StrType tid1(0, H5T_VARIABLE); - RELEASE_ASSERT(H5T_STRING == H5Tget_class(tid1.getId()) - || !H5Tis_variable_str(tid1.getId()), "String type failure."); + RELEASE_ASSERT( + H5T_STRING == H5Tget_class(tid1.getId()) || + !H5Tis_variable_str(tid1.getId()), + "String type failure."); hdf5EnsureGroupExists(file, parentPath); std::string fullpath(parentPath + "/" + datasetPath); auto dataset = file.createDataSet(fullpath.c_str(), tid1, sid1); // we need character pointers - std::vector charPtrBuffer(buffer.size()); - for(int i = 0; i < (int) buffer.size(); ++i) { + std::vector charPtrBuffer(buffer.size()); + for (int i = 0; i < (int)buffer.size(); ++i) { charPtrBuffer[i] = buffer[i].c_str(); } dataset.write((void*)charPtrBuffer.data(), tid1); diff --git a/src/parpecommon/logging.cpp b/src/parpecommon/logging.cpp index 471773511..4a851d2f9 100644 --- a/src/parpecommon/logging.cpp +++ b/src/parpecommon/logging.cpp @@ -1,17 +1,17 @@ #include -#include #include // getMpiActive +#include -#include -#include -#include #include +#include #include #include -#include +#include +#include #include #include +#include #ifdef PARPE_ENABLE_MPI #include @@ -19,11 +19,11 @@ namespace parpe { -const char *loglevelShortStr[] = {"", "CRI", "ERR", "WRN", "INF", "DBG"}; +char const* loglevelShortStr[] = {"", "CRI", "ERR", 
"WRN", "INF", "DBG"}; loglevel minimumLogLevel = loglevel::debug; -static void printlogmessage(loglevel lvl, const char *message); +static void printlogmessage(loglevel lvl, char const* message); -std::string printfToString(const char *fmt, va_list ap) { +std::string printfToString(char const* fmt, va_list ap) { // Get size of string va_list ap_count; va_copy(ap_count, ap); @@ -32,14 +32,13 @@ std::string printfToString(const char *fmt, va_list ap) { ++size; // actual formatting - auto buf = std::make_unique(size); + auto buf = std::make_unique(size); size = vsnprintf(buf.get(), size, fmt, ap); return std::string(buf.get(), size); } -void logmessage(loglevel lvl, const char *format, ...) -{ +void logmessage(loglevel lvl, char const* format, ...) { va_list argptr; va_start(argptr, format); auto str = printfToString(format, argptr); @@ -47,18 +46,16 @@ void logmessage(loglevel lvl, const char *format, ...) logmessage(lvl, str); } -void logmessage(loglevel lvl, const char *format, va_list argptr) { +void logmessage(loglevel lvl, char const* format, va_list argptr) { logmessage(lvl, printfToString(format, argptr)); } -void logProcessStats() -{ +void logProcessStats() { std::ifstream file("/proc/self/status"); if (file.is_open()) { std::string line; while (std::getline(file, line)) { - if(line.rfind("Vm", 0) == 0 - || line.rfind("Rss", 0) == 0) { + if (line.rfind("Vm", 0) == 0 || line.rfind("Rss", 0) == 0) { logmessage(loglevel::debug, line); } } @@ -70,7 +67,7 @@ void printMPIInfo() { #ifdef PARPE_ENABLE_MPI int mpiActive = getMpiActive(); - if(mpiActive) { + if (mpiActive) { int mpiCommSize, mpiRank; MPI_Comm_size(MPI_COMM_WORLD, &mpiCommSize); MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); @@ -79,8 +76,12 @@ void printMPIInfo() { int procNameLen; MPI_Get_processor_name(procName, &procNameLen); - logmessage(loglevel::debug, "Rank %d/%d running on %s.", mpiRank, - mpiCommSize, procName); + logmessage( + loglevel::debug, + "Rank %d/%d running on %s.", + mpiRank, + mpiCommSize, + procName); } else { logmessage(loglevel::debug, "MPI not initialized."); } @@ -89,34 +90,33 @@ void printMPIInfo() { #endif } - void printDebugInfoAndWait(int seconds) { - //int i = 0; + // int i = 0; char hostname[256]; gethostname(hostname, sizeof(hostname)); - logmessage(loglevel::debug, - "PID %d on %s ready for attach (will wait for %ds)", getpid(), - hostname, seconds); + logmessage( + loglevel::debug, + "PID %d on %s ready for attach (will wait for %ds)", + getpid(), + hostname, + seconds); fflush(stdout); - //while (0 == i) + // while (0 == i) sleep(seconds); } -void logmessage(loglevel lvl, const std::string &msg) -{ +void logmessage(loglevel lvl, std::string const& msg) { std::stringstream ss(msg); std::string line; - while(std::getline(ss, line, '\n')) + while (std::getline(ss, line, '\n')) printlogmessage(lvl, line.c_str()); } -void printlogmessage(loglevel lvl, const char *message) -{ - if(minimumLogLevel < lvl) +void printlogmessage(loglevel lvl, char const* message) { + if (minimumLogLevel < lvl) return; - // TODO: fileLogLevel, consoleLogLevel // Coloring switch (lvl) { @@ -150,14 +150,14 @@ void printlogmessage(loglevel lvl, const char *message) #ifdef PARPE_ENABLE_MPI auto mpiActive = getMpiActive(); - if(mpiActive) { + if (mpiActive) { MPI_Comm_size(MPI_COMM_WORLD, &mpiCommSize); MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank); } char procName[MPI_MAX_PROCESSOR_NAME]; procName[0] = '\0'; - if(mpiActive) { + if (mpiActive) { int procNameLen; MPI_Get_processor_name(procName, &procNameLen); } @@ -166,10 +166,14 @@ 
void printlogmessage(loglevel lvl, const char *message) #endif std::ostringstream thread_id_oss; thread_id_oss << std::this_thread::get_id(); - auto thread_id {thread_id_oss.str()}; - - printf("[%*d:%s/%s] ", 1 + static_cast(log10(mpiCommSize)), - mpiRank, thread_id.c_str(), procName ); + auto thread_id{thread_id_oss.str()}; + + printf( + "[%*d:%s/%s] ", + 1 + static_cast(log10(mpiCommSize)), + mpiRank, + thread_id.c_str(), + procName); printf("%s", message); printf("%s\n", ANSI_COLOR_RESET); @@ -182,36 +186,34 @@ void printlogmessage(loglevel lvl, const char *message) default: break; } - } -Logger::Logger(std::string prefix) : prefix(std::move(prefix)) {} +Logger::Logger(std::string prefix) + : prefix(std::move(prefix)) {} -std::unique_ptr Logger::getChild(const std::string &appendedPrefix) const { +std::unique_ptr +Logger::getChild(std::string const& appendedPrefix) const { return std::make_unique(prefix + appendedPrefix); } -void Logger::logmessage(loglevel lvl, const std::string &msg) const { +void Logger::logmessage(loglevel lvl, std::string const& msg) const { parpe::logmessage(lvl, "[" + prefix + "] " + msg); } -void Logger::logmessage(loglevel lvl, const char *format, ...) const { +void Logger::logmessage(loglevel lvl, char const* format, ...) const { va_list argptr; va_start(argptr, format); logmessage(lvl, format, argptr); va_end(argptr); } -void Logger::logmessage(loglevel lvl, const char *format, va_list argptr) const { +void Logger::logmessage(loglevel lvl, char const* format, va_list argptr) + const { logmessage(lvl, printfToString(format, argptr)); } -void Logger::setPrefix(const std::string &pre) { - prefix = pre; -} +void Logger::setPrefix(std::string const& pre) { prefix = pre; } -const std::string &Logger::getPrefix() const { - return prefix; -} +std::string const& Logger::getPrefix() const { return prefix; } } // namespace parpe diff --git a/src/parpecommon/misc.cpp b/src/parpecommon/misc.cpp index c09ea370c..bd8f8f553 100644 --- a/src/parpecommon/misc.cpp +++ b/src/parpecommon/misc.cpp @@ -1,18 +1,18 @@ -#include #include +#include -#include +#include #include #include -#include +#include // getenv #include -#include -#include // dladdr #include // __cxa_demangle -#include -#include // getenv -#include +#include // dladdr +#include #include +#include +#include +#include #include @@ -20,10 +20,9 @@ #include #endif - namespace parpe { -void strFormatCurrentLocaltime(gsl::span buffer, const char *format) { +void strFormatCurrentLocaltime(gsl::span buffer, char const* format) { time_t current_time; struct tm local_time; time(¤t_time); @@ -33,18 +32,17 @@ void strFormatCurrentLocaltime(gsl::span buffer, const char *format) { } void printBacktrace(int nMaxFrames) { - void *array[nMaxFrames]; + void* array[nMaxFrames]; auto size = backtrace(array, nMaxFrames); backtrace_symbols_fd(array, size, STDERR_FILENO); } -std::string getBacktrace(int nMaxFrames) -{ +std::string getBacktrace(int nMaxFrames) { std::ostringstream oss; - void *callstack[nMaxFrames]; + void* callstack[nMaxFrames]; int nFrames = backtrace(callstack, nMaxFrames); - auto symbols = std::unique_ptr{ + auto symbols = std::unique_ptr{ backtrace_symbols(callstack, nFrames), free}; char buf[1024]; @@ -52,21 +50,31 @@ std::string getBacktrace(int nMaxFrames) Dl_info info; if (dladdr(callstack[i], &info) && info.dli_sname) { auto demangled = - std::unique_ptr{nullptr, free}; + std::unique_ptr{nullptr, free}; int status = -1; if (info.dli_sname[0] == '_') - demangled.reset( - abi::__cxa_demangle(info.dli_sname,nullptr, 
nullptr, - &status)); - snprintf(buf, sizeof(buf), "%-3d %*p %s + %td\n", i, - int(2 + sizeof(void*) * 2), callstack[i], - status == 0 ? demangled.get() : - info.dli_sname == nullptr ? - symbols.get()[i] : info.dli_sname, - (char *)callstack[i] - (char *)info.dli_saddr); + demangled.reset(abi::__cxa_demangle( + info.dli_sname, nullptr, nullptr, &status)); + snprintf( + buf, + sizeof(buf), + "%-3d %*p %s + %td\n", + i, + int(2 + sizeof(void*) * 2), + callstack[i], + status == 0 ? demangled.get() + : info.dli_sname == nullptr ? symbols.get()[i] + : info.dli_sname, + (char*)callstack[i] - (char*)info.dli_saddr); } else { - snprintf(buf, sizeof(buf), "%-3d %*p %s\n", - i, int(2 + sizeof(void*) * 2), callstack[i], symbols.get()[i]); + snprintf( + buf, + sizeof(buf), + "%-3d %*p %s\n", + i, + int(2 + sizeof(void*) * 2), + callstack[i], + symbols.get()[i]); } oss << buf; } @@ -74,7 +82,6 @@ std::string getBacktrace(int nMaxFrames) oss << "[truncated]\n"; return oss.str(); - } double randDouble(double min, double max) { @@ -84,26 +91,28 @@ double randDouble(double min, double max) { return dis(gen); } -void fillArrayRandomDoubleIndividualInterval(gsl::span min, - gsl::span max, - gsl::span buffer) { +void fillArrayRandomDoubleIndividualInterval( + gsl::span min, + gsl::span max, + gsl::span buffer) { Expects(min.size() == max.size()); Expects(min.size() == buffer.size()); - std::transform(min.begin(), min.end(), max.begin(), buffer.begin(), - randDouble); + std::transform( + min.begin(), min.end(), max.begin(), buffer.begin(), randDouble); } -void fillArrayRandomDoubleSameInterval(double min, double max, - gsl::span buffer) { +void fillArrayRandomDoubleSameInterval( + double min, + double max, + gsl::span buffer) { std::random_device rd; std::mt19937 gen(rd()); std::uniform_real_distribution<> dis(min, max); - std::generate(buffer.begin(), buffer.end(), [&]{ return dis(gen); }); + std::generate(buffer.begin(), buffer.end(), [&] { return dis(gen); }); } - int getMpiRank() { int mpiRank = -1; #ifdef PARPE_ENABLE_MPI @@ -126,13 +135,12 @@ int getMpiCommSize() { return mpiCommSize; } -int getMpiActive() -{ +int getMpiActive() { #ifdef PARPE_ENABLE_MPI int result = 0; MPI_Initialized(&result); - if(!result) + if (!result) return false; MPI_Finalized(&result); @@ -142,13 +150,9 @@ int getMpiActive() #endif } -void CpuTimer::reset() -{ - start = roundStart = clock(); -} +void CpuTimer::reset() { start = roundStart = clock(); } -double CpuTimer::getRound() -{ +double CpuTimer::getRound() { auto now = clock(); auto timeRound = static_cast(now - roundStart) / CLOCKS_PER_SEC; roundStart = now; @@ -156,51 +160,43 @@ double CpuTimer::getRound() return timeRound; } -double CpuTimer::getTotal() const -{ +double CpuTimer::getTotal() const { auto now = clock(); return (double)(now - start) / CLOCKS_PER_SEC; } -WallTimer::WallTimer() -{ - reset(); -} +WallTimer::WallTimer() { reset(); } -void WallTimer::reset() -{ +void WallTimer::reset() { roundStart = start = std::chrono::system_clock::now(); } -double WallTimer::getRound() -{ - std::chrono::duration duration = (std::chrono::system_clock::now() - roundStart); +double WallTimer::getRound() { + std::chrono::duration duration = + (std::chrono::system_clock::now() - roundStart); roundStart = std::chrono::system_clock::now(); return duration.count(); } -double WallTimer::getTotal() const -{ - std::chrono::duration duration = (std::chrono::system_clock::now() - start); +double WallTimer::getTotal() const { + std::chrono::duration duration = + 
(std::chrono::system_clock::now() - start); return duration.count(); } -void finalizeMpiIfNeeded() -{ +void finalizeMpiIfNeeded() { #ifdef PARPE_ENABLE_MPI - if(parpe::getMpiActive()) + if (parpe::getMpiActive()) MPI_Finalize(); #endif } -bool almostEqual(double a, double b) -{ +bool almostEqual(double a, double b) { if (std::isnan(a) && std::isnan(b)) return true; - return std::fabs(a - b) < (std::fabs(a) + std::fabs(b)) - * std::numeric_limits::epsilon(); + return std::fabs(a - b) < (std::fabs(a) + std::fabs(b)) * + std::numeric_limits::epsilon(); } - } // namespace parpe diff --git a/src/parpecommon/model.cpp b/src/parpecommon/model.cpp index 2fee63ce4..bf026a722 100644 --- a/src/parpecommon/model.cpp +++ b/src/parpecommon/model.cpp @@ -4,35 +4,40 @@ namespace parpe { - -template -void Model::evaluate(gsl::span parameters, const std::vector &features, std::vector &outputs) const { +template +void Model::evaluate( + gsl::span parameters, + std::vector const& features, + std::vector& outputs) const { auto unusedGrad = std::vector>(); evaluate(parameters, features, outputs, unusedGrad); - } -void LinearModel::evaluate(gsl::span parameters, const std::vector > &features, std::vector &outputs, std::vector > &outputGradients) const { +void LinearModel::evaluate( + gsl::span parameters, + std::vector> const& features, + std::vector& outputs, + std::vector>& outputGradients) const { - const int numObservations = features.size(); - const int numFeatures = features[0].size(); - const int numParams = numFeatures + 1; - const int idxOffset = numParams - 1; + int const numObservations = features.size(); + int const numFeatures = features[0].size(); + int const numParams = numFeatures + 1; + int const idxOffset = numParams - 1; - for(int i = 0; i < numObservations; ++i) { + for (int i = 0; i < numObservations; ++i) { outputs[i] = 0.0; - for(int j = 0; j < numFeatures; ++j) { + for (int j = 0; j < numFeatures; ++j) { outputs[i] += features[i][j] * parameters[j]; } outputs[i] += parameters[idxOffset]; } - if(!outputGradients.empty()) { + if (!outputGradients.empty()) { // Simplify: [A, 1.0] // for each observation - for(int i = 0; i < numObservations; ++i) { + for (int i = 0; i < numObservations; ++i) { // for each parameter - for(int j = 0; j < numParams - 1; ++j) { + for (int j = 0; j < numParams - 1; ++j) { outputGradients[i][j] = 0.0; // for each feature outputGradients[i][j] += features[i][j]; @@ -42,39 +47,40 @@ void LinearModel::evaluate(gsl::span parameters, const std::vector } } -FunctionEvaluationStatus LinearModelMSE::evaluate(gsl::span parameters, - std::vector dataIndices, - double &fval, - gsl::span gradient, - Logger * /*logger*/, double * /*cpuTime*/) const -{ +FunctionEvaluationStatus LinearModelMSE::evaluate( + gsl::span parameters, + std::vector dataIndices, + double& fval, + gsl::span gradient, + Logger* /*logger*/, + double* /*cpuTime*/) const { int numDatasets = dataIndices.size(); // get data for indices std::vector> data(numDatasets); - for(int i = 0; (unsigned) i < data.size(); ++i) + for (int i = 0; (unsigned)i < data.size(); ++i) data[i] = datasets[dataIndices[i]]; // evaluate std::vector outputs(numDatasets); - std::vector> - outputGradients(numDatasets, - std::vector(numParameters(), NAN)); + std::vector> outputGradients( + numDatasets, std::vector(numParameters(), NAN)); lm.evaluate(parameters, data, outputs, outputGradients); // compute MSE fval = 0.0; - for(int i = 0; i < numDatasets; ++i) { + for (int i = 0; i < numDatasets; ++i) { fval += 
std::pow(labels[dataIndices[i]] - outputs[i], 2.0); } fval /= numDatasets; // and MSE gradient - if(!gradient.empty()) { - for(int p = 0; p < numParameters(); ++p) { + if (!gradient.empty()) { + for (int p = 0; p < numParameters(); ++p) { gradient[p] = 0.0; - for(int i = 0; i < numDatasets; ++i) { - gradient[p] += -2.0 * outputGradients[i][p] * (labels[dataIndices[i]] - outputs[i]); + for (int i = 0; i < numDatasets; ++i) { + gradient[p] += -2.0 * outputGradients[i][p] * + (labels[dataIndices[i]] - outputs[i]); } gradient[p] /= numDatasets; } diff --git a/src/parpecommon/parpeException.cpp b/src/parpecommon/parpeException.cpp index 72f161f6a..3f4d9b7be 100644 --- a/src/parpecommon/parpeException.cpp +++ b/src/parpecommon/parpeException.cpp @@ -2,10 +2,12 @@ namespace parpe { -ParPEException::ParPEException(const char *message) : message(message) {} +ParPEException::ParPEException(char const* message) + : message(message) {} -ParPEException::ParPEException(std::string message) : message(std::move(message)) {} +ParPEException::ParPEException(std::string message) + : message(std::move(message)) {} -const char *ParPEException::what() const noexcept { return message.c_str(); } +char const* ParPEException::what() const noexcept { return message.c_str(); } } // namespace parpe diff --git a/src/parpeloadbalancer/loadBalancerMaster.cpp b/src/parpeloadbalancer/loadBalancerMaster.cpp index e72e50144..6bef86b14 100644 --- a/src/parpeloadbalancer/loadBalancerMaster.cpp +++ b/src/parpeloadbalancer/loadBalancerMaster.cpp @@ -3,13 +3,13 @@ #ifdef PARPE_ENABLE_MPI #include -#include #include +#include #include #include -//#define MASTER_QUEUE_H_SHOW_COMMUNICATION 1 +// #define MASTER_QUEUE_H_SHOW_COMMUNICATION 1 namespace parpe { @@ -24,7 +24,7 @@ void LoadBalancerMaster::run() { int mpiCommSize; MPI_Comm_size(mpiComm, &mpiCommSize); - if(mpiCommSize <= 2) { + if (mpiCommSize <= 2) { // crashes otherwise throw std::runtime_error("Need at least 2 MPI processes!"); } @@ -48,19 +48,15 @@ void LoadBalancerMaster::run() { isRunning_ = true; } -LoadBalancerMaster::~LoadBalancerMaster() -{ +LoadBalancerMaster::~LoadBalancerMaster() { terminate(); sem_destroy(&semQueue); } #ifndef QUEUE_MASTER_TEST -void LoadBalancerMaster::assertMpiActive() { - Expects(getMpiActive()); -} +void LoadBalancerMaster::assertMpiActive() { Expects(getMpiActive()); } #endif - void LoadBalancerMaster::loadBalancerThreadRun() { // dispatch queued work packages @@ -68,8 +64,9 @@ void LoadBalancerMaster::loadBalancerThreadRun() { int freeWorkerIndex = NO_FREE_WORKER; // empty send queue while there are free workers - while(queue_thread_continue_ && (freeWorkerIndex = getNextFreeWorkerIndex()) >= 0 - && sendQueuedJob(freeWorkerIndex)) {} + while (queue_thread_continue_ && + (freeWorkerIndex = getNextFreeWorkerIndex()) >= 0 && + sendQueuedJob(freeWorkerIndex)) {} // check if any job finished handleFinishedJobs(); @@ -83,15 +80,21 @@ void LoadBalancerMaster::freeEmptiedSendBuffers() { while (true) { int emptiedBufferIdx = MPI_UNDEFINED; int anySendCompleted = 0; - MPI_Testany(sendRequests.size(), sendRequests.data(), &emptiedBufferIdx, - &anySendCompleted, MPI_STATUS_IGNORE); - - if (anySendCompleted && emptiedBufferIdx != MPI_UNDEFINED - && sentJobsData[emptiedBufferIdx]) { - /* By the time we check for send to be finished, we might have received the reply - * already and the pointed-to object might have been already destroyed. This - * is therefore set to nullptr when receiving the reply. 
*/ - std::vector().swap(sentJobsData[emptiedBufferIdx]->sendBuffer); + MPI_Testany( + sendRequests.size(), + sendRequests.data(), + &emptiedBufferIdx, + &anySendCompleted, + MPI_STATUS_IGNORE); + + if (anySendCompleted && emptiedBufferIdx != MPI_UNDEFINED && + sentJobsData[emptiedBufferIdx]) { + /* By the time we check for send to be finished, we might have + * received the reply already and the pointed-to object might have + * been already destroyed. This is therefore set to nullptr when + * receiving the reply. */ + std::vector().swap( + sentJobsData[emptiedBufferIdx]->sendBuffer); } else { break; } @@ -106,15 +109,15 @@ int LoadBalancerMaster::handleFinishedJobs() { // check for waiting incoming message MPI_Status status; int messageWaiting = 0; - MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpiComm, - &messageWaiting, &status); + MPI_Iprobe( + MPI_ANY_SOURCE, MPI_ANY_TAG, mpiComm, &messageWaiting, &status); if (messageWaiting) { // some job is finished, process that finishedWorkerIdx = handleReply(&status); // directly send new work if available - if(sendQueuedJob(finishedWorkerIdx)) + if (sendQueuedJob(finishedWorkerIdx)) finishedWorkerIdx = NO_FREE_WORKER; // not free anymore } else { // there was nothing to be finished @@ -133,11 +136,11 @@ int LoadBalancerMaster::getNextFreeWorkerIndex() { return NO_FREE_WORKER; } -JobData *LoadBalancerMaster::getNextJob() { +JobData* LoadBalancerMaster::getNextJob() { std::unique_lock lock(mutexQueue); - JobData *nextJob = nullptr; + JobData* nextJob = nullptr; if (!queue.empty()) { nextJob = queue.front(); queue.pop(); @@ -146,7 +149,7 @@ JobData *LoadBalancerMaster::getNextJob() { return nextJob; } -void LoadBalancerMaster::sendToWorker(int workerIdx, JobData *data) { +void LoadBalancerMaster::sendToWorker(int workerIdx, JobData* data) { Expects(workerIdx >= 0); Expects(workerIdx < numWorkers); @@ -156,17 +159,26 @@ void LoadBalancerMaster::sendToWorker(int workerIdx, JobData *data) { int workerRank = workerIdx + 1; #ifdef MASTER_QUEUE_H_SHOW_COMMUNICATION - printf("\x1b[31mSending job #%d to rank %d (%luB).\x1b[0m\n", tag, workerRank, data->sendBuffer.size()); + printf( + "\x1b[31mSending job #%d to rank %d (%luB).\x1b[0m\n", + tag, + workerRank, + data->sendBuffer.size()); #endif - MPI_Isend(data->sendBuffer.data(), data->sendBuffer.size(), mpiJobDataType, - workerRank, tag, - mpiComm, &sendRequests[workerIdx]); + MPI_Isend( + data->sendBuffer.data(), + data->sendBuffer.size(), + mpiJobDataType, + workerRank, + tag, + mpiComm, + &sendRequests[workerIdx]); sem_post(&semQueue); } -void LoadBalancerMaster::queueJob(JobData *data) { +void LoadBalancerMaster::queueJob(JobData* data) { RELEASE_ASSERT(isRunning_, "Can't queue job while not running."); sem_wait(&semQueue); @@ -181,10 +193,13 @@ void LoadBalancerMaster::queueJob(JobData *data) { queue.push(data); #ifdef MASTER_QUEUE_H_SHOW_COMMUNICATION - int size = sizeof(*data) + data->sendBuffer.size() + data->recvBuffer.size(); - printf("\x1b[33mQueued job with size %dB. New queue length is %d.\x1b[0m\n", size, queue.size()); + int size = + sizeof(*data) + data->sendBuffer.size() + data->recvBuffer.size(); + printf( + "\x1b[33mQueued job with size %dB. 
New queue length is %d.\x1b[0m\n", + size, + queue.size()); #endif - } void LoadBalancerMaster::terminate() { @@ -197,10 +212,10 @@ void LoadBalancerMaster::terminate() { queueThread.join(); } -int LoadBalancerMaster::handleReply(MPI_Status *mpiStatus) { +int LoadBalancerMaster::handleReply(MPI_Status* mpiStatus) { int workerIdx = mpiStatus->MPI_SOURCE - 1; - JobData *data = sentJobsData[workerIdx]; + JobData* data = sentJobsData[workerIdx]; sentJobsData[workerIdx] = nullptr; // allocate memory for result @@ -209,33 +224,42 @@ int LoadBalancerMaster::handleReply(MPI_Status *mpiStatus) { data->recvBuffer.resize(lenRecvBuffer); #ifdef MASTER_QUEUE_H_SHOW_COMMUNICATION - printf("\x1b[32mReceiving result for job %d from %d (%luB)\x1b[0m\n", - mpiStatus->MPI_TAG, mpiStatus->MPI_SOURCE, data->recvBuffer.size()); + printf( + "\x1b[32mReceiving result for job %d from %d (%luB)\x1b[0m\n", + mpiStatus->MPI_TAG, + mpiStatus->MPI_SOURCE, + data->recvBuffer.size()); #endif // receive - MPI_Recv(data->recvBuffer.data(), data->recvBuffer.size(), mpiJobDataType, - mpiStatus->MPI_SOURCE, mpiStatus->MPI_TAG, mpiComm, - MPI_STATUS_IGNORE); + MPI_Recv( + data->recvBuffer.data(), + data->recvBuffer.size(), + mpiJobDataType, + mpiStatus->MPI_SOURCE, + mpiStatus->MPI_TAG, + mpiComm, + MPI_STATUS_IGNORE); workerIsBusy[workerIdx] = false; #ifdef MASTER_QUEUE_H_SHOW_COMMUNICATION - printf("\x1b[32mReceived result for job %d from %d\x1b[0m\n", - mpiStatus->MPI_TAG, mpiStatus->MPI_SOURCE); + printf( + "\x1b[32mReceived result for job %d from %d\x1b[0m\n", + mpiStatus->MPI_TAG, + mpiStatus->MPI_SOURCE); #endif // user-provided callback if specified - if(data->callbackJobFinished) + if (data->callbackJobFinished) data->callbackJobFinished(data); - // signal job done std::unique_lock lock; - if(data->jobDoneChangedMutex) { + if (data->jobDoneChangedMutex) { lock = std::unique_lock(*data->jobDoneChangedMutex); } - if(data->jobDone) + if (data->jobDone) ++(*data->jobDone); if (data->jobDoneChangedCondition) { data->jobDoneChangedCondition->notify_all(); @@ -243,12 +267,11 @@ int LoadBalancerMaster::handleReply(MPI_Status *mpiStatus) { return workerIdx; } -bool LoadBalancerMaster::sendQueuedJob(int freeWorkerIndex) -{ +bool LoadBalancerMaster::sendQueuedJob(int freeWorkerIndex) { if (freeWorkerIndex < 0) return false; - JobData *currentQueueElement = getNextJob(); + JobData* currentQueueElement = getNextJob(); if (currentQueueElement) { sendToWorker(freeWorkerIndex, currentQueueElement); @@ -271,16 +294,14 @@ void LoadBalancerMaster::sendTerminationSignalToAllWorkers() { MPI_Waitall(commSize - 1, reqs, MPI_STATUS_IGNORE); } -bool LoadBalancerMaster::isRunning() const -{ +bool LoadBalancerMaster::isRunning() const { #ifdef MASTER_QUEUE_H_SHOW_COMMUNICATION printf("LoadBalancerMaster::isRunning -> %d\n", isRunning_); #endif return isRunning_; } -int LoadBalancerMaster::getNumQueuedJobs() const -{ +int LoadBalancerMaster::getNumQueuedJobs() const { std::unique_lock lock(mutexQueue); return queue.size(); } diff --git a/src/parpeloadbalancer/loadBalancerWorker.cpp b/src/parpeloadbalancer/loadBalancerWorker.cpp index 04be15886..1567f904f 100644 --- a/src/parpeloadbalancer/loadBalancerWorker.cpp +++ b/src/parpeloadbalancer/loadBalancerWorker.cpp @@ -25,7 +25,8 @@ void LoadBalancerWorker::run(messageHandlerFunc const& messageHandler) { } } -bool LoadBalancerWorker::waitForAndHandleJobs(const messageHandlerFunc& messageHandler) { +bool LoadBalancerWorker::waitForAndHandleJobs( + messageHandlerFunc const& messageHandler) { int rank, 
err; MPI_Comm_rank(MPI_COMM_WORLD, &rank); #ifdef LOADBALANCERWORKER_REPORT_WAITING_TIME @@ -45,8 +46,14 @@ bool LoadBalancerWorker::waitForAndHandleJobs(const messageHandlerFunc& messageH // receive message int source = 0; - err = MPI_Recv(buffer.data(), msgSize, MPI_BYTE, source, MPI_ANY_TAG, - MPI_COMM_WORLD, &mpiStatus); + err = MPI_Recv( + buffer.data(), + msgSize, + MPI_BYTE, + source, + MPI_ANY_TAG, + MPI_COMM_WORLD, + &mpiStatus); #if QUEUE_WORKER_H_VERBOSE >= 3 printf("W%d: Received job %d\n", rank, mpiStatus.MPI_TAG); @@ -61,7 +68,11 @@ bool LoadBalancerWorker::waitForAndHandleJobs(const messageHandlerFunc& messageH #ifdef LOADBALANCERWORKER_REPORT_WAITING_TIME double endTime = MPI_Wtime(); double waitedSeconds = (endTime - startTime); - logmessage(loglevel::debug, "Message received after waiting %fs.", rank, waitedSeconds); + logmessage( + loglevel::debug, + "Message received after waiting %fs.", + rank, + waitedSeconds); #endif messageHandler(buffer, mpiStatus.MPI_TAG); @@ -69,7 +80,13 @@ bool LoadBalancerWorker::waitForAndHandleJobs(const messageHandlerFunc& messageH #if QUEUE_WORKER_H_VERBOSE >= 2 printf("[%d] Job done, sending results, %dB.\n", rank, msgSize); #endif - MPI_Send(buffer.data(), buffer.size(), MPI_BYTE, 0, mpiStatus.MPI_TAG, MPI_COMM_WORLD); + MPI_Send( + buffer.data(), + buffer.size(), + MPI_BYTE, + 0, + mpiStatus.MPI_TAG, + MPI_COMM_WORLD); return false; } diff --git a/src/parpeoptimization/localOptimizationCeres.cpp b/src/parpeoptimization/localOptimizationCeres.cpp index 0ff76b7ba..293304078 100644 --- a/src/parpeoptimization/localOptimizationCeres.cpp +++ b/src/parpeoptimization/localOptimizationCeres.cpp @@ -1,14 +1,15 @@ #include +#include +#include #include #include #include -#include -#include #include -// !! Don't use. Leads to race conditions. Also: unable to assign sinks to specific ceres instances. +// !! Don't use. Leads to race conditions. Also: unable to assign sinks to +// specific ceres instances. #undef PARPE_CERES_MINIGLOG_REDIRECT #ifdef PARPE_CERES_MINIGLOG_REDIRECT #include @@ -16,24 +17,25 @@ namespace parpe { -void setCeresOption(const std::pair &pair, ceres::GradientProblemSolver::Options* options); +void setCeresOption( + std::pair const& pair, + ceres::GradientProblemSolver::Options* options); #ifdef PARPE_CERES_MINIGLOG_REDIRECT /** * @brief LogSinkAdapter redirectsceres miniglog output to logging.cpp. 
*/ class LogSinkAdapter : public google::LogSink { -public: - LogSinkAdapter() { - id = counter++; - } - void send(google::LogSeverity severity, - const char* full_filename, - const char* base_filename, - int line, - const struct tm* tm_time, - const char* message, - size_t message_len) override { + public: + LogSinkAdapter() { id = counter++; } + void send( + google::LogSeverity severity, + char const* full_filename, + char const* base_filename, + int line, + const struct tm* tm_time, + char const* message, + size_t message_len) override { // Map log levels loglevel lvl = loglevel::info; switch (severity) { @@ -52,11 +54,10 @@ class LogSinkAdapter : public google::LogSink { } parpe::logmessage(lvl, "ceres #%d: %s", id, message); - } void WaitTillSent() override {} -private: + private: /** count instantiations */ static int counter; /** prefix for logging output to identifiy concurrent optimizer runs */ @@ -67,16 +68,22 @@ int LogSinkAdapter::counter = 0; #endif /** - * @brief Adapter class for parpe::OptimizationProblem and ceres::FirstOrderFunction + * @brief Adapter class for parpe::OptimizationProblem and + * ceres::FirstOrderFunction */ class MyCeresFirstOrderFunction : public ceres::FirstOrderFunction { public: - MyCeresFirstOrderFunction(OptimizationProblem *problem, OptimizationReporter *reporter) - : problem(problem), reporter(reporter) { + MyCeresFirstOrderFunction( + OptimizationProblem* problem, + OptimizationReporter* reporter) + : problem(problem) + , reporter(reporter) { numParameters = problem->cost_fun_->numParameters(); - // bounds are not natively supported by CERES; a naive check is currently implemented which fails function evaluation if parameters are out of bounds + // bounds are not natively supported by CERES; a naive check is + // currently implemented which fails function evaluation if parameters + // are out of bounds parametersMin.resize(numParameters); problem->fillParametersMin(parametersMin); parametersMax.resize(numParameters); @@ -90,27 +97,29 @@ class MyCeresFirstOrderFunction : public ceres::FirstOrderFunction { * @param gradient If not NULL, evaluate gradient * @return true on success, false otherwise */ - bool Evaluate(const double *parameters, double *cost, - double *gradient) const override { + bool Evaluate(double const* parameters, double* cost, double* gradient) + const override { // Naive bounds check: report failure if not within - if(!withinBounds(numParameters, parameters, - parametersMin.data(), parametersMax.data())) + if (!withinBounds( + numParameters, + parameters, + parametersMin.data(), + parametersMax.data())) return false; - auto result = reporter->evaluate(gsl::make_span(parameters, numParameters), - *cost, - gsl::make_span(gradient, gradient?numParameters:0)); + auto result = reporter->evaluate( + gsl::make_span(parameters, numParameters), + *cost, + gsl::make_span(gradient, gradient ? 
numParameters : 0)); return result == functionEvaluationSuccess; } - int NumParameters() const override { - return numParameters; - } + int NumParameters() const override { return numParameters; } private: - OptimizationProblem *problem; + OptimizationProblem* problem; int numParameters = 0; // non-owning @@ -120,21 +129,22 @@ class MyCeresFirstOrderFunction : public ceres::FirstOrderFunction { std::vector parametersMax; }; - /** * @brief Callback functor for to be called between ceres iterations */ class MyIterationCallback : public ceres::IterationCallback { public: // Non-owning - explicit MyIterationCallback(OptimizationReporter *reporter) : reporter(reporter) {} + explicit MyIterationCallback(OptimizationReporter* reporter) + : reporter(reporter) {} ceres::CallbackReturnType - operator()(const ceres::IterationSummary &summary) override { + operator()(ceres::IterationSummary const& summary) override { // TODO: print here - int status = reporter->iterationFinished(gsl::span(), summary.cost, gsl::span()); + int status = reporter->iterationFinished( + gsl::span(), summary.cost, gsl::span()); switch (status) { case 0: return ceres::SOLVER_CONTINUE; @@ -144,27 +154,28 @@ class MyIterationCallback : public ceres::IterationCallback { } private: - OptimizationReporter *reporter = nullptr; + OptimizationReporter* reporter = nullptr; }; - -ceres::GradientProblemSolver::Options getCeresOptions( - OptimizationProblem *problem) { +ceres::GradientProblemSolver::Options +getCeresOptions(OptimizationProblem* problem) { ceres::GradientProblemSolver::Options options; // don't: use vlog which we can redirect more easily - //options.minimizer_progress_to_stdout = + // options.minimizer_progress_to_stdout = // problem->getOptimizationOptions().printToStdout; options.max_num_iterations = problem->getOptimizationOptions().maxOptimizerIterations; - problem->getOptimizationOptions().for_each(setCeresOption, &options); + problem->getOptimizationOptions() + .for_each( + setCeresOption, &options); return options; } - -std::tuple > OptimizerCeres::optimize(OptimizationProblem *problem) { +std::tuple> +OptimizerCeres::optimize(OptimizationProblem* problem) { #ifdef PARPE_CERES_MINIGLOG_REDIRECT // Redirect ceres output (actually it's copied; can't remove ceres sink) LogSinkAdapter log; @@ -176,10 +187,12 @@ std::tuple > OptimizerCeres::optimize(Optimizat auto reporter = problem->getReporter(); // GradientProblem takes ownership of - ceres::GradientProblem ceresProblem(new MyCeresFirstOrderFunction(problem, reporter.get())); + ceres::GradientProblem ceresProblem( + new MyCeresFirstOrderFunction(problem, reporter.get())); ceres::GradientProblemSolver::Options options = getCeresOptions(problem); - // Can use reporter from unique_ptr here, since callback will be destroyed first + // Can use reporter from unique_ptr here, since callback will be destroyed + // first MyIterationCallback callback(reporter.get()); options.callbacks.push_back(&callback); @@ -188,9 +201,10 @@ std::tuple > OptimizerCeres::optimize(Optimizat reporter->starting(gsl::span(parameters)); ceres::Solve(options, ceresProblem, parameters.data(), &summary); - reporter->finished(summary.final_cost, - gsl::span(parameters), - summary.termination_type); + reporter->finished( + summary.final_cost, + gsl::span(parameters), + summary.termination_type); // std::cout< > OptimizerCeres::optimize(Optimizat google::RemoveLogSink(&log); // before going out of scope #endif - return std::tuple >(summary.termination_type == ceres::FAILURE || - summary.termination_type 
== ceres::USER_FAILURE, summary.final_cost, parameters); + return std::tuple>( + summary.termination_type == ceres::FAILURE || + summary.termination_type == ceres::USER_FAILURE, + summary.final_cost, + parameters); } /** @@ -209,66 +226,73 @@ std::tuple > OptimizerCeres::optimize(Optimizat * @param pair key => value pair * @param options */ -void setCeresOption(const std::pair &pair, - ceres::GradientProblemSolver::Options* options) { - const std::string &key = pair.first; - const std::string &val = pair.second; +void setCeresOption( + std::pair const& pair, + ceres::GradientProblemSolver::Options* options) { + std::string const& key = pair.first; + std::string const& val = pair.second; // TODO: set enums from string - if(key == "line_search_direction_type") { + if (key == "line_search_direction_type") { options->line_search_direction_type = - static_cast(std::stoi(val)); - } else if(key == "line_search_type") { + static_cast(std::stoi(val)); + } else if (key == "line_search_type") { options->line_search_type = - static_cast(std::stoi(val)); - } else if(key == "nonlinear_conjugate_gradient_type") { + static_cast(std::stoi(val)); + } else if (key == "nonlinear_conjugate_gradient_type") { options->nonlinear_conjugate_gradient_type = - static_cast(std::stoi(val)); - } else if(key == "max_lbfgs_rank") { + static_cast(std::stoi(val)); + } else if (key == "max_lbfgs_rank") { options->max_lbfgs_rank = std::stoi(val); - } else if(key == "use_approximate_eigenvalue_bfgs_scaling") { + } else if (key == "use_approximate_eigenvalue_bfgs_scaling") { options->use_approximate_eigenvalue_bfgs_scaling = std::stoi(val); - } else if(key == "line_search_interpolation_type") { + } else if (key == "line_search_interpolation_type") { options->line_search_interpolation_type = - static_cast(std::stoi(val)); - } else if(key == "min_line_search_step_size") { + static_cast(std::stoi(val)); + } else if (key == "min_line_search_step_size") { options->min_line_search_step_size = std::stod(val); - } else if(key == "line_search_sufficient_function_decrease") { + } else if (key == "line_search_sufficient_function_decrease") { options->line_search_sufficient_function_decrease = std::stod(val); - } else if(key == "max_line_search_step_contraction") { + } else if (key == "max_line_search_step_contraction") { options->max_line_search_step_contraction = std::stod(val); - } else if(key == "min_line_search_step_contraction") { + } else if (key == "min_line_search_step_contraction") { options->min_line_search_step_contraction = std::stod(val); - } else if(key == "max_num_line_search_step_size_iterations") { + } else if (key == "max_num_line_search_step_size_iterations") { options->max_num_line_search_step_size_iterations = std::stoi(val); - } else if(key == "max_num_line_search_direction_restarts") { + } else if (key == "max_num_line_search_direction_restarts") { options->max_num_line_search_direction_restarts = std::stoi(val); - } else if(key == "line_search_sufficient_curvature_decrease") { + } else if (key == "line_search_sufficient_curvature_decrease") { options->line_search_sufficient_curvature_decrease = std::stod(val); - } else if(key == "max_line_search_step_expansion") { + } else if (key == "max_line_search_step_expansion") { options->max_line_search_step_expansion = std::stod(val); - } else if(key == "max_num_iterations") { + } else if (key == "max_num_iterations") { options->max_num_iterations = std::stoi(val); - } else if(key == "max_solver_time_in_seconds") { + } else if (key == "max_solver_time_in_seconds") { 
options->max_solver_time_in_seconds = std::stod(val); - } else if(key == "function_tolerance") { + } else if (key == "function_tolerance") { options->function_tolerance = std::stod(val); - } else if(key == "gradient_tolerance") { + } else if (key == "gradient_tolerance") { options->gradient_tolerance = std::stod(val); - } else if(key == "parameter_tolerance") { + } else if (key == "parameter_tolerance") { options->parameter_tolerance = std::stod(val); - } else if(key == "logging_type") { + } else if (key == "logging_type") { options->logging_type = static_cast(std::stoi(val)); - } else if(key == "minimizer_progress_to_stdout") { + } else if (key == "minimizer_progress_to_stdout") { options->minimizer_progress_to_stdout = std::stoi(val); } else { - logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); + logmessage( + loglevel::warning, + "Ignoring unknown optimization option %s.", + key.c_str()); return; } - logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); + logmessage( + loglevel::debug, + "Set optimization option %s to %s.", + key.c_str(), + val.c_str()); } - } // namespace parpe diff --git a/src/parpeoptimization/localOptimizationDlib.cpp b/src/parpeoptimization/localOptimizationDlib.cpp index 66aa04597..06d7a9fee 100644 --- a/src/parpeoptimization/localOptimizationDlib.cpp +++ b/src/parpeoptimization/localOptimizationDlib.cpp @@ -4,10 +4,10 @@ #include #include -#include #include +#include -typedef dlib::matrix column_vector; +typedef dlib::matrix column_vector; typedef dlib::matrix general_matrix; namespace gsl { @@ -17,10 +17,10 @@ template < long num_rows, long num_cols, typename mem_manager, - typename layout - > -span make_span(dlib::matrix const& matrix) { - return span(matrix.begin(), matrix.size()); + typename layout> +span make_span( + dlib::matrix const& matrix) { + return span(matrix.begin(), matrix.size()); } template < @@ -28,26 +28,24 @@ template < long num_rows, long num_cols, typename mem_manager, - typename layout - > -span make_span(dlib::matrix &matrix) { + typename layout> +span +make_span(dlib::matrix& matrix) { return span(matrix.begin(), matrix.size()); } -} +} // namespace gsl namespace parpe { - -column_vector dlibColumnVectorFromDoubleArray(double const *src, int len) { +column_vector dlibColumnVectorFromDoubleArray(double const* src, int len) { column_vector colVec(len); std::copy(src, src + len, colVec.begin()); return colVec; } -std::tuple > -parpe::OptimizerDlibLineSearch::optimize(OptimizationProblem *problem) -{ +std::tuple> +parpe::OptimizerDlibLineSearch::optimize(OptimizationProblem* problem) { int numParams = problem->cost_fun_->numParameters(); column_vector startingPoint(numParams); @@ -59,7 +57,6 @@ parpe::OptimizerDlibLineSearch::optimize(OptimizationProblem *problem) column_vector max(numParams); problem->fillParametersMax(gsl::make_span(startingPoint)); - auto optimizationController = problem->getReporter(); optimizationController->starting(gsl::make_span(startingPoint)); @@ -68,17 +65,15 @@ parpe::OptimizerDlibLineSearch::optimize(OptimizationProblem *problem) dlib::lbfgs_search_strategy(10), dlib::objective_delta_stop_strategy( 1e-9, problem->getOptimizationOptions().maxOptimizerIterations), - [&optimizationController](const column_vector& x){ + [&optimizationController](column_vector const& x) { // objective function double fval = NAN; optimizationController->evaluate( - gsl::make_span(x), - fval, gsl::span(nullptr, 0)); + gsl::make_span(x), fval, gsl::span(nullptr, 
0)); return fval; - }, - [&optimizationController](const column_vector& x){ + [&optimizationController](column_vector const& x) { // objective function gradient double fVal = NAN; @@ -87,13 +82,16 @@ parpe::OptimizerDlibLineSearch::optimize(OptimizationProblem *problem) gsl::make_span(x), fVal, gsl::make_span(fGrad)); return fGrad; }, - startingPoint, min, max); + startingPoint, + min, + max); optimizationController->finished( finalFVal, gsl::make_span(startingPoint), 0); return std::make_tuple( - 0, finalFVal, + 0, + finalFVal, std::vector(startingPoint.begin(), startingPoint.end())); } diff --git a/src/parpeoptimization/localOptimizationFides.cpp b/src/parpeoptimization/localOptimizationFides.cpp index bb06d626c..e5b66e23f 100644 --- a/src/parpeoptimization/localOptimizationFides.cpp +++ b/src/parpeoptimization/localOptimizationFides.cpp @@ -16,18 +16,14 @@ using UnalignedUnpadded = CustomVector; namespace gsl { -template -auto -make_span(DynamicVector& dv) -{ +template +auto make_span(DynamicVector& dv) { return gsl::span(dv.data(), dv.size()); } -template -auto -make_span(DynamicVector const& dv) -{ - return gsl::span(dv.data(), dv.size()); +template +auto make_span(DynamicVector const& dv) { + return gsl::span(dv.data(), dv.size()); } } // namespace gsl @@ -35,91 +31,94 @@ make_span(DynamicVector const& dv) namespace parpe { fides::Options -get_optimization_options(OptimizationOptions const& parpe_options) -{ +get_optimization_options(OptimizationOptions const& parpe_options) { fides::Options fides_options; fides_options.maxiter = parpe_options.maxOptimizerIterations; parpe_options.for_each( - [&fides_options](const std::pair& pair, int) { - const std::string& key = pair.first; - const std::string& val = pair.second; - auto &options = fides_options; - if (key == "maxiter") { - options.maxiter = std::stoi(val); - } else if (key == "maxtime") { - options.maxtime = std::chrono::seconds(std::stoi(val)); - } else if (key == "fatol") { - options.fatol = std::stod(val); - } else if (key == "frtol") { - options.frtol = std::stod(val); - } else if (key == "xtol") { - options.xtol = std::stod(val); - } else if (key == "gatol") { - options.gatol = std::stod(val); - } else if (key == "grtol") { - options.grtol = std::stod(val); - } else if (key == "subspace_solver") { - auto result = std::find_if( - fides::subspace_dim_to_str.cbegin(), - fides::subspace_dim_to_str.cend(), - [key](const auto& kv) { return kv.second == key; }); - if (result != fides::subspace_dim_to_str.cend()) - options.subspace_solver = result->first; - else - logmessage(loglevel::warning, - "Invalid value %s provided for option " - "'subspace_solver'. Ignoring.", - val.c_str()); - } else if (key == "stepback_strategy") { - auto result = std::find_if( - fides::step_back_strategy_str.cbegin(), - fides::step_back_strategy_str.cend(), - [key](const auto& kv) { return kv.second == key; }); - if (result != fides::step_back_strategy_str.cend()) - options.stepback_strategy = result->first; - else - logmessage(loglevel::warning, - "Invalid value %s provided for option " - "'stepback_strategy'. 
Ignoring.", - val.c_str()); - } else if (key == "theta_max") { - options.theta_max = std::stoi(val); - } else if (key == "delta_init") { - options.delta_init = std::stoi(val); - } else if (key == "mu") { - options.mu = std::stoi(val); - } else if (key == "eta") { - options.eta = std::stoi(val); - } else if (key == "gamma1") { - options.gamma1 = std::stoi(val); - } else if (key == "gamma2") { - options.gamma2 = std::stoi(val); - } else if (key == "refine_stepback") { - options.refine_stepback = std::stoi(val); - } else { - logmessage(loglevel::warning, - "Ignoring unknown optimization option %s.", - key.c_str()); - return; - } - - logmessage(loglevel::debug, - "Set optimization option %s to %s.", - key.c_str(), - val.c_str()); - }, - 0); + [&fides_options]( + std::pair const& pair, int) { + std::string const& key = pair.first; + std::string const& val = pair.second; + auto& options = fides_options; + if (key == "maxiter") { + options.maxiter = std::stoi(val); + } else if (key == "maxtime") { + options.maxtime = std::chrono::seconds(std::stoi(val)); + } else if (key == "fatol") { + options.fatol = std::stod(val); + } else if (key == "frtol") { + options.frtol = std::stod(val); + } else if (key == "xtol") { + options.xtol = std::stod(val); + } else if (key == "gatol") { + options.gatol = std::stod(val); + } else if (key == "grtol") { + options.grtol = std::stod(val); + } else if (key == "subspace_solver") { + auto result = std::find_if( + fides::subspace_dim_to_str.cbegin(), + fides::subspace_dim_to_str.cend(), + [key](auto const& kv) { return kv.second == key; }); + if (result != fides::subspace_dim_to_str.cend()) + options.subspace_solver = result->first; + else + logmessage( + loglevel::warning, + "Invalid value %s provided for option " + "'subspace_solver'. Ignoring.", + val.c_str()); + } else if (key == "stepback_strategy") { + auto result = std::find_if( + fides::step_back_strategy_str.cbegin(), + fides::step_back_strategy_str.cend(), + [key](auto const& kv) { return kv.second == key; }); + if (result != fides::step_back_strategy_str.cend()) + options.stepback_strategy = result->first; + else + logmessage( + loglevel::warning, + "Invalid value %s provided for option " + "'stepback_strategy'. 
Ignoring.", + val.c_str()); + } else if (key == "theta_max") { + options.theta_max = std::stoi(val); + } else if (key == "delta_init") { + options.delta_init = std::stoi(val); + } else if (key == "mu") { + options.mu = std::stoi(val); + } else if (key == "eta") { + options.eta = std::stoi(val); + } else if (key == "gamma1") { + options.gamma1 = std::stoi(val); + } else if (key == "gamma2") { + options.gamma2 = std::stoi(val); + } else if (key == "refine_stepback") { + options.refine_stepback = std::stoi(val); + } else { + logmessage( + loglevel::warning, + "Ignoring unknown optimization option %s.", + key.c_str()); + return; + } + + logmessage( + loglevel::debug, + "Set optimization option %s to %s.", + key.c_str(), + val.c_str()); + }, + 0); return fides_options; } std::tuple> -OptimizerFides::optimize(OptimizationProblem* problem) -{ +OptimizerFides::optimize(OptimizationProblem* problem) { auto reporter = problem->getReporter(); auto numParams = - static_cast(problem->cost_fun_->numParameters()); + static_cast(problem->cost_fun_->numParameters()); DynamicVector x0(numParams); problem->fillInitialParameters(x0); @@ -138,25 +137,26 @@ OptimizerFides::optimize(OptimizationProblem* problem) DynamicVector g(x.size(), NAN); double fval = NAN; problem->cost_fun_->evaluate( - gsl::make_span(x), fval, gsl::make_span(g), nullptr, nullptr); + gsl::make_span(x), fval, gsl::make_span(g), nullptr, nullptr); return std::make_tuple(fval, g, blaze::DynamicMatrix()); }; auto fides_options = - get_optimization_options(problem->getOptimizationOptions()); + get_optimization_options(problem->getOptimizationOptions()); // TODO to config fides::BFGS hessian_approximation; fides::Optimizer optimizer( - fun, lb, ub, fides_options, &hessian_approximation); + fun, lb, ub, fides_options, &hessian_approximation); reporter->starting(x0); auto [fval, x, grad, hess] = optimizer.minimize(x0); reporter->finished(fval, x, static_cast(optimizer.exit_flag_)); - return std::make_tuple(static_cast(optimizer.exit_flag_) <= 0, - fval, - std::vector(x.begin(), x.end())); + return std::make_tuple( + static_cast(optimizer.exit_flag_) <= 0, + fval, + std::vector(x.begin(), x.end())); } } // namespace parpe diff --git a/src/parpeoptimization/localOptimizationFsqp.cpp b/src/parpeoptimization/localOptimizationFsqp.cpp index 482e8bc28..9e88c0606 100644 --- a/src/parpeoptimization/localOptimizationFsqp.cpp +++ b/src/parpeoptimization/localOptimizationFsqp.cpp @@ -1,14 +1,14 @@ #include "localOptimizationFsqp.h" -#include -#include #include +#include #include +#include #include -#include -#include #include +#include +#include extern "C" { #include @@ -16,20 +16,32 @@ extern "C" { } // callback functions types for FFSQP, see below -using objType = void (*) (integer &nparam, integer &j, doublereal *x, doublereal &fj); -using constrType = void (*) (integer &nparam, integer &j, doublereal *x, doublereal &gj); -using gradobType = void (*) (integer &nparam, integer &j, doublereal *x, doublereal *gradfj, doublereal *dummy); -using gradcnType = void (*) (integer &nparam, integer &j, doublereal *x, doublereal *gradgj, doublereal *dummy); - +using objType = + void (*)(integer& nparam, integer& j, doublereal* x, doublereal& fj); +using constrType = + void (*)(integer& nparam, integer& j, doublereal* x, doublereal& gj); +using gradobType = void (*)( + integer& nparam, + integer& j, + doublereal* x, + doublereal* gradfj, + doublereal* dummy); +using gradcnType = void (*)( + integer& nparam, + integer& j, + doublereal* x, + doublereal* gradgj, + 
doublereal* dummy); extern "C" { -// These two functions are needed for linking by ql0001, so far they were never called though -integer lnblnk_(char *a, ftnlen) { +// These two functions are needed for linking by ql0001, so far they were never +// called though +integer lnblnk_(char* a, ftnlen) { throw std::runtime_error("FSQP: lnblnk_ was called but is not implemented"); return strlen(a); } -int basout_(integer *, integer *, char *, ftnlen) { +int basout_(integer*, integer*, char*, ftnlen) { // TODO: should print? throw std::runtime_error("FSQP: basout_ was called but is not implemented"); @@ -67,26 +79,56 @@ int basout_(integer *, integer *, char *, ftnlen) { * @param gradcn * @return */ -int ffsqp_(integer &nparam, integer &nf, integer &nineqn, - integer &nineq, integer &neqn, integer &neq, integer &mode, integer & - iprint, integer &miter, integer &inform__, doublereal &bigbnd, - doublereal &eps, doublereal &epseqn, doublereal &udelta, doublereal * - bl, doublereal *bu, doublereal *x, doublereal *f, doublereal *g, - integer *iw, integer &iwsize, doublereal *w, integer &nwsize, objType - obj, constrType constr, gradobType gradob, gradcnType gradcn); - -//#include "ffsqp.c" +int ffsqp_( + integer& nparam, + integer& nf, + integer& nineqn, + integer& nineq, + integer& neqn, + integer& neq, + integer& mode, + integer& iprint, + integer& miter, + integer& inform__, + doublereal& bigbnd, + doublereal& eps, + doublereal& epseqn, + doublereal& udelta, + doublereal* bl, + doublereal* bu, + doublereal* x, + doublereal* f, + doublereal* g, + integer* iw, + integer& iwsize, + doublereal* w, + integer& nwsize, + objType obj, + constrType constr, + gradobType gradob, + gradcnType gradcn); + +// #include "ffsqp.c" } - namespace parpe { extern "C" { // FFSQP callback functions -void obj(integer &nparam, integer &j, doublereal *x, doublereal &fj); -void constr (integer &nparam, integer &j, doublereal *x, doublereal &gj); -void gradob (integer &nparam, integer &j, doublereal *x, doublereal *gradfj, doublereal *dummy); -void gradcn (integer &nparam, integer &j, doublereal *x, doublereal *gradgj, doublereal *dummy); +void obj(integer& nparam, integer& j, doublereal* x, doublereal& fj); +void constr(integer& nparam, integer& j, doublereal* x, doublereal& gj); +void gradob( + integer& nparam, + integer& j, + doublereal* x, + doublereal* gradfj, + doublereal* dummy); +void gradcn( + integer& nparam, + integer& j, + doublereal* x, + doublereal* gradgj, + doublereal* dummy); } // FFSQP print level options @@ -111,57 +153,56 @@ enum class informExitStatus { bigbndExceeded }; -// make sure float sizes match; otherwise data must be copied to new containers first +// make sure float sizes match; otherwise data must be copied to new containers +// first static_assert(sizeof(double) == sizeof(doublereal), ""); /** Mutex for managing access to FFSQP routines which are not thread-safe */ using mutexFsqpType = std::recursive_mutex; -static mutexFsqpType mutexFsqp {}; +static mutexFsqpType mutexFsqp{}; -std::unique_lock fsqpGetLock() -{ +std::unique_lock fsqpGetLock() { return std::unique_lock(mutexFsqp); } -InverseUniqueLock fsqpReleaseLock() -{ +InverseUniqueLock fsqpReleaseLock() { return InverseUniqueLock(&mutexFsqp); } - /** * @brief Wrapper for a FFSQP optimization problem. * * An instance of this class is slipped into the FFSQP callback functions. * - * NOTE: FFSQP is not thread-safe, therefore we lock a mutex when passing control to - * FFSQP routines. 
Not sure if strictly necessary, but in FFSQP code consider - * changing `static` to `static __thread` for non-`fmt_` variables. + * NOTE: FFSQP is not thread-safe, therefore we lock a mutex when passing + * control to FFSQP routines. Not sure if strictly necessary, but in FFSQP code + * consider changing `static` to `static __thread` for non-`fmt_` variables. */ class FsqpProblem { -public: - explicit FsqpProblem(OptimizationProblem *problem) - : problem(problem), - reporter(problem->getReporter()), - nparam(problem->costFun->numParameters()), - miter(problem->getOptimizationOptions().maxOptimizerIterations), - bl(std::vector(nparam)), - bu(std::vector(nparam)), - x(std::vector(nparam)), - f(std::vector(std::max(1L, nf))), - g(std::vector(std::max(1L, nineq + neq))), - iwsize(6 * nparam + 8 * std::max(1L, nineq + neq) + 7 * std::max(1L, nf) + 30), - iw(std::vector(iwsize)) + public: + explicit FsqpProblem(OptimizationProblem* problem) + : problem(problem) + , reporter(problem->getReporter()) + , nparam(problem->costFun->numParameters()) + , miter(problem->getOptimizationOptions().maxOptimizerIterations) + , bl(std::vector(nparam)) + , bu(std::vector(nparam)) + , x(std::vector(nparam)) + , f(std::vector(std::max(1L, nf))) + , g(std::vector(std::max(1L, nineq + neq))) + , iwsize( + 6 * nparam + 8 * std::max(1L, nineq + neq) + + 7 * std::max(1L, nf) + 30) + , iw(std::vector(iwsize)) { - nwsize = 4 * nparam * nparam - + 5 * std::max(1L, nineq + neq) * nparam - + 3 * std::max(1L, nf) * nparam - + 26 * (nparam + std::max(1L, nf)) - + 45 * std::max(1L, nineq + neq) + 100; - - // Reserve one more to hide a pointer to this instance behind the last parameter - // so we can use it in objective and constraint functions + nwsize = 4 * nparam * nparam + 5 * std::max(1L, nineq + neq) * nparam + + 3 * std::max(1L, nf) * nparam + + 26 * (nparam + std::max(1L, nf)) + + 45 * std::max(1L, nineq + neq) + 100; + + // Reserve one more to hide a pointer to this instance behind the last + // parameter so we can use it in objective and constraint functions w.resize(nwsize + 1); // make sure it fits auto thisthis = this; @@ -173,54 +214,89 @@ class FsqpProblem { problem->fillInitialParameters(x); problem->fillParametersMin(bl); problem->fillParametersMax(bu); - } - std::tuple > optimize() - { - if(reporter) + std::tuple> optimize() { + if (reporter) reporter->starting(x); // lock while ffsqp has control auto lock = fsqpGetLock(); - ffsqp_(nparam, nf, nineqn, nineq, neq, neqn, mode, iprint, miter, inform, - bigbnd, eps, epseqn, udelta, bl.data(), bu.data(), - x.data(), f.data(), g.data(), iw.data(), iwsize, &w[1], nwsize, - parpe::obj, parpe::constr, parpe::gradob, parpe::gradcn); - - if(reporter) + ffsqp_( + nparam, + nf, + nineqn, + nineq, + neq, + neqn, + mode, + iprint, + miter, + inform, + bigbnd, + eps, + epseqn, + udelta, + bl.data(), + bu.data(), + x.data(), + f.data(), + g.data(), + iw.data(), + iwsize, + &w[1], + nwsize, + parpe::obj, + parpe::constr, + parpe::gradob, + parpe::gradcn); + + if (reporter) reporter->finished(f[0], x, inform); - std::cout<<"Final cost "< >( - inform, - f[0], - x); + return std::tuple>(inform, f[0], x); } - void obj(integer &nparam, integer &j, doublereal *x, doublereal &fj) { + void obj(integer& nparam, integer& j, doublereal* x, doublereal& fj) { gradientDummy.resize(nparam); - reporter->evaluate(gsl::span(x, nparam), fj, gradientDummy); + reporter->evaluate( + gsl::span(x, nparam), fj, gradientDummy); - std::cout<<"np:"<evaluate(gsl::span(x, nparam), fvalDummy, 
gsl::span(gradfj, nparam)); - reporter->iterationFinished(gsl::span(x, nparam), fvalDummy, gsl::span(gradfj, nparam)); - std::cout<<"np:"<evaluate( + gsl::span(x, nparam), + fvalDummy, + gsl::span(gradfj, nparam)); + reporter->iterationFinished( + gsl::span(x, nparam), + fvalDummy, + gsl::span(gradfj, nparam)); + std::cout << "np:" << nparam << " j:" << j << " x:" << x[0] + << " gradfj:" << gradfj[0] << std::endl; } - // Once we want contraints: void gradcn (integer &nparam, integer &j, doublereal *x, doublereal *gradgj, doublereal *dummy) {} + // Once we want contraints: void gradcn (integer &nparam, integer &j, + // doublereal *x, doublereal *gradgj, doublereal *dummy) {} - OptimizationProblem *problem = nullptr; + OptimizationProblem* problem = nullptr; std::unique_ptr reporter; /* Do gradient evaluation always during `obj` and save results, @@ -283,8 +359,8 @@ class FsqpProblem { std::vector w; }; -std::tuple > OptimizerFsqp::optimize(parpe::OptimizationProblem *problem) -{ +std::tuple> +OptimizerFsqp::optimize(parpe::OptimizationProblem* problem) { FsqpProblem p(problem); return p.optimize(); } @@ -296,7 +372,8 @@ std::tuple > OptimizerFsqp::optimize(parpe::Opt * @return nwff */ constexpr int getNwff(int nparam, int j) { - return 1 + nparam*nparam /* nwhes1*/ + (nparam+1)*(nparam+1) /* nwff */ + j; + return 1 + nparam * nparam /* nwhes1*/ + + (nparam + 1) * (nparam + 1) /* nwff */ + j; } /** @@ -307,21 +384,23 @@ constexpr int getNwff(int nparam, int j) { */ constexpr int getNwgrf(int nparam, int j) { return getNwff(nparam, j) + (1 + 1) /*nwx TODO: nobj*/ - + 3 * (nparam + 1) /* nwdi, nwd, nwgm */ - + 1 /* nwgrg */ + (0 * nparam + 1) /* nwgrf */; + + 3 * (nparam + 1) /* nwdi, nwd, nwgm */ + + 1 /* nwgrg */ + (0 * nparam + 1) /* nwgrf */; } /** - * @brief Recover out data that was hidden in front of FFSQP's w. Find from provided fj. + * @brief Recover out data that was hidden in front of FFSQP's w. Find from + * provided fj. * @param fj * @param nparam * @param j * @return */ -FsqpProblem *getProblemFromFj(doublereal &fj, integer nparam, integer j) { - // need to go relative to fj because location of x changes; for position see "nwff" +FsqpProblem* getProblemFromFj(doublereal& fj, integer nparam, integer j) { + // need to go relative to fj because location of x changes; for position see + // "nwff" - parpe::FsqpProblem *problem = nullptr; + parpe::FsqpProblem* problem = nullptr; int nwff = getNwff(nparam, j); logmessage(loglevel::debug, "w0 obj: %p", &fj - nwff + 1); @@ -334,14 +413,16 @@ FsqpProblem *getProblemFromFj(doublereal &fj, integer nparam, integer j) { } /** - * @brief Recover out data that was hidden in front of FFSQP's w. Find from provided gradfj. + * @brief Recover out data that was hidden in front of FFSQP's w. Find from + * provided gradfj. * @param gradfj * @param nparam * @param j * @return */ -FsqpProblem *getProblemFromGradFj(doublereal *gradfj, integer nparam, integer j) { - parpe::FsqpProblem *problem = nullptr; +FsqpProblem* +getProblemFromGradFj(doublereal* gradfj, integer nparam, integer j) { + parpe::FsqpProblem* problem = nullptr; int nwgrf = getNwgrf(nparam, j); logmessage(loglevel::debug, "w0 gradobj: %p", gradfj - nwgrf + 1); @@ -354,7 +435,6 @@ FsqpProblem *getProblemFromGradFj(doublereal *gradfj, integer nparam, integer j) return problem; } - /** * @brief Objective function to be passed to FFSQP. 
* @param nparam Length of x @@ -362,8 +442,10 @@ FsqpProblem *getProblemFromGradFj(doublereal *gradfj, integer nparam, integer j) * @param x Parameters * @param fj (out) Function value for objective j */ -void obj(integer &nparam, integer &j, doublereal *x, doublereal &fj) { - RELEASE_ASSERT(j == 1, "Error: j > 1. Only a single objective is currently supported."); +void obj(integer& nparam, integer& j, doublereal* x, doublereal& fj) { + RELEASE_ASSERT( + j == 1, + "Error: j > 1. Only a single objective is currently supported."); auto unlockFsqp = fsqpReleaseLock(); @@ -378,8 +460,8 @@ void obj(integer &nparam, integer &j, doublereal *x, doublereal &fj) { * @param x Parameters * @param gj Value of constraint j */ -void constr (integer &nparam, integer &j, doublereal *x, doublereal &gj) { - //auto unlockFsqp = fsqpReleaseLock(); +void constr(integer& nparam, integer& j, doublereal* x, doublereal& gj) { + // auto unlockFsqp = fsqpReleaseLock(); // no constraints currently supported } @@ -392,8 +474,15 @@ void constr (integer &nparam, integer &j, doublereal *x, doublereal &gj) { * @param gradfj (out) Gradient value for objective j at x * @param dummy Passed to gradob for forward difference calculation */ -void gradob (integer &nparam, integer &j, doublereal *x, doublereal *gradfj, doublereal *dummy) { - RELEASE_ASSERT(j == 1, "Error: j > 1. Only a single objective is currently supported."); +void gradob( + integer& nparam, + integer& j, + doublereal* x, + doublereal* gradfj, + doublereal* dummy) { + RELEASE_ASSERT( + j == 1, + "Error: j > 1. Only a single objective is currently supported."); auto unlockFsqp = fsqpReleaseLock(); @@ -409,12 +498,15 @@ void gradob (integer &nparam, integer &j, doublereal *x, doublereal *gradfj, dou * @param gradgj (out) Gradient value for constraint j at x * @param dummy Passed to gradob for forward difference calculation */ -void gradcn (integer &nparam, integer &j, doublereal *x, doublereal *gradgj, doublereal* /*dummy*/) { - //auto unlockFsqp = fsqpReleaseLock(); +void gradcn( + integer& nparam, + integer& j, + doublereal* x, + doublereal* gradgj, + doublereal* /*dummy*/) { + // auto unlockFsqp = fsqpReleaseLock(); // no constraints currently supported } - } // namespace parpe - diff --git a/src/parpeoptimization/localOptimizationIpopt.cpp b/src/parpeoptimization/localOptimizationIpopt.cpp index 8f2e6506d..3786c8713 100644 --- a/src/parpeoptimization/localOptimizationIpopt.cpp +++ b/src/parpeoptimization/localOptimizationIpopt.cpp @@ -1,11 +1,11 @@ #include +#include +#include #include #include #include #include -#include -#include #include #include @@ -19,10 +19,9 @@ namespace parpe { - // https://www.coin-or.org/Ipopt/documentation/node40.html // grep -A1 -r "roptions->Add" ../ThirdParty/Ipopt-3.12.7 -const std::array strOpts = { +std::array const strOpts = { "accept_every_trial_step", "adaptive_mu_globalization", "adaptive_mu_kkt_norm_type", @@ -102,7 +101,8 @@ const std::array strOpts = { "quality_function_norm_type", "recalc_y", "replace_bounds", - "sb","print_info_string", + "sb", + "print_info_string", "skip_corr_if_neg_curv", "skip_corr_in_monotone_mode", "skip_finalize_solution_call", @@ -115,7 +115,7 @@ const std::array strOpts = { "wsmp_skip_inertia_check", }; -const std::array intOpts = { +std::array const intOpts = { "acceptable_iter", "accept_after_max_steps", "accept_every_trial_step", @@ -177,7 +177,7 @@ const std::array intOpts = { "wsmp_write_matrix_iteration", }; -const std::array dblOpts = { +std::array const dblOpts = { 
"acceptable_compl_inf_tol", "acceptable_constr_viol_tol", "acceptable_dual_inf_tol", @@ -196,7 +196,8 @@ const std::array dblOpts = { "constr_mult_reset_threshold", "constr_viol_tol", "corrector_compl_avrg_red_fact", - "delta", "Multiplier for constraint violation in the switching rule.", + "delta", + "Multiplier for constraint violation in the switching rule.", "derivative_test_perturbation", "derivative_test_tol", "diverging_iterates_tol", @@ -246,7 +247,8 @@ const std::array dblOpts = { "max_cpu_time", "max_hessian_perturbation", "min_hessian_perturbation", - "mu_init", "Initial value for the barrier parameter.", + "mu_init", + "Initial value for the barrier parameter.", "mu_linear_decrease_factor", "mu_max", "mu_max_fact", @@ -259,7 +261,8 @@ const std::array dblOpts = { "neg_curv_test_tol", "nlp_lower_bound_inf", "nlp_scaling_constr_target_gradient", - "nlp_scaling_max_gradient", "Maximum gradient after NLP scaling.", + "nlp_scaling_max_gradient", + "Maximum gradient after NLP scaling.", "nlp_scaling_min_value", "nlp_scaling_obj_target_gradient", "nlp_upper_bound_inf", @@ -330,40 +333,49 @@ const std::array dblOpts = { extern volatile sig_atomic_t caughtTerminationSignal; #endif -static_assert(sizeof(double) == sizeof(Ipopt::Number), - "Sizeof IpOpt::Number != sizeof double"); - +static_assert( + sizeof(double) == sizeof(Ipopt::Number), + "Sizeof IpOpt::Number != sizeof double"); using namespace Ipopt; - -void setIpOptOption(const std::pair &pair, SmartPtr* o) { +void setIpOptOption( + std::pair const& pair, + SmartPtr* o) { // for iterating over OptimizationOptions auto options = *o; - const std::string &key = pair.first; - const std::string &val = pair.second; + std::string const& key = pair.first; + std::string const& val = pair.second; bool success = true; - if(std::find(strOpts.begin(), strOpts.end(), key) != strOpts.end()) + if (std::find(strOpts.begin(), strOpts.end(), key) != strOpts.end()) success = options->SetStringValue(key, val); - else if(std::find(intOpts.begin(), intOpts.end(), key) != intOpts.end()) + else if (std::find(intOpts.begin(), intOpts.end(), key) != intOpts.end()) success = options->SetIntegerValue(key, std::stoi(val)); - else if(std::find(dblOpts.begin(), dblOpts.end(), key) != dblOpts.end()) + else if (std::find(dblOpts.begin(), dblOpts.end(), key) != dblOpts.end()) success = options->SetNumericValue(key, std::stod(val)); else { - logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); + logmessage( + loglevel::warning, + "Ignoring unknown optimization option %s.", + key.c_str()); return; } RELEASE_ASSERT(success, "Problem setting IpOpt option"); - logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); + logmessage( + loglevel::debug, + "Set optimization option %s to %s.", + key.c_str(), + val.c_str()); } -void setIpOptOptions(SmartPtr optionsIpOpt, - OptimizationProblem *problem) { +void setIpOptOptions( + SmartPtr optionsIpOpt, + OptimizationProblem* problem) { if (problem->getOptimizationOptions().printToStdout) { optionsIpOpt->SetIntegerValue("print_level", 5); @@ -380,13 +392,15 @@ void setIpOptOptions(SmartPtr optionsIpOpt, optionsIpOpt->SetStringValue("limited_memory_update_type", "bfgs"); optionsIpOpt->SetIntegerValue( - "max_iter", problem->getOptimizationOptions().maxOptimizerIterations); + "max_iter", problem->getOptimizationOptions().maxOptimizerIterations); // set IpOpt options from OptimizationOptions - problem->getOptimizationOptions().for_each *>(setIpOptOption, 
&optionsIpOpt); + problem->getOptimizationOptions().for_each*>( + setIpOptOption, &optionsIpOpt); } -std::tuple > OptimizerIpOpt::optimize(OptimizationProblem *problem) { +std::tuple> +OptimizerIpOpt::optimize(OptimizationProblem* problem) { ApplicationReturnStatus status = Unrecoverable_Exception; std::vector finalParameters; @@ -400,8 +414,8 @@ std::tuple > OptimizerIpOpt::optimize(Optimizat auto optimizationController = problem->getReporter(); try { - SmartPtr mynlp = - new LocalOptimizationIpoptTNLP(*problem, *optimizationController); + SmartPtr mynlp = new LocalOptimizationIpoptTNLP( + *problem, *optimizationController); SmartPtr app = IpoptApplicationFactory(); app->RethrowNonIpoptException(true); @@ -414,28 +428,35 @@ std::tuple > OptimizerIpOpt::optimize(Optimizat Expects(status == Solve_Succeeded); status = app->OptimizeTNLP(mynlp); - if(status == Invalid_Number_Detected) { + if (status == Invalid_Number_Detected) { // TODO: print where } } catch (IpoptException& e) { - logmessage(loglevel::error, "IpOpt exception: %s", e.Message().c_str()); + logmessage( + loglevel::error, "IpOpt exception: %s", e.Message().c_str()); } catch (std::exception& e) { - logmessage(loglevel::error, "Unknown exception occurred during optimization: %s", e.what()); + logmessage( + loglevel::error, + "Unknown exception occurred during optimization: %s", + e.what()); } catch (...) { - logmessage(loglevel::error, "Unknown exception occurred during optimization"); + logmessage( + loglevel::error, + "Unknown exception occurred during optimization"); } finalCost = optimizationController->getFinalCost(); finalParameters = optimizationController->getFinalParameters(); - } // TODO: need smarter way to decide if should retry or not // if((int)status < Not_Enough_Degrees_Of_Freedom) { // // should exit, retrying probably makes no sense - // throw ParPEException(std::string("Unrecoverable IpOpt problem - see messages above. Code ") + std::to_string(status)); + // throw ParPEException(std::string("Unrecoverable IpOpt problem - + // see messages above. 
Code ") + std::to_string(status)); // } - return std::tuple >((int)status < Maximum_Iterations_Exceeded, finalCost, finalParameters); + return std::tuple>( + (int)status < Maximum_Iterations_Exceeded, finalCost, finalParameters); } } // namespace parpe diff --git a/src/parpeoptimization/localOptimizationIpoptTNLP.cpp b/src/parpeoptimization/localOptimizationIpoptTNLP.cpp index a16d0d836..a13b36c29 100644 --- a/src/parpeoptimization/localOptimizationIpoptTNLP.cpp +++ b/src/parpeoptimization/localOptimizationIpoptTNLP.cpp @@ -4,26 +4,24 @@ #include #include -//#include +// #include namespace parpe { using namespace Ipopt; LocalOptimizationIpoptTNLP::LocalOptimizationIpoptTNLP( - OptimizationProblem& problem, - OptimizationReporter& reporter) - : problem(problem) - , reporter(reporter) -{} - -bool -LocalOptimizationIpoptTNLP::get_nlp_info(Index& n, - Index& m, - Index& nnz_jac_g, - Index& nnz_h_lag, - IndexStyleEnum& index_style) -{ + OptimizationProblem& problem, + OptimizationReporter& reporter) + : problem(problem) + , reporter(reporter) {} + +bool LocalOptimizationIpoptTNLP::get_nlp_info( + Index& n, + Index& m, + Index& nnz_jac_g, + Index& nnz_h_lag, + IndexStyleEnum& index_style) { n = reporter.numParameters(); m = 0; // number of constraints @@ -34,14 +32,13 @@ LocalOptimizationIpoptTNLP::get_nlp_info(Index& n, return true; } -bool -LocalOptimizationIpoptTNLP::get_bounds_info(Index n, - Number* x_l, - Number* x_u, - Index /*m*/, - Number* /*g_l*/, - Number* /*g_u*/) -{ +bool LocalOptimizationIpoptTNLP::get_bounds_info( + Index n, + Number* x_l, + Number* x_u, + Index /*m*/, + Number* /*g_l*/, + Number* /*g_u*/) { // parameter bounds problem.fillParametersMin(gsl::make_span(x_l, n)); problem.fillParametersMax(gsl::make_span(x_u, n)); @@ -51,17 +48,16 @@ LocalOptimizationIpoptTNLP::get_bounds_info(Index n, return true; } -bool -LocalOptimizationIpoptTNLP::get_starting_point(Index n, - bool init_x, - Number* x, - bool init_z, - Number* /*z_L*/, - Number* /*z_U*/, - Index m, - bool init_lambda, - Number* /*lambda*/) -{ +bool LocalOptimizationIpoptTNLP::get_starting_point( + Index n, + bool init_x, + Number* x, + bool init_z, + Number* /*z_L*/, + Number* /*z_U*/, + Index m, + bool init_lambda, + Number* /*lambda*/) { Expects(init_z == false); Expects(init_lambda == false); Expects(m == 0); @@ -83,75 +79,70 @@ LocalOptimizationIpoptTNLP::get_starting_point(Index n, return true; } -bool -LocalOptimizationIpoptTNLP::eval_f(Index n, - const Number* x, - bool /*new_x*/, - Number& obj_value) -{ +bool LocalOptimizationIpoptTNLP::eval_f( + Index n, + Number const* x, + bool /*new_x*/, + Number& obj_value) { [[maybe_unused]] auto unlockIpOpt = ipOptReleaseLock(); - return reporter.evaluate(gsl::make_span(x, n), - obj_value, - gsl::span()) == functionEvaluationSuccess; + return reporter.evaluate( + gsl::make_span(x, n), + obj_value, + gsl::span()) == functionEvaluationSuccess; } -bool -LocalOptimizationIpoptTNLP::eval_grad_f(Index n, - const Number* x, - bool /*new_x*/, - Number* grad_f) -{ +bool LocalOptimizationIpoptTNLP::eval_grad_f( + Index n, + Number const* x, + bool /*new_x*/, + Number* grad_f) { [[maybe_unused]] auto unlockIpOpt = ipOptReleaseLock(); double obj_value; - return reporter.evaluate(gsl::make_span(x, n), - obj_value, - gsl::make_span(grad_f, n)) == - functionEvaluationSuccess; + return reporter.evaluate( + gsl::make_span(x, n), + obj_value, + gsl::make_span(grad_f, n)) == functionEvaluationSuccess; } -bool -LocalOptimizationIpoptTNLP::eval_g(Index /*n*/, - const Number* /*x*/, - 
bool /*new_x*/, - Index /*m*/, - Number* /*g*/) -{ +bool LocalOptimizationIpoptTNLP::eval_g( + Index /*n*/, + Number const* /*x*/, + bool /*new_x*/, + Index /*m*/, + Number* /*g*/) { throw std::runtime_error("no constraints, should never get here"); } -bool -LocalOptimizationIpoptTNLP::eval_jac_g(Index /*n*/, - const Number* /*x*/, - bool /*new_x*/, - Index m, - Index /*nele_jac*/, - Index* /*iRow*/, - Index* /*jCol*/, - Number* /*values*/) -{ +bool LocalOptimizationIpoptTNLP::eval_jac_g( + Index /*n*/, + Number const* /*x*/, + bool /*new_x*/, + Index m, + Index /*nele_jac*/, + Index* /*iRow*/, + Index* /*jCol*/, + Number* /*values*/) { // no constraints, nothing to do here, but will be called once Expects(m == 0); // while constraints not implemented return true; } -bool -LocalOptimizationIpoptTNLP::intermediate_callback( - AlgorithmMode /*mode*/, - Index /*iter*/, - Number obj_value, - Number /*inf_pr*/, - Number /*inf_du*/, - Number /*mu*/, - Number /*d_norm*/, - Number /*regularization_size*/, - Number /*alpha_du*/, - Number /*alpha_pr*/, - Index /*ls_trials*/, - const IpoptData* ip_data, - IpoptCalculatedQuantities* /*ip_cq*/) -{ +bool LocalOptimizationIpoptTNLP::intermediate_callback( + AlgorithmMode /*mode*/, + Index /*iter*/, + Number obj_value, + Number /*inf_pr*/, + Number /*inf_du*/, + Number /*mu*/, + Number /*d_norm*/, + Number /*regularization_size*/, + Number /*alpha_du*/, + Number /*alpha_pr*/, + Index /*ls_trials*/, + IpoptData const* ip_data, + IpoptCalculatedQuantities* /*ip_cq*/) { [[maybe_unused]] auto unlockIpOpt = ipOptReleaseLock(); @@ -159,16 +150,17 @@ LocalOptimizationIpoptTNLP::intermediate_callback( gsl::span parameters; if (auto x = ip_data->curr()->x(); - auto xx = dynamic_cast(Ipopt::GetRawPtr(x))) + auto xx = dynamic_cast(Ipopt::GetRawPtr(x))) parameters = gsl::span(xx->Values(), xx->Dim()); else - logmessage(loglevel::warning, - "Not Ipopt::DenseVector in " - "LocalOptimizationIpoptTNLP::intermediate_callback"); + logmessage( + loglevel::warning, + "Not Ipopt::DenseVector in " + "LocalOptimizationIpoptTNLP::intermediate_callback"); // is always the last step accepted? int status = - reporter.iterationFinished(parameters, obj_value, gsl::span()); + reporter.iterationFinished(parameters, obj_value, gsl::span()); #ifdef INSTALL_SIGNAL_HANDLER if (caughtTerminationSignal) { @@ -180,39 +172,35 @@ LocalOptimizationIpoptTNLP::intermediate_callback( return status == 0; } -void -LocalOptimizationIpoptTNLP::finalize_solution( - SolverReturn status, - Index n, - const Number* x, - const Number* /*z_L*/, - const Number* /*z_U*/, - Index /*m*/, - const Number* /*g*/, - const Number* /*lambda*/, - Number obj_value, - const IpoptData* /*ip_data*/, - IpoptCalculatedQuantities* /*ip_cq*/) -{ +void LocalOptimizationIpoptTNLP::finalize_solution( + SolverReturn status, + Index n, + Number const* x, + Number const* /*z_L*/, + Number const* /*z_U*/, + Index /*m*/, + Number const* /*g*/, + Number const* /*lambda*/, + Number obj_value, + IpoptData const* /*ip_data*/, + IpoptCalculatedQuantities* /*ip_cq*/) { [[maybe_unused]] auto unlockIpOpt = ipOptReleaseLock(); // If we finish with objective value of NAN, IpOpt still passes // obj_value 0.0 along with the respective flag. This does not make too // much sense. Set to NAN. 
- if(status == INVALID_NUMBER_DETECTED && obj_value == 0.0) { + if (status == INVALID_NUMBER_DETECTED && obj_value == 0.0) { obj_value = std::numeric_limits::quiet_NaN(); } reporter.finished(obj_value, gsl::span(x, n), status); } -auto ipOptGetLock() -> std::unique_lock -{ +auto ipOptGetLock() -> std::unique_lock { return std::unique_lock(mutexIpOpt); } -auto ipOptReleaseLock() -> InverseUniqueLock -{ +auto ipOptReleaseLock() -> InverseUniqueLock { return InverseUniqueLock(&mutexIpOpt); } diff --git a/src/parpeoptimization/localOptimizationToms611.cpp b/src/parpeoptimization/localOptimizationToms611.cpp index 6a8343cc5..98e8caa8e 100644 --- a/src/parpeoptimization/localOptimizationToms611.cpp +++ b/src/parpeoptimization/localOptimizationToms611.cpp @@ -1,18 +1,17 @@ -#include #include "logging.h" #include "optimizationOptions.h" #include "optimizationProblem.h" #include -#include +#include #include +#include namespace parpe { static_assert(sizeof(doublereal) == sizeof(double), "Float size mismatch"); -struct sumslUserData -{ - OptimizationProblem *problem = nullptr; +struct sumslUserData { + OptimizationProblem* problem = nullptr; std::unique_ptr reporter; std::vector parametersMin; std::vector parametersMax; @@ -42,92 +41,120 @@ enum toms611realOptionsIndices { xftol = 33, }; - -void setToms611Option(const std::pair &pair, std::pair options) { +void setToms611Option( + std::pair const& pair, + std::pair options) { // for iterating over OptimizationOptions - const std::string &key = pair.first; - const std::string &val = pair.second; + std::string const& key = pair.first; + std::string const& val = pair.second; - integer *iv = options.first; - doublereal *v = options.second; + integer* iv = options.first; + doublereal* v = options.second; - if(key == "mxfcal") { + if (key == "mxfcal") { iv[mxfcal] = std::stoi(val); - } else if(key == "mxiter") { + } else if (key == "mxiter") { iv[mxiter] = std::stoi(val); - } else if(key == "outlev") { + } else if (key == "outlev") { iv[outlev] = std::stoi(val); - } else if(key == "solprt") { + } else if (key == "solprt") { iv[solprt] = std::stoi(val); - } else if(key == "x0prt") { + } else if (key == "x0prt") { iv[x0prt] = std::stoi(val); - } else if(key == "bias") { + } else if (key == "bias") { v[bias] = std::stod(val); - } else if(key == "afctol") { + } else if (key == "afctol") { v[afctol] = std::stod(val); - } else if(key == "dinit") { + } else if (key == "dinit") { v[dinit] = std::stod(val); - } else if(key == "lmax0") { + } else if (key == "lmax0") { v[lmax0] = std::stod(val); - } else if(key == "lmaxs") { + } else if (key == "lmaxs") { v[lmaxs] = std::stod(val); - } else if(key == "rfctol") { + } else if (key == "rfctol") { v[rfctol] = std::stod(val); - } else if(key == "sctol") { + } else if (key == "sctol") { v[sctol] = std::stod(val); - } else if(key == "tuner1") { + } else if (key == "tuner1") { v[tuner1] = std::stod(val); - } else if(key == "xftol") { + } else if (key == "xftol") { v[xftol] = std::stod(val); } else { - logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); + logmessage( + loglevel::warning, + "Ignoring unknown optimization option %s.", + key.c_str()); return; } - logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); + logmessage( + loglevel::debug, + "Set optimization option %s to %s.", + key.c_str(), + val.c_str()); } - -void calcf(integer const &n, doublereal const *x, integer &nf, doublereal &f, - sumslUserData *userData, doublereal *urparm, void 
*ufparm) { - - if(!withinBounds(n, x, userData->parametersMin.data(), userData->parametersMax.data())) { +void calcf( + integer const& n, + doublereal const* x, + integer& nf, + doublereal& f, + sumslUserData* userData, + doublereal* urparm, + void* ufparm) { + + if (!withinBounds( + n, + x, + userData->parametersMin.data(), + userData->parametersMax.data())) { nf = 0; // tells optimizer to choose a shorter step return; } - - auto result = userData->reporter->evaluate(gsl::span(x, n), f, gsl::span()); + auto result = userData->reporter->evaluate( + gsl::span(x, n), f, gsl::span()); *urparm = f; - if(std::isnan(f) || result != functionEvaluationSuccess) { + if (std::isnan(f) || result != functionEvaluationSuccess) { nf = 0; // tells optimizer to choose a shorter step return; } } -void calcg(integer const &n, doublereal const *x, integer &nf, doublereal *g, - sumslUserData *userData, doublereal *urparm, void *ufparm) { - - if(!withinBounds(n, x, userData->parametersMin.data(), userData->parametersMax.data())) { +void calcg( + integer const& n, + doublereal const* x, + integer& nf, + doublereal* g, + sumslUserData* userData, + doublereal* urparm, + void* ufparm) { + + if (!withinBounds( + n, + x, + userData->parametersMin.data(), + userData->parametersMax.data())) { nf = 0; // tells optimizer to choose a shorter step return; } - userData->reporter->evaluate(gsl::span(x, n), *urparm, gsl::span(g, n)); + userData->reporter->evaluate( + gsl::span(x, n), *urparm, gsl::span(g, n)); - auto result = userData->reporter->iterationFinished(gsl::span(x, n), *urparm, gsl::span(g, n)); + auto result = userData->reporter->iterationFinished( + gsl::span(x, n), *urparm, gsl::span(g, n)); - if(std::isnan(*urparm) || result != functionEvaluationSuccess) { + if (std::isnan(*urparm) || result != functionEvaluationSuccess) { nf = 0; // tells optimizer to choose a shorter step return; } } - -std::tuple > OptimizerToms611TrustRegionSumsl::optimize(OptimizationProblem *problem) -{ +std::tuple> +OptimizerToms611TrustRegionSumsl::optimize(OptimizationProblem* problem) { integer numOptimizationVariables = problem->costFun->numParameters(); // allocate toms 611 memory and set options @@ -142,24 +169,28 @@ std::tuple > OptimizerToms611TrustRegionSumsl:: std::fill(scaling, scaling + numOptimizationVariables, 1.0); // Initialize optimizer options and memory - integer deflt_algorithm = 2; /* general unconstrained optimization constants */ + integer deflt_algorithm = + 2; /* general unconstrained optimization constants */ deflt_(deflt_algorithm, iv, liv, lv, v.data()); - if(iv[0] != 12) // dflt_ success + if (iv[0] != 12) // dflt_ success throw std::exception(); // change default options auto o = problem->getOptimizationOptions(); iv[17] = o.maxOptimizerIterations; // mxiter - iv[23] = 0; // x0prt: don't print x0 and scaling - - problem->getOptimizationOptions().for_each< std::pair >(setToms611Option, std::pair(iv, v.data())); + iv[23] = 0; // x0prt: don't print x0 and scaling + problem->getOptimizationOptions() + .for_each>( + setToms611Option, std::pair(iv, v.data())); doublereal parameters[numOptimizationVariables]; - problem->fillInitialParameters(gsl::span(parameters, numOptimizationVariables)); + problem->fillInitialParameters( + gsl::span(parameters, numOptimizationVariables)); - double fval = NAN; // the last computed cost function value; is this necessarily the one for the final parameters? + double fval = NAN; // the last computed cost function value; is this + // necessarily the one for the final parameters? 
sumslUserData userData; userData.problem = problem; @@ -169,21 +200,33 @@ std::tuple > OptimizerToms611TrustRegionSumsl:: problem->fillParametersMin(userData.parametersMin); problem->fillParametersMax(userData.parametersMax); - userData.reporter->starting(gsl::span(parameters, numOptimizationVariables)); - - sumsl_(numOptimizationVariables, - scaling, - parameters, - reinterpret_cast(calcf), (S_fp)calcg, - iv, liv, - lv, v.data(), - reinterpret_cast(&userData), // sumsl_ only lets us pass integer, real or function... - &fval, nullptr); - - userData.reporter->finished(fval, gsl::span(parameters, numOptimizationVariables), iv[0]); - - return std::tuple >(iv[0] >= first_error_code, fval, std::vector(parameters, parameters + numOptimizationVariables)); + userData.reporter->starting( + gsl::span(parameters, numOptimizationVariables)); + + sumsl_( + numOptimizationVariables, + scaling, + parameters, + reinterpret_cast(calcf), + (S_fp)calcg, + iv, + liv, + lv, + v.data(), + reinterpret_cast( + &userData), // sumsl_ only lets us pass integer, real or function... + &fval, + nullptr); + + userData.reporter->finished( + fval, + gsl::span(parameters, numOptimizationVariables), + iv[0]); + + return std::tuple>( + iv[0] >= first_error_code, + fval, + std::vector(parameters, parameters + numOptimizationVariables)); } - } // namespace parpe diff --git a/src/parpeoptimization/minibatchOptimization.cpp b/src/parpeoptimization/minibatchOptimization.cpp old mode 100755 new mode 100644 index ac78dcf9c..961d70dec --- a/src/parpeoptimization/minibatchOptimization.cpp +++ b/src/parpeoptimization/minibatchOptimization.cpp @@ -4,12 +4,12 @@ namespace parpe { -double getVectorNorm(gsl::span v) { +double getVectorNorm(gsl::span v) { return std::sqrt(std::inner_product(v.begin(), v.end(), v.begin(), 0.0)); } -std::vector getVectorDifference(gsl::span v, - gsl::span w) { +std::vector +getVectorDifference(gsl::span v, gsl::span w) { Expects(v.size() == w.size()); std::vector difference(v.size(), 0.0); for (unsigned int i = 0; i < v.size(); ++i) @@ -18,10 +18,11 @@ std::vector getVectorDifference(gsl::span v, return difference; } -void setMinibatchOption(const std::pair &pair, - MinibatchOptimizer *optimizer) { - const std::string &key = pair.first; - const std::string &val = pair.second; +void setMinibatchOption( + std::pair const& pair, + MinibatchOptimizer* optimizer) { + std::string const& key = pair.first; + std::string const& val = pair.second; /* Get options from h5-file */ if (key == "maxEpochs") { @@ -42,44 +43,74 @@ void setMinibatchOption(const std::pair &p } } else if (key == "parameterUpdater") { if (val == "Vanilla") { - // already default optimizer->parameterUpdater = std::make_unique(); - } else if (val == "RmsProp" && !dynamic_cast(optimizer->parameterUpdater.get())) { - // this might have been set previously if there was an updater-specific option before - optimizer->parameterUpdater = std::make_unique(); - } else if (val == "Adam" && !dynamic_cast(optimizer->parameterUpdater.get())) { - // this might have been set previously if there was an updater-specific option before - optimizer->parameterUpdater = std::make_unique(); - } else if (val == "AdamClassic" && !dynamic_cast(optimizer->parameterUpdater.get())) { - // this might have been set previously if there was an updater-specific option before - optimizer->parameterUpdater = std::make_unique(); + // already default optimizer->parameterUpdater = + // std::make_unique(); + } else if ( + val == "RmsProp" && !dynamic_cast( + 
optimizer->parameterUpdater.get())) { + // this might have been set previously if there was an + // updater-specific option before + optimizer->parameterUpdater = + std::make_unique(); + } else if ( + val == "Adam" && !dynamic_cast( + optimizer->parameterUpdater.get())) { + // this might have been set previously if there was an + // updater-specific option before + optimizer->parameterUpdater = + std::make_unique(); + } else if ( + val == "AdamClassic" && !dynamic_cast( + optimizer->parameterUpdater.get())) { + // this might have been set previously if there was an + // updater-specific option before + optimizer->parameterUpdater = + std::make_unique(); } else { - logmessage(loglevel::warning, "Ignoring unknown Minibatch parameterUpdater %s.", val.c_str()); + logmessage( + loglevel::warning, + "Ignoring unknown Minibatch parameterUpdater %s.", + val.c_str()); } } else if (key == "learningRateInterpMode") { if (val == "linear") { - optimizer->learningRateUpdater = std::make_unique < LearningRateUpdater - > (optimizer->maxEpochs, parpe::learningRateInterp::linear); + optimizer->learningRateUpdater = + std::make_unique( + optimizer->maxEpochs, parpe::learningRateInterp::linear); } else if (val == "inverseLinear") { - optimizer->learningRateUpdater = std::make_unique < LearningRateUpdater - > (optimizer->maxEpochs, parpe::learningRateInterp::inverseLinear); + optimizer->learningRateUpdater = + std::make_unique( + optimizer->maxEpochs, + parpe::learningRateInterp::inverseLinear); } else if (val == "logarithmic") { - optimizer->learningRateUpdater = std::make_unique < LearningRateUpdater - > (optimizer->maxEpochs, parpe::learningRateInterp::logarithmic); + optimizer->learningRateUpdater = + std::make_unique( + optimizer->maxEpochs, + parpe::learningRateInterp::logarithmic); } } else if (key == "startLearningRate") { - optimizer->learningRateUpdater->setStartLearningRate(std::stod(val)); + optimizer->learningRateUpdater->setStartLearningRate(std::stod(val)); } else if (key == "endLearningRate") { - optimizer->learningRateUpdater->setEndLearningRate(std::stod(val)); + optimizer->learningRateUpdater->setEndLearningRate(std::stod(val)); } else { - logmessage(loglevel::warning, "Ignoring unknown optimization option %s.", key.c_str()); + logmessage( + loglevel::warning, + "Ignoring unknown optimization option %s.", + key.c_str()); return; } - logmessage(loglevel::debug, "Set optimization option %s to %s.", key.c_str(), val.c_str()); + logmessage( + loglevel::debug, + "Set optimization option %s to %s.", + key.c_str(), + val.c_str()); } -std::tuple > runMinibatchOptimization(MinibatchOptimizationProblem *problem) { - auto minibatchOptimizer = getMinibatchOptimizer(problem->getOptimizationOptions()); +std::tuple> +runMinibatchOptimization(MinibatchOptimizationProblem* problem) { + auto minibatchOptimizer = + getMinibatchOptimizer(problem->getOptimizationOptions()); auto costFun = problem->getGradientFunction(); @@ -94,30 +125,42 @@ std::tuple > runMinibatchOptimization(Minibatch auto data = problem->getTrainingData(); - return minibatchOptimizer->optimize(*costFun, data, initialParameters, lowerParameterBounds, upperParameterBounds, - problem->getReporter().get(), problem->logger_.get()); + return minibatchOptimizer->optimize( + *costFun, + data, + initialParameters, + lowerParameterBounds, + upperParameterBounds, + problem->getReporter().get(), + problem->logger_.get()); } - -LearningRateUpdater::LearningRateUpdater(int maxEpochs, learningRateInterp learningRateInterpMode) - :maxEpochs(maxEpochs), - 
learningRateInterpMode(learningRateInterpMode) -{ -} +LearningRateUpdater::LearningRateUpdater( + int maxEpochs, + learningRateInterp learningRateInterpMode) + : maxEpochs(maxEpochs) + , learningRateInterpMode(learningRateInterpMode) {} void LearningRateUpdater::updateLearningRate(int currentEpoch) { - // Depending on the interpolation mode the current learning rate computed must be... + // Depending on the interpolation mode the current learning rate computed + // must be... if (learningRateInterpMode == learningRateInterp::linear) { - currentLearningRate = startLearningRate - - (startLearningRate - endLearningRate) * ((double) currentEpoch) / ((double) maxEpochs - 1); + currentLearningRate = + startLearningRate - (startLearningRate - endLearningRate) * + ((double)currentEpoch) / + ((double)maxEpochs - 1); } else if (learningRateInterpMode == learningRateInterp::inverseLinear) { - currentLearningRate = 1 / startLearningRate - - (1 / startLearningRate - 1 / endLearningRate) * ((double) currentEpoch) / ((double) maxEpochs - 1); + currentLearningRate = 1 / startLearningRate - + (1 / startLearningRate - 1 / endLearningRate) * + ((double)currentEpoch) / + ((double)maxEpochs - 1); currentLearningRate = 1 / currentLearningRate; } else if (learningRateInterpMode == learningRateInterp::logarithmic) { - currentLearningRate = log(startLearningRate) - - (log(startLearningRate) - log(endLearningRate)) * ((double) currentEpoch) / ((double) maxEpochs - 1); + currentLearningRate = log(startLearningRate) - + (log(startLearningRate) - log(endLearningRate)) * + ((double)currentEpoch) / + ((double)maxEpochs - 1); currentLearningRate = exp(currentLearningRate); } } @@ -144,13 +187,11 @@ void LearningRateUpdater::setMaxEpochs(int newMaxEpochs) { maxEpochs = newMaxEpochs; } -void LearningRateUpdater::setStartLearningRate(double learningRate) -{ +void LearningRateUpdater::setStartLearningRate(double learningRate) { startLearningRate = learningRate; } -void LearningRateUpdater::setEndLearningRate(double learningRate) -{ +void LearningRateUpdater::setEndLearningRate(double learningRate) { endLearningRate = learningRate; } @@ -161,28 +202,32 @@ void ParameterUpdaterRmsProp::initialize(unsigned int numParameters) { std::fill(oldGradientNormCache.begin(), oldGradientNormCache.end(), 0.0); } -void ParameterUpdaterRmsProp::updateParameters(double learningRate, - int /*iteration*/, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds, - gsl::span upperBounds) { +void ParameterUpdaterRmsProp::updateParameters( + double learningRate, + int /*iteration*/, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds, + gsl::span upperBounds) { int numParameters = gradient.size(); oldGradientNormCache = gradientNormCache; for (int i = 0; i < numParameters; ++i) { - gradientNormCache[i] = decayRate * gradientNormCache[i] + (1 - decayRate) * gradient[i] * gradient[i]; + gradientNormCache[i] = decayRate * gradientNormCache[i] + + (1 - decayRate) * gradient[i] * gradient[i]; - parameters[i] += -learningRate * gradient[i] / (std::sqrt(gradientNormCache[i]) + delta); + parameters[i] += -learningRate * gradient[i] / + (std::sqrt(gradientNormCache[i]) + delta); } clipToBounds(lowerBounds, upperBounds, parameters); } void ParameterUpdaterRmsProp::undoLastStep() { - // The cached gradient norm needs to be restored, since the new one is probably NaN + // The cached gradient norm needs to be restored, since the new one is + // probably NaN gradientNormCache = oldGradientNormCache; } @@ -199,12 +244,13 @@ void 
ParameterUpdaterMomentum::initialize(unsigned int numParameters) { std::fill(oldMomentum.begin(), oldMomentum.end(), 0.0); } -void ParameterUpdaterMomentum::updateParameters(double learningRate, - int /*iteration*/, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds, - gsl::span upperBounds) { +void ParameterUpdaterMomentum::updateParameters( + double learningRate, + int /*iteration*/, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds, + gsl::span upperBounds) { int numParameters = gradient.size(); oldMomentum = momentum; @@ -221,7 +267,8 @@ void ParameterUpdaterMomentum::updateParameters(double learningRate, } void ParameterUpdaterMomentum::undoLastStep() { - // The cached gradient norm needs to be restored, since the new one is probably NaN + // The cached gradient norm needs to be restored, since the new one is + // probably NaN momentum = oldMomentum; } @@ -230,7 +277,6 @@ void ParameterUpdaterMomentum::clearCache() { std::fill(momentum.begin(), momentum.end(), 0.0); } - void ParameterUpdaterAdam::initialize(unsigned int numParameters) { gradientCache.resize(numParameters); gradientNormCache.resize(numParameters); @@ -242,12 +288,13 @@ void ParameterUpdaterAdam::initialize(unsigned int numParameters) { std::fill(oldGradientCache.begin(), oldGradientCache.end(), 0.0); } -void ParameterUpdaterAdam::updateParameters(double learningRate, - int iteration, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds, - gsl::span upperBounds) { +void ParameterUpdaterAdam::updateParameters( + double learningRate, + int iteration, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds, + gsl::span upperBounds) { int numParameters = gradient.size(); double tmpNumerator; @@ -258,13 +305,19 @@ void ParameterUpdaterAdam::updateParameters(double learningRate, for (int i = 0; i < numParameters; ++i) { // compute new steps from last gradient information - gradientCache[i] = decayRateGradient * gradientCache[i] + (1 - decayRateGradient) * gradient[i]; - gradientNormCache[i] = decayRateGradientNorm * gradientNormCache[i] - + (1 - decayRateGradientNorm) * gradient[i] * gradient[i]; - - tmpNumerator = gradientCache[i] / (1 - std::pow(decayRateGradient, (double) iteration)); - tmpDenominator = std::sqrt(gradientNormCache[i] / (1 - std::pow(decayRateGradientNorm, (double) iteration))) - + delta; + gradientCache[i] = decayRateGradient * gradientCache[i] + + (1 - decayRateGradient) * gradient[i]; + gradientNormCache[i] = + decayRateGradientNorm * gradientNormCache[i] + + (1 - decayRateGradientNorm) * gradient[i] * gradient[i]; + + tmpNumerator = gradientCache[i] / + (1 - std::pow(decayRateGradient, (double)iteration)); + tmpDenominator = + std::sqrt( + gradientNormCache[i] / + (1 - std::pow(decayRateGradientNorm, (double)iteration))) + + delta; parameters[i] += -learningRate * tmpNumerator / tmpDenominator; } @@ -273,7 +326,8 @@ void ParameterUpdaterAdam::updateParameters(double learningRate, } void ParameterUpdaterAdam::undoLastStep() { - // The cached gradient norm needs to be restored, since the new one is probably NaN + // The cached gradient norm needs to be restored, since the new one is + // probably NaN gradientNormCache = oldGradientNormCache; gradientCache = oldGradientCache; } @@ -297,12 +351,13 @@ void ParameterUpdaterAdamClassic::initialize(unsigned int numParameters) { std::fill(oldGradientCache.begin(), oldGradientCache.end(), 0.0); } -void ParameterUpdaterAdamClassic::updateParameters(double learningRate, - int iteration, - 
gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds, - gsl::span upperBounds) { +void ParameterUpdaterAdamClassic::updateParameters( + double learningRate, + int iteration, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds, + gsl::span upperBounds) { int numParameters = gradient.size(); double tmpNumerator; @@ -313,13 +368,19 @@ void ParameterUpdaterAdamClassic::updateParameters(double learningRate, for (int i = 0; i < numParameters; ++i) { // compute new steps from last gradient information - gradientCache[i] = decayRateGradient * gradientCache[i] + (1 - decayRateGradient) * gradient[i]; - gradientNormCache[i] = decayRateGradientNorm * gradientNormCache[i] - + (1 - decayRateGradientNorm) * gradient[i] * gradient[i]; - - tmpNumerator = gradientCache[i] / (1 - std::pow(decayRateGradient, (double) iteration)); - tmpDenominator = std::sqrt(gradientNormCache[i] / (1 - std::pow(decayRateGradientNorm, (double) iteration))) - + delta; + gradientCache[i] = decayRateGradient * gradientCache[i] + + (1 - decayRateGradient) * gradient[i]; + gradientNormCache[i] = + decayRateGradientNorm * gradientNormCache[i] + + (1 - decayRateGradientNorm) * gradient[i] * gradient[i]; + + tmpNumerator = gradientCache[i] / + (1 - std::pow(decayRateGradient, (double)iteration)); + tmpDenominator = + std::sqrt( + gradientNormCache[i] / + (1 - std::pow(decayRateGradientNorm, (double)iteration))) + + delta; parameters[i] += -learningRate * tmpNumerator / tmpDenominator; } @@ -328,7 +389,8 @@ void ParameterUpdaterAdamClassic::updateParameters(double learningRate, } void ParameterUpdaterAdamClassic::undoLastStep() { - // The cached gradient norm needs to be restored, since the new one is probably NaN + // The cached gradient norm needs to be restored, since the new one is + // probably NaN gradientNormCache = oldGradientNormCache; gradientCache = oldGradientCache; } @@ -341,28 +403,32 @@ void ParameterUpdaterAdamClassic::clearCache() { std::fill(oldGradientCache.begin(), oldGradientCache.end(), 0.0); } - -void ParameterUpdaterVanilla::updateParameters(double learningRate, - int /*iteration*/, - gsl::span gradient, - gsl::span parameters, - gsl::span lowerBounds, - gsl::span upperBounds) { +void ParameterUpdaterVanilla::updateParameters( + double learningRate, + int /*iteration*/, + gsl::span gradient, + gsl::span parameters, + gsl::span lowerBounds, + gsl::span upperBounds) { int numParameters = gradient.size(); double delta = 1e-8; for (int i = 0; i < numParameters; ++i) { - // logmessage(LOGLVL_DEBUG, "p_%d: %f - %f = %f", i, parameters[i], learningRate * gradient[i], parameters[i] - learningRate * gradient[i]); - parameters[i] -= learningRate * gradient[i] / (getVectorNorm(gradient) + delta); + // logmessage(LOGLVL_DEBUG, "p_%d: %f - %f = %f", i, parameters[i], + // learningRate * gradient[i], parameters[i] - learningRate * + // gradient[i]); + parameters[i] -= + learningRate * gradient[i] / (getVectorNorm(gradient) + delta); } clipToBounds(lowerBounds, upperBounds, parameters); } -void ParameterUpdaterVanilla::initialize([[maybe_unused]] unsigned int numParameters) {} +void ParameterUpdaterVanilla::initialize( + [[maybe_unused]] unsigned int numParameters) {} void ParameterUpdaterVanilla::clearCache() {} void ParameterUpdaterVanilla::undoLastStep() {} -} +} // namespace parpe diff --git a/src/parpeoptimization/multiStartOptimization.cpp b/src/parpeoptimization/multiStartOptimization.cpp index 8580d4492..1e354a138 100644 --- a/src/parpeoptimization/multiStartOptimization.cpp +++ 
b/src/parpeoptimization/multiStartOptimization.cpp @@ -1,8 +1,8 @@ #include -#include #include #include +#include #include #include @@ -10,19 +10,15 @@ namespace parpe { - MultiStartOptimization::MultiStartOptimization( - MultiStartOptimizationProblem &problem, + MultiStartOptimizationProblem& problem, bool runParallel, int first_start_idx) - : msProblem(problem), - numberOfStarts(problem.getNumberOfStarts()), - restartOnFailure(problem.restartOnFailure()), - runParallel(runParallel), - first_start_idx(first_start_idx) -{ - -} + : msProblem(problem) + , numberOfStarts(problem.getNumberOfStarts()) + , restartOnFailure(problem.restartOnFailure()) + , runParallel(runParallel) + , first_start_idx(first_start_idx) {} void MultiStartOptimization::run() { if (runParallel) @@ -31,20 +27,21 @@ void MultiStartOptimization::run() { runSingleThreaded(); } -void MultiStartOptimization::runMultiThreaded() const -{ +void MultiStartOptimization::runMultiThreaded() const { // Determine thread pool size // (note that hardware_concurrency() may return 0) auto num_threads = std::max(std::thread::hardware_concurrency(), 1U); - if(auto env = std::getenv("PARPE_NUM_PARALLEL_STARTS")) { + if (auto env = std::getenv("PARPE_NUM_PARALLEL_STARTS")) { num_threads = std::stoi(env); } - num_threads = std::min(num_threads, - static_cast(numberOfStarts)); + num_threads = + std::min(num_threads, static_cast(numberOfStarts)); - logmessage(loglevel::debug, - "Running %d starts using %d threads", - numberOfStarts, num_threads); + logmessage( + loglevel::debug, + "Running %d starts using %d threads", + numberOfStarts, + num_threads); boost::asio::thread_pool pool(num_threads); @@ -56,25 +53,23 @@ void MultiStartOptimization::runMultiThreaded() const std::vector>> futures; futures.reserve(numberOfStarts); for (int start_idx = 0; start_idx < numberOfStarts; ++start_idx) { - futures.push_back( - boost::asio::post( - pool, - std::packaged_task()>([this, start_idx] { - return std::make_pair(start_idx, runStart(start_idx)); - }))); + futures.push_back(boost::asio::post( + pool, std::packaged_task()>([this, start_idx] { + return std::make_pair(start_idx, runStart(start_idx)); + }))); ++lastStartIdx; } // Report finished runs and restart if necessary - while ((restartOnFailure && num_successful_starts < numberOfStarts) - || (!restartOnFailure && num_finished_starts < numberOfStarts)) { - for (auto &future: futures) { + while ((restartOnFailure && num_successful_starts < numberOfStarts) || + (!restartOnFailure && num_finished_starts < numberOfStarts)) { + for (auto& future : futures) { // future value might have been retrieved before - if(!future.valid()) { + if (!future.valid()) { continue; } - if(auto status = future.wait_for(std::chrono::milliseconds(1)); + if (auto status = future.wait_for(std::chrono::milliseconds(1)); status != std::future_status::ready) { continue; } @@ -84,29 +79,34 @@ void MultiStartOptimization::runMultiThreaded() const if (retval == 0) { // success - logmessage(loglevel::debug, - "Optimization #%d finished successfully", - start_idx); + logmessage( + loglevel::debug, + "Optimization #%d finished successfully", + start_idx); ++num_successful_starts; } else if (!restartOnFailure) { // failure, no new start - logmessage(loglevel::debug, - "Optimization ms #%d finished " - "unsuccessfully. Not trying " - "new starting point.", - start_idx); + logmessage( + loglevel::debug, + "Optimization ms #%d finished " + "unsuccessfully. 
Not trying " + "new starting point.", + start_idx); } else { // failure, new start - logmessage(loglevel::debug, - "Thread ms #%d finished unsuccessfully... " - "trying new starting point", start_idx); + logmessage( + loglevel::debug, + "Thread ms #%d finished unsuccessfully... " + "trying new starting point", + start_idx); ++lastStartIdx; future = boost::asio::post( pool, std::packaged_task()>( - [this, start_idx=lastStartIdx] { - return std::make_pair(start_idx, runStart(start_idx)); + [this, start_idx = lastStartIdx] { + return std::make_pair( + start_idx, runStart(start_idx)); })); } } @@ -117,31 +117,34 @@ void MultiStartOptimization::runMultiThreaded() const logmessage(loglevel::debug, "Multi-start optimization finished."); } -void MultiStartOptimization::runSingleThreaded() -{ - logmessage(loglevel::debug, - "Starting runParallelMultiStartOptimization with %d starts sequentially", - numberOfStarts); +void MultiStartOptimization::runSingleThreaded() { + logmessage( + loglevel::debug, + "Starting runParallelMultiStartOptimization with %d starts " + "sequentially", + numberOfStarts); int ms = 0; int numSucceeded = 0; - while(true) { - if(restartOnFailure && numSucceeded == numberOfStarts) + while (true) { + if (restartOnFailure && numSucceeded == numberOfStarts) break; - if(ms == numberOfStarts) + if (ms == numberOfStarts) break; auto result = runStart(ms); - if(result) { - logmessage(loglevel::debug, - "Start #%d finished successfully", ms); + if (result) { + logmessage(loglevel::debug, "Start #%d finished successfully", ms); ++numSucceeded; } else { - logmessage(loglevel::debug, "Start ms #%d finished " - "unsuccessfully.",ms); + logmessage( + loglevel::debug, + "Start ms #%d finished " + "unsuccessfully.", + ms); } ++ms; } @@ -149,15 +152,12 @@ void MultiStartOptimization::runSingleThreaded() logmessage(loglevel::debug, "runParallelMultiStartOptimization finished"); } -void MultiStartOptimization::setRunParallel(bool runParallel) -{ +void MultiStartOptimization::setRunParallel(bool runParallel) { this->runParallel = runParallel; } -int MultiStartOptimization::runStart(int start_idx) const -{ - logmessage(loglevel::debug, - "Starting local optimization #%d", start_idx); +int MultiStartOptimization::runStart(int start_idx) const { + logmessage(loglevel::debug, "Starting local optimization #%d", start_idx); auto problem = msProblem.getLocalProblem(first_start_idx + start_idx); return getLocalOptimum(problem.get()); diff --git a/src/parpeoptimization/optimizationOptions.cpp b/src/parpeoptimization/optimizationOptions.cpp index 70689ed6a..aa76266e6 100644 --- a/src/parpeoptimization/optimizationOptions.cpp +++ b/src/parpeoptimization/optimizationOptions.cpp @@ -30,10 +30,10 @@ #include #include -#include #include -#include +#include #include +#include #include #include @@ -42,20 +42,20 @@ namespace parpe { // Workaround for missing to_string on some systems namespace patch { -template std::string to_string(const T &n) { +template std::string to_string(T const& n) { std::ostringstream stm; stm << n; return stm.str(); } } // namespace patch - -void optimizationOptionsFromAttribute(H5::H5Object & loc, - const H5std_string attr_name, - void *op_data) { +void optimizationOptionsFromAttribute( + H5::H5Object& loc, + H5std_string const attr_name, + void* op_data) { // iterate over attributes and add to OptimizationOptions - auto *o = static_cast(op_data); + auto* o = static_cast(op_data); [[maybe_unused]] auto lock = hdf5MutexGetLock(); @@ -70,7 +70,6 @@ void 
optimizationOptionsFromAttribute(H5::H5Object & loc, a.read(nativeType, buf); H5Tclose(nativeType); - if (typeClass == H5T_STRING) { // NOTE: only works for (fixed-length?) ASCII strings, no unicode // -> in python use np.string_("bla") @@ -89,11 +88,13 @@ std::unique_ptr OptimizationOptions::createOptimizer() const { return optimizerFactory(optimizer); } -std::unique_ptr OptimizationOptions::fromHDF5(const std::string &fileName) { +std::unique_ptr +OptimizationOptions::fromHDF5(std::string const& fileName) { return fromHDF5(hdf5OpenForReading(fileName)); } -std::unique_ptr OptimizationOptions::fromHDF5(const H5::H5File &file, std::string const& path) { +std::unique_ptr +OptimizationOptions::fromHDF5(const H5::H5File& file, std::string const& path) { auto o = std::make_unique(); auto hdf5path = path.c_str(); @@ -111,28 +112,35 @@ std::unique_ptr OptimizationOptions::fromHDF5(const H5::H5F } if (hdf5AttributeExists(file, hdf5path, "retryOptimization")) { - H5LTget_attribute_int(fileId, hdf5path, "retryOptimization", - &o->retryOptimization); + H5LTget_attribute_int( + fileId, hdf5path, "retryOptimization", &o->retryOptimization); } if (hdf5AttributeExists(file, hdf5path, "hierarchicalOptimization")) { - H5LTget_attribute_int(fileId, hdf5path, "hierarchicalOptimization", - &o->hierarchicalOptimization); + H5LTget_attribute_int( + fileId, + hdf5path, + "hierarchicalOptimization", + &o->hierarchicalOptimization); } if (hdf5AttributeExists(file, hdf5path, "multistartsInParallel")) { - H5LTget_attribute_int(fileId, hdf5path, "multistartsInParallel", - &o->multistartsInParallel); + H5LTget_attribute_int( + fileId, + hdf5path, + "multistartsInParallel", + &o->multistartsInParallel); } if (hdf5AttributeExists(file, hdf5path, "maxIter")) { // this value is overwritten by any optimizer-specific configuration - H5LTget_attribute_int(fileId, hdf5path, "maxIter", &o->maxOptimizerIterations); + H5LTget_attribute_int( + fileId, hdf5path, "maxIter", &o->maxOptimizerIterations); } std::string optimizerPath; - switch(o->optimizer) { + switch (o->optimizer) { case optimizerName::OPTIMIZER_FIDES: optimizerPath = std::string(hdf5path) + "/fides"; break; @@ -150,7 +158,7 @@ std::unique_ptr OptimizationOptions::fromHDF5(const H5::H5F optimizerPath = std::string(hdf5path) + "/ipopt"; } - if(hdf5GroupExists(file, optimizerPath)) { + if (hdf5GroupExists(file, optimizerPath)) { auto group = file.openGroup(optimizerPath); group.iterateAttrs(optimizationOptionsFromAttribute, nullptr, o.get()); } @@ -169,8 +177,8 @@ std::unique_ptr OptimizationOptions::fromHDF5(const H5::H5F * @return The selected starting point or NULL if the dataset did not exist or * had less columns than `ìndex` */ -std::vector OptimizationOptions::getStartingPoint(H5::H5File const& file, - int index) { +std::vector +OptimizationOptions::getStartingPoint(H5::H5File const& file, int index) { std::vector startingPoint; auto path = "/optimizationOptions/randomStarts"; @@ -187,30 +195,39 @@ std::vector OptimizationOptions::getStartingPoint(H5::H5File const& file auto dataset = file.openDataSet(path); // read dimensions auto dataspace = dataset.getSpace(); - const int ndims = dataspace.getSimpleExtentNdims(); + int const ndims = dataspace.getSimpleExtentNdims(); Expects(ndims == 2); hsize_t dims[ndims]; dataspace.getSimpleExtentDims(dims); if (dims[1] < static_cast(index)) { - logmessage(loglevel::debug, - "Requested starting point index %d out of bounds (%d)", - index, static_cast(dims[1])); + logmessage( + loglevel::debug, + "Requested starting point 
index %d out of bounds (%d)", + index, + static_cast(dims[1])); return startingPoint; } - logmessage(loglevel::info, "Reading random initial theta %d from %s", - index, path); + logmessage( + loglevel::info, + "Reading random initial theta %d from %s", + index, + path); startingPoint.resize(dims[0]); - hdf5Read2DDoubleHyperslab(file, path, dims[0], 1, 0, index, - startingPoint); - + hdf5Read2DDoubleHyperslab( + file, path, dims[0], 1, 0, index, startingPoint); - } catch (H5::Exception const&) { + } catch (H5::Exception const&) { if (H5Eget_num(H5E_DEFAULT)) { - logmessage(loglevel::error, - "Problem in OptimizationOptions::getStartingPoint"); - H5Ewalk2(H5E_DEFAULT, H5E_WALK_DOWNWARD, hdf5ErrorStackWalker_cb, nullptr); + logmessage( + loglevel::error, + "Problem in OptimizationOptions::getStartingPoint"); + H5Ewalk2( + H5E_DEFAULT, + H5E_WALK_DOWNWARD, + hdf5ErrorStackWalker_cb, + nullptr); } } H5_RESTORE_ERROR_HANDLER; @@ -227,49 +244,43 @@ std::string OptimizationOptions::toString() { s += "\n"; for_each( - [](const std::pair pair, - std::string &out) - { - out = out + pair.first + ": " + pair.second + "\n"; - }, s); + [](std::pair const pair, + std::string& out) { + out = out + pair.first + ": " + pair.second + "\n"; + }, + s); return s; } -int OptimizationOptions::getIntOption(std::string const& key) -{ +int OptimizationOptions::getIntOption(std::string const& key) { return std::stoi(options[key]); } -double OptimizationOptions::getDoubleOption(std::string const& key) -{ +double OptimizationOptions::getDoubleOption(std::string const& key) { return std::stod(options[key]); } -std::string OptimizationOptions::getStringOption(const std::string& key) -{ +std::string OptimizationOptions::getStringOption(std::string const& key) { return options[key]; } -void OptimizationOptions::setOption(std::string const& key, int value) -{ +void OptimizationOptions::setOption(std::string const& key, int value) { options[key] = std::to_string(value); } -void OptimizationOptions::setOption(std::string const& key, double value) -{ +void OptimizationOptions::setOption(std::string const& key, double value) { std::ostringstream out; - out << std::setprecision(std::numeric_limits::max_digits10) << value; + out << std::setprecision(std::numeric_limits::max_digits10) + << value; options[key] = out.str(); } -void OptimizationOptions::setOption(const std::string& key, std::string value) -{ +void OptimizationOptions::setOption(std::string const& key, std::string value) { options[key] = std::move(value); } -std::unique_ptr optimizerFactory(optimizerName optimizer) -{ +std::unique_ptr optimizerFactory(optimizerName optimizer) { switch (optimizer) { case optimizerName::OPTIMIZER_FIDES: #ifdef PARPE_ENABLE_FIDES @@ -315,86 +326,84 @@ std::unique_ptr optimizerFactory(optimizerName optimizer) return nullptr; } - -void printAvailableOptimizers(std::string const& prefix) -{ - optimizerName optimizer {optimizerName::OPTIMIZER_IPOPT}; +void printAvailableOptimizers(std::string const& prefix) { + optimizerName optimizer{optimizerName::OPTIMIZER_IPOPT}; // Note: Keep fall-through switch statement, so compiler will warn us about // any addition to optimizerName switch (optimizer) { case optimizerName::OPTIMIZER_FIDES: #ifdef PARPE_ENABLE_FIDES - std::cout<(optimizerName::OPTIMIZER_FIDES) - <<" enabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_FIDES\t" + << static_cast(optimizerName::OPTIMIZER_FIDES) + << " enabled\n"; #else - std::cout<(optimizerName::OPTIMIZER_FIDES) - <<" disabled\n"; + std::cout << 
prefix << std::left << std::setw(22) << "OPTIMIZER_FIDES" + << static_cast(optimizerName::OPTIMIZER_FIDES) + << " disabled\n"; #endif [[fallthrough]]; case optimizerName::OPTIMIZER_IPOPT: #ifdef PARPE_ENABLE_IPOPT - std::cout<(optimizerName::OPTIMIZER_IPOPT) - <<" enabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_IPOPT\t" + << static_cast(optimizerName::OPTIMIZER_IPOPT) + << " enabled\n"; #else - std::cout<(optimizerName::OPTIMIZER_IPOPT) - <<" disabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_IPOPT" + << static_cast(optimizerName::OPTIMIZER_IPOPT) + << " disabled\n"; #endif [[fallthrough]]; case optimizerName::OPTIMIZER_CERES: #ifdef PARPE_ENABLE_CERES - std::cout<(optimizerName::OPTIMIZER_CERES) - <<" enabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_CERES" + << static_cast(optimizerName::OPTIMIZER_CERES) + << " enabled\n"; #else - std::cout<(optimizerName::OPTIMIZER_CERES) - <<" disabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_CERES" + << static_cast(optimizerName::OPTIMIZER_CERES) + << " disabled\n"; #endif [[fallthrough]]; case optimizerName::OPTIMIZER_DLIB: #ifdef PARPE_ENABLE_DLIB - std::cout<(optimizerName::OPTIMIZER_DLIB) - <<" enabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_DLIB" + << static_cast(optimizerName::OPTIMIZER_DLIB) + << " enabled\n"; #else - std::cout<(optimizerName::OPTIMIZER_DLIB) - <<" disabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_DLIB" + << static_cast(optimizerName::OPTIMIZER_DLIB) + << " disabled\n"; #endif [[fallthrough]]; case optimizerName::OPTIMIZER_TOMS611: #ifdef PARPE_ENABLE_TOMS611 - std::cout<(optimizerName::OPTIMIZER_TOMS611) - <<" enabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_TOMS611" + << static_cast(optimizerName::OPTIMIZER_TOMS611) + << " enabled\n"; #else - std::cout<(optimizerName::OPTIMIZER_TOMS611) - <<" disabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_TOMS611" + << static_cast(optimizerName::OPTIMIZER_TOMS611) + << " disabled\n"; #endif [[fallthrough]]; case optimizerName::OPTIMIZER_FSQP: #ifdef PARPE_ENABLE_FSQP - std::cout<(optimizerName::OPTIMIZER_FSQP) - <<" enabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_FSQP" + << static_cast(optimizerName::OPTIMIZER_FSQP) + << " enabled\n"; #else - std::cout<(optimizerName::OPTIMIZER_FSQP) - <<" disabled\n"; + std::cout << prefix << std::left << std::setw(22) << "OPTIMIZER_FSQP" + << static_cast(optimizerName::OPTIMIZER_FSQP) + << " disabled\n"; #endif [[fallthrough]]; case optimizerName::OPTIMIZER_MINIBATCH_1: - std::cout<(optimizerName::OPTIMIZER_MINIBATCH_1) - <<" enabled\n"; + std::cout << prefix << std::left << std::setw(22) + << "OPTIMIZER_MINIBATCH_1" + << static_cast(optimizerName::OPTIMIZER_MINIBATCH_1) + << " enabled\n"; } } - } // namespace parpe diff --git a/src/parpeoptimization/optimizationProblem.cpp b/src/parpeoptimization/optimizationProblem.cpp index dae0d9e56..fa8300212 100644 --- a/src/parpeoptimization/optimizationProblem.cpp +++ b/src/parpeoptimization/optimizationProblem.cpp @@ -3,55 +3,53 @@ #include #include #include -#include -#include #include +#include #include +#include +#include #include #include #include #include -#include +#include #include #include -#include #include namespace parpe { - -int getLocalOptimum(OptimizationProblem *problem) { +int getLocalOptimum(OptimizationProblem* problem) { // TODO how to 
make this nicer? minibatchOptimizer should not inherit // from Optimizer since they have different interfaces, so we can not // use the same factory method auto options = problem->getOptimizationOptions(); if (options.optimizer == optimizerName::OPTIMIZER_MINIBATCH_1) { auto minibatchProblem = - dynamic_cast*>(problem); + dynamic_cast*>(problem); if (!minibatchProblem) throw ParPEException("Minibatch optimizer selected but given " "optimization problem cannot be solved by " "minibatch optimizer"); auto status = runMinibatchOptimization(minibatchProblem); - return std::get < 0 > (status); + return std::get<0>(status); } - auto optimizer = std::unique_ptr < Optimizer > ( - problem->getOptimizationOptions().createOptimizer()); + auto optimizer = std::unique_ptr( + problem->getOptimizationOptions().createOptimizer()); if (!optimizer) throw ParPEException("Invalid optimizer selected. Did you compile " "parPE with support for the selected optimizer?"); auto status = optimizer->optimize(problem); - return std::get < 0 > (status); + return std::get<0>(status); } - -void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, - int numParameterIndicesToCheck - ) { +void optimizationProblemGradientCheckMultiEps( + OptimizationProblem* problem, + int numParameterIndicesToCheck) { // set eps - std::vector multi_eps {1e-1, 1e-3, 1e-5, 1e-7, 1e-9}; + std::vector multi_eps{1e-1, 1e-3, 1e-5, 1e-7, 1e-9}; // setting the number of parameters to the minimum of // numParamaterIndicesToCheck and dimension of the problem @@ -64,23 +62,25 @@ void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, std::random_device rd; std::mt19937 g(rd()); - optimizationProblemGradientCheckMultiEps(problem, parameterIndices, multi_eps); + optimizationProblemGradientCheckMultiEps( + problem, parameterIndices, multi_eps); } -void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, - gsl::span parameterIndices, - gsl::span multi_eps){ - //get a random theta +void optimizationProblemGradientCheckMultiEps( + OptimizationProblem* problem, + gsl::span parameterIndices, + gsl::span multi_eps) { + // get a random theta std::vector theta(problem->cost_fun_->numParameters()); problem->fillInitialParameters(theta); double fc = 0; // f(theta) - //evaluate the objective function at theta and get analytical gradient + // evaluate the objective function at theta and get analytical gradient std::vector gradient(theta.size()); problem->cost_fun_->evaluate(theta, fc, gradient); auto parameter_ids = problem->cost_fun_->getParameterIds(); std::vector thetaTmp(theta); - for(int curInd : parameterIndices) { + for (int curInd : parameterIndices) { double eps_best = 0.0; double regRelErr_best = 0.0; double absErr_best = 0.0; @@ -89,20 +89,20 @@ void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, // getting the analytical gradient of current index double curGrad = gradient[curInd]; - for(double epsilon : multi_eps) { + for (double epsilon : multi_eps) { double fb = 0.0; // f(theta - eps) double ff = 0.0; // f(theta + eps) thetaTmp[curInd] = theta[curInd] + epsilon; - problem->cost_fun_->evaluate(gsl::span(thetaTmp), ff, - gsl::span()); + problem->cost_fun_->evaluate( + gsl::span(thetaTmp), ff, gsl::span()); thetaTmp[curInd] = theta[curInd] - epsilon; - problem->cost_fun_->evaluate(gsl::span(thetaTmp), fb, - gsl::span()); + problem->cost_fun_->evaluate( + gsl::span(thetaTmp), fb, gsl::span()); // calculating the finite difference double fd_c = (ff - fb) / (2 * epsilon); - 
//reverting thetaTmp back to original + // reverting thetaTmp back to original thetaTmp[curInd] = theta[curInd]; double abs_err = std::fabs(curGrad - fd_c); @@ -110,7 +110,7 @@ void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, // comparing results with current best epsilon // if better, replace. Also replace if no eps currently saved. - if((regRelError < regRelErr_best) || (regRelErr_best == 0)){ + if ((regRelError < regRelErr_best) || (regRelErr_best == 0)) { eps_best = epsilon; regRelErr_best = regRelError; fd_c_best = fd_c; @@ -122,34 +122,41 @@ void optimizationProblemGradientCheckMultiEps(OptimizationProblem *problem, ll = loglevel::warning; if (std::isnan(curGrad) || fabs(regRelErr_best) > 1e-2) ll = loglevel::error; - logmessage(ll, "%-25s (%d) g: %12.6g fd_c: %12.6g |Δ/fd_c|: %.6e " - "|Δ|: %12.6g ϵ: %12.6g ", - parameter_ids.empty()?"":parameter_ids[curInd].c_str(), - curInd, curGrad, fd_c_best, - regRelErr_best, absErr_best, eps_best); + logmessage( + ll, + "%-25s (%d) g: %12.6g fd_c: %12.6g |Δ/fd_c|: %.6e " + "|Δ|: %12.6g ϵ: %12.6g ", + parameter_ids.empty() ? "" : parameter_ids[curInd].c_str(), + curInd, + curGrad, + fd_c_best, + regRelErr_best, + absErr_best, + eps_best); } } -void optimizationProblemGradientCheck(OptimizationProblem *problem, - int numParameterIndicesToCheck, - double epsilon) { +void optimizationProblemGradientCheck( + OptimizationProblem* problem, + int numParameterIndicesToCheck, + double epsilon) { int numParameters = problem->cost_fun_->numParameters(); numParameterIndicesToCheck = - std::min(numParameterIndicesToCheck, numParameters); + std::min(numParameterIndicesToCheck, numParameters); // choose random parameters to check std::vector parameterIndices(numParameterIndicesToCheck); std::iota(parameterIndices.begin(), parameterIndices.end(), 0); std::random_device rd; std::mt19937 g(rd()); - //std::shuffle(parameterIndices.begin(), parameterIndices.end(), g); + // std::shuffle(parameterIndices.begin(), parameterIndices.end(), g); optimizationProblemGradientCheck(problem, parameterIndices, epsilon); } - -void optimizationProblemGradientCheck(OptimizationProblem *problem, - gsl::span parameterIndices, - double epsilon) { +void optimizationProblemGradientCheck( + OptimizationProblem* problem, + gsl::span parameterIndices, + double epsilon) { double fc = 0; // f(theta) std::vector theta(problem->cost_fun_->numParameters()); problem->fillInitialParameters(theta); @@ -165,12 +172,12 @@ void optimizationProblemGradientCheck(OptimizationProblem *problem, double fb = 0, ff = 0; // f(theta + eps) , f(theta - eps) thetaTmp[curInd] = theta[curInd] + epsilon; - problem->cost_fun_->evaluate(gsl::span(thetaTmp), ff, - gsl::span()); + problem->cost_fun_->evaluate( + gsl::span(thetaTmp), ff, gsl::span()); thetaTmp[curInd] = theta[curInd] - epsilon; - problem->cost_fun_->evaluate(gsl::span(thetaTmp), fb, - gsl::span()); + problem->cost_fun_->evaluate( + gsl::span(thetaTmp), fb, gsl::span()); // double fd_f = (ff - fc) / epsilon; @@ -191,7 +198,8 @@ void optimizationProblemGradientCheck(OptimizationProblem *problem, // || (curGrad <= fd_f && curGrad >= fd_b))) // status[0] = '!'; - // printf("%5d%s g: %12.6g fd_f: %12.6g (Δ%12.6g) fd_c: %12.6g (Δ%12.6g) fd_b: %12.6g (Δ%12.6g)", + // printf("%5d%s g: %12.6g fd_f: %12.6g (Δ%12.6g) fd_c: %12.6g + // (Δ%12.6g) fd_b: %12.6g (Δ%12.6g)", // curInd, status, curGrad, // fd_f, curGrad - fd_f, // fd_c, curGrad - fd_c, @@ -206,61 +214,72 @@ void optimizationProblemGradientCheck(OptimizationProblem *problem, 
if (fabs(regRelError) > 1e-2) ll = loglevel::error; - logmessage(ll, "%5d g: %12.6g fd_c: %12.6g Δ/ff: %.6e f: %12.6g", - curInd, curGrad, fd_c, regRelError, ff); + logmessage( + ll, + "%5d g: %12.6g fd_c: %12.6g Δ/ff: %.6e f: %12.6g", + curInd, + curGrad, + fd_c, + regRelError, + ff); } } OptimizationProblem::OptimizationProblem( - std::unique_ptr costFun, - std::unique_ptr logger) - : cost_fun_(std::move(costFun)), logger_(std::move(logger)) { - -} + std::unique_ptr costFun, + std::unique_ptr logger) + : cost_fun_(std::move(costFun)) + , logger_(std::move(logger)) {} -const OptimizationOptions &OptimizationProblem::getOptimizationOptions() const { +OptimizationOptions const& OptimizationProblem::getOptimizationOptions() const { return optimization_options_; } -void OptimizationProblem::setOptimizationOptions(const OptimizationOptions &options) { +void OptimizationProblem::setOptimizationOptions( + OptimizationOptions const& options) { optimization_options_ = options; } std::unique_ptr OptimizationProblem::getReporter() const { - return std::make_unique < OptimizationReporter > ( - cost_fun_.get(), std::make_unique < Logger > (*logger_)); + return std::make_unique( + cost_fun_.get(), std::make_unique(*logger_)); } -void OptimizationProblem::fillInitialParameters(gsl::span buffer) const { +void OptimizationProblem::fillInitialParameters( + gsl::span buffer) const { int numParameters = cost_fun_->numParameters(); std::vector parametersMin(numParameters); std::vector parametersMax(numParameters); fillParametersMin(parametersMin); fillParametersMax(parametersMax); - fillArrayRandomDoubleIndividualInterval(parametersMin, parametersMax, - buffer); + fillArrayRandomDoubleIndividualInterval( + parametersMin, parametersMax, buffer); } -OptimizationReporter::OptimizationReporter(GradientFunction *gradFun, - std::unique_ptr logger) : - OptimizationReporter(gradFun, nullptr, std::move(logger)) { +OptimizationReporter::OptimizationReporter( + GradientFunction* gradFun, + std::unique_ptr logger) + : OptimizationReporter(gradFun, nullptr, std::move(logger)) { default_logger_prefix_ = this->logger_->getPrefix(); } -OptimizationReporter::OptimizationReporter(GradientFunction *gradFun, - std::unique_ptr rw, - std::unique_ptr logger) : - result_writer_(std::move(rw)), logger_(std::move(logger)) { +OptimizationReporter::OptimizationReporter( + GradientFunction* gradFun, + std::unique_ptr rw, + std::unique_ptr logger) + : result_writer_(std::move(rw)) + , logger_(std::move(logger)) { setGradientFunction(gradFun); default_logger_prefix_ = this->logger_->getPrefix(); } -FunctionEvaluationStatus OptimizationReporter::evaluate(gsl::span parameters, - double &fval, - gsl::span gradient, - Logger *logger, - double *cpuTime) const { +FunctionEvaluationStatus OptimizationReporter::evaluate( + gsl::span parameters, + double& fval, + gsl::span gradient, + Logger* logger, + double* cpuTime) const { double functionCpuSec = 0.0; if (cpuTime) *cpuTime = 0.0; @@ -269,27 +288,36 @@ FunctionEvaluationStatus OptimizationReporter::evaluate(gsl::span return functionEvaluationFailure; if (gradient.data()) { - if (!have_cached_gradient_ - || !std::equal(parameters.begin(), parameters.end(), - cached_parameters_.begin())) { + if (!have_cached_gradient_ || !std::equal( + parameters.begin(), + parameters.end(), + cached_parameters_.begin())) { // Have to compute anew cached_status_ = grad_fun_->evaluate( - parameters, cached_cost_, cached_gradient_, - logger ? 
logger : this->logger_.get(), &functionCpuSec); + parameters, + cached_cost_, + cached_gradient_, + logger ? logger : this->logger_.get(), + &functionCpuSec); have_cached_cost_ = true; have_cached_gradient_ = true; } // recycle old result - std::copy(cached_gradient_.begin(), cached_gradient_.end(), gradient.begin()); + std::copy( + cached_gradient_.begin(), cached_gradient_.end(), gradient.begin()); fval = cached_cost_; } else { - if (!have_cached_cost_ - || !std::equal(parameters.begin(), parameters.end(), - cached_parameters_.begin())) { + if (!have_cached_cost_ || !std::equal( + parameters.begin(), + parameters.end(), + cached_parameters_.begin())) { // Have to compute anew cached_status_ = grad_fun_->evaluate( - parameters, cached_cost_, gsl::span(), - logger ? logger : this->logger_.get(), &functionCpuSec); + parameters, + cached_cost_, + gsl::span(), + logger ? logger : this->logger_.get(), + &functionCpuSec); have_cached_cost_ = true; have_cached_gradient_ = false; } @@ -306,8 +334,9 @@ FunctionEvaluationStatus OptimizationReporter::evaluate(gsl::span *cpuTime = functionCpuSec; if (afterCostFunctionCall( - parameters, cached_cost_, - gradient.data() ? cached_gradient_ : gsl::span()) != 0) + parameters, + cached_cost_, + gradient.data() ? cached_gradient_ : gsl::span()) != 0) return functionEvaluationFailure; return cached_status_; @@ -319,10 +348,12 @@ int OptimizationReporter::numParameters() const { void OptimizationReporter::printObjectiveFunctionFailureMessage() const { if (logger_) - logger_->logmessage(loglevel::error, "Objective function evaluation failed!"); + logger_->logmessage( + loglevel::error, "Objective function evaluation failed!"); } -bool OptimizationReporter::starting(gsl::span initialParameters) const { +bool OptimizationReporter::starting( + gsl::span initialParameters) const { // If this is called multiple times (happens via IpOpt), don't do anything if (started_) return false; @@ -334,49 +365,64 @@ bool OptimizationReporter::starting(gsl::span initialParameters) c started_ = true; - logger_->setPrefix(default_logger_prefix_ + "i" + std::to_string(num_iterations_)); + logger_->setPrefix( + default_logger_prefix_ + "i" + std::to_string(num_iterations_)); return false; } -bool OptimizationReporter::iterationFinished(gsl::span parameters, - double objectiveFunctionValue, - gsl::span objectiveFunctionGradient) const { +bool OptimizationReporter::iterationFinished( + gsl::span parameters, + double objectiveFunctionValue, + gsl::span objectiveFunctionGradient) const { double wallTimeIter = wall_timer_.getRound(); double wallTimeOptim = wall_timer_.getTotal(); if (logger_) - logger_->logmessage(loglevel::info, - "iter: %d cost: %g time_iter: wall: %gs cpu: %gs time_optim: wall: %gs cpu: %gs", - num_iterations_, objectiveFunctionValue, wallTimeIter, - cpu_time_iteration_sec_, wallTimeOptim, - cpu_time_total_sec_); + logger_->logmessage( + loglevel::info, + "iter: %d cost: %g time_iter: wall: %gs cpu: %gs time_optim: wall: " + "%gs cpu: %gs", + num_iterations_, + objectiveFunctionValue, + wallTimeIter, + cpu_time_iteration_sec_, + wallTimeOptim, + cpu_time_total_sec_); if (result_writer_) result_writer_->logOptimizerIteration( - num_iterations_, parameters.empty() ? cached_parameters_ : parameters, objectiveFunctionValue, - // This might be misleading, the gradient could evaluated at other parameters if there was a line search inbetween - objectiveFunctionGradient.empty() ? 
cached_gradient_ : objectiveFunctionGradient, wallTimeIter, - cpu_time_iteration_sec_); + num_iterations_, + parameters.empty() ? cached_parameters_ : parameters, + objectiveFunctionValue, + // This might be misleading, the gradient could be evaluated at other + // parameters if there was a line search in between + objectiveFunctionGradient.empty() ? cached_gradient_ + : objectiveFunctionGradient, + wallTimeIter, + cpu_time_iteration_sec_); ++num_iterations_; - logger_->setPrefix(default_logger_prefix_ + "i" + std::to_string(num_iterations_)); + logger_->setPrefix( + default_logger_prefix_ + "i" + std::to_string(num_iterations_)); cpu_time_iteration_sec_ = 0.0; return false; } -bool OptimizationReporter::beforeCostFunctionCall(gsl::span /*parameters*/) const { +bool OptimizationReporter::beforeCostFunctionCall( + gsl::span /*parameters*/) const { ++num_function_calls_; - //timeCostEvaluationBegin = clock(); + // timeCostEvaluationBegin = clock(); return false; } -bool OptimizationReporter::afterCostFunctionCall(gsl::span parameters, - double objectiveFunctionValue, - gsl::span objectiveFunctionGradient) const { +bool OptimizationReporter::afterCostFunctionCall( + gsl::span parameters, + double objectiveFunctionValue, + gsl::span objectiveFunctionGradient) const { double wallTime = wall_timer_.getTotal(); if (!std::isfinite(objectiveFunctionValue)) @@ -384,83 +430,106 @@ bool OptimizationReporter::afterCostFunctionCall(gsl::span paramet if (result_writer_) { result_writer_->logObjectiveFunctionEvaluation( - parameters, objectiveFunctionValue, - objectiveFunctionGradient, num_iterations_, num_function_calls_, - wallTime); + parameters, + objectiveFunctionValue, + objectiveFunctionGradient, + num_iterations_, + num_function_calls_, + wallTime); } return false; } -void OptimizationReporter::finished(double optimalCost, - gsl::span parameters, - int exitStatus) const { +void OptimizationReporter::finished( + double optimalCost, + gsl::span parameters, + int exitStatus) const { double timeElapsed = wall_timer_.getTotal(); - if ((optimalCost <= cached_cost_ || cached_parameters_.empty()) && !parameters.empty()) { + if ((optimalCost <= cached_cost_ || cached_parameters_.empty()) && + !parameters.empty()) { cached_cost_ = optimalCost; cached_parameters_.assign(parameters.begin(), parameters.end()); } else if (cached_cost_ > optimalCost && parameters.empty()) { - // the optimal value is not from the cached parameters and we did not get - // the optimal parameters from the optimizer. since we don't know them, rather set to nan + // the optimal value is not from the cached parameters and we did not + // get the optimal parameters from the optimizer. since we don't know + // them, rather set to nan if (logger_) - logger_->logmessage(loglevel::info, "cachedCost != optimalCost && parameters.empty()"); - cached_parameters_.assign(cached_parameters_.size(), std::numeric_limits<double>::quiet_NaN()); + logger_->logmessage( + loglevel::info, + "cachedCost != optimalCost && parameters.empty()"); + cached_parameters_.assign( + cached_parameters_.size(), + std::numeric_limits<double>::quiet_NaN()); cached_cost_ = optimalCost; } // else: our cached parameters were better.
use those if (logger_) - logger_->logmessage(loglevel::info, "Optimizer status %d, final llh: %e, " - "time: wall: %f cpu: %f.", exitStatus, - cached_cost_, timeElapsed, cpu_time_total_sec_); + logger_->logmessage( + loglevel::info, + "Optimizer status %d, final llh: %e, " + "time: wall: %f cpu: %f.", + exitStatus, + cached_cost_, + timeElapsed, + cpu_time_total_sec_); if (result_writer_) - result_writer_->saveOptimizerResults(cached_cost_, cached_parameters_, - timeElapsed, cpu_time_total_sec_, - exitStatus); + result_writer_->saveOptimizerResults( + cached_cost_, + cached_parameters_, + timeElapsed, + cpu_time_total_sec_, + exitStatus); } -double OptimizationReporter::getFinalCost() const { - return cached_cost_; -} +double OptimizationReporter::getFinalCost() const { return cached_cost_; } -const std::vector &OptimizationReporter::getFinalParameters() const { +std::vector const& OptimizationReporter::getFinalParameters() const { return cached_parameters_; } -void OptimizationReporter::setGradientFunction(GradientFunction *gradFun) const { +void OptimizationReporter::setGradientFunction( + GradientFunction* gradFun) const { this->grad_fun_ = gradFun; num_parameters_ = gradFun->numParameters(); cached_gradient_.resize(num_parameters_); } -std::vector OptimizationReporter::getParameterIds() const -{ +std::vector OptimizationReporter::getParameterIds() const { return grad_fun_->getParameterIds(); } -void OptimizationProblemImpl::fillParametersMin(gsl::span buffer) const { +void OptimizationProblemImpl::fillParametersMin( + gsl::span buffer) const { std::copy(parametersMin.begin(), parametersMin.end(), buffer.begin()); } -void OptimizationProblemImpl::fillParametersMax(gsl::span buffer) const { +void OptimizationProblemImpl::fillParametersMax( + gsl::span buffer) const { std::copy(parametersMax.begin(), parametersMax.end(), buffer.begin()); } -void OptimizationProblemImpl::setParametersMin(std::vector parametersMin) { +void OptimizationProblemImpl::setParametersMin( + std::vector parametersMin) { this->parametersMin = std::move(parametersMin); } -void OptimizationProblemImpl::setParametersMax(std::vector parametersMax) { +void OptimizationProblemImpl::setParametersMax( + std::vector parametersMax) { this->parametersMax = std::move(parametersMax); } -void OptimizationProblemImpl::setInitialParameters(std::vector initial) { +void OptimizationProblemImpl::setInitialParameters( + std::vector initial) { parametersStart = std::move(initial); } -void OptimizationProblemImpl::fillInitialParameters(gsl::span buffer) const { +void OptimizationProblemImpl::fillInitialParameters( + gsl::span buffer) const { if (!parametersStart.empty()) { - std::copy(parametersStart.begin(), parametersStart.end(), buffer.begin()); + std::copy( + parametersStart.begin(), parametersStart.end(), buffer.begin()); } else { OptimizationProblem::fillInitialParameters(buffer); } diff --git a/src/parpeoptimization/optimizationResultWriter.cpp b/src/parpeoptimization/optimizationResultWriter.cpp index 97fb10ba1..e09cced35 100644 --- a/src/parpeoptimization/optimizationResultWriter.cpp +++ b/src/parpeoptimization/optimizationResultWriter.cpp @@ -1,31 +1,33 @@ #include -#include #include -#include #include +#include +#include #include -#include #include +#include #include namespace parpe { -OptimizationResultWriter::OptimizationResultWriter(const H5::H5File& file, - std::string rootPath) : - rootPath(std::move(rootPath)) { +OptimizationResultWriter::OptimizationResultWriter( + const H5::H5File& file, + std::string rootPath) + : 
rootPath(std::move(rootPath)) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); this->file = file; hdf5EnsureGroupExists(file, this->rootPath); } -OptimizationResultWriter::OptimizationResultWriter(const std::string &filename, - bool overwrite, - std::string rootPath) : - rootPath(std::move(rootPath)) { +OptimizationResultWriter::OptimizationResultWriter( + std::string const& filename, + bool overwrite, + std::string rootPath) + : rootPath(std::move(rootPath)) { logmessage(loglevel::debug, "Writing results to %s.", filename.c_str()); file = hdf5CreateFile(filename, overwrite); @@ -34,7 +36,7 @@ OptimizationResultWriter::OptimizationResultWriter(const std::string &filename, } OptimizationResultWriter::OptimizationResultWriter( - const OptimizationResultWriter &other) + OptimizationResultWriter const& other) : rootPath(other.rootPath) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); file = other.file; @@ -46,7 +48,7 @@ OptimizationResultWriter::~OptimizationResultWriter() { file.close(); } -const std::string &OptimizationResultWriter::getRootPath() const { +std::string const& OptimizationResultWriter::getRootPath() const { return rootPath; } @@ -58,126 +60,134 @@ std::string OptimizationResultWriter::getIterationPath(int iterationIdx) const { } void OptimizationResultWriter::logObjectiveFunctionEvaluation( - gsl::span parameters, - double objectiveFunctionValue, - gsl::span objectiveFunctionGradient, - int numIterations, - int numFunctionCalls, - double timeElapsedInSeconds) -{ + gsl::span parameters, + double objectiveFunctionValue, + gsl::span objectiveFunctionGradient, + int numIterations, + int numFunctionCalls, + double timeElapsedInSeconds) { [[maybe_unused]] auto lock = hdf5MutexGetLock(); std::string pathStr = getIterationPath(numIterations); - const char *fullGroupPath = pathStr.c_str(); + char const* fullGroupPath = pathStr.c_str(); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "costFunCost", - gsl::make_span(&objectiveFunctionValue, 1)); + file, + fullGroupPath, + "costFunCost", + gsl::make_span(&objectiveFunctionValue, 1)); if (logGradientEachFunctionEvaluation) { if (!objectiveFunctionGradient.empty()) { hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "costFunGradient", - objectiveFunctionGradient); + file, + fullGroupPath, + "costFunGradient", + objectiveFunctionGradient); } else if (!parameters.empty()) { double dummyGradient[parameters.size()]; std::fill_n(dummyGradient, parameters.size(), NAN); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "costFunGradient", - gsl::make_span(dummyGradient, - parameters.size())); + file, + fullGroupPath, + "costFunGradient", + gsl::make_span(dummyGradient, parameters.size())); } } if (logParametersEachFunctionEvaluation) if (!parameters.empty()) hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "costFunParameters", - parameters); + file, fullGroupPath, "costFunParameters", parameters); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "costFunWallTimeInSec", - gsl::make_span(&timeElapsedInSeconds, 1)); + file, + fullGroupPath, + "costFunWallTimeInSec", + gsl::make_span(&timeElapsedInSeconds, 1)); hdf5CreateOrExtendAndWriteToInt2DArray( - file, fullGroupPath, "costFunCallIndex", - gsl::make_span(&numFunctionCalls, 1)); + file, + fullGroupPath, + "costFunCallIndex", + gsl::make_span(&numFunctionCalls, 1)); flushResultWriter(); } void OptimizationResultWriter::logOptimizerIteration( - int numIterations, - gsl::span parameters, - double 
objectiveFunctionValue, - gsl::span gradient, - double wallSeconds, - double cpuSeconds) -{ + int numIterations, + gsl::span parameters, + double objectiveFunctionValue, + gsl::span gradient, + double wallSeconds, + double cpuSeconds) { std::string const& pathStr = getRootPath(); - const char *fullGroupPath = pathStr.c_str(); + char const* fullGroupPath = pathStr.c_str(); [[maybe_unused]] auto lock = hdf5MutexGetLock(); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "iterCostFunCost", - gsl::make_span(&objectiveFunctionValue, 1)); + file, + fullGroupPath, + "iterCostFunCost", + gsl::make_span(&objectiveFunctionValue, 1)); if (logGradientEachIteration) { if (!gradient.empty()) { hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "iterCostFunGradient", - gradient); + file, fullGroupPath, "iterCostFunGradient", gradient); } else if (!parameters.empty()) { std::vector nanGradient(parameters.size(), NAN); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "iterCostFunGradient", - nanGradient); + file, fullGroupPath, "iterCostFunGradient", nanGradient); } } if (!parameters.empty()) { hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "iterCostFunParameters", - parameters); + file, fullGroupPath, "iterCostFunParameters", parameters); } hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "iterCostFunWallSec", - gsl::make_span(&wallSeconds, 1)); + file, + fullGroupPath, + "iterCostFunWallSec", + gsl::make_span(&wallSeconds, 1)); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, fullGroupPath, "iterCostFunCpuSec", - gsl::make_span(&cpuSeconds, 1)); + file, + fullGroupPath, + "iterCostFunCpuSec", + gsl::make_span(&cpuSeconds, 1)); hdf5CreateOrExtendAndWriteToInt2DArray( - file, fullGroupPath, "iterIndex", - gsl::make_span(&numIterations, 1)); + file, + fullGroupPath, + "iterIndex", + gsl::make_span(&numIterations, 1)); flushResultWriter(); } - void OptimizationResultWriter::setLoggingEachIteration(bool logGradient) { logGradientEachIteration = logGradient; } void OptimizationResultWriter::setLoggingEachFunctionEvaluation( - bool logGradient, - bool logParameters) { + bool logGradient, + bool logParameters) { logGradientEachFunctionEvaluation = logGradient; logParametersEachFunctionEvaluation = logParameters; } - void OptimizationResultWriter::starting( - gsl::span initialParameters) { + gsl::span initialParameters) { if (!initialParameters.empty()) { - const auto& root_path = getRootPath(); + auto const& root_path = getRootPath(); hdf5CreateOrExtendAndWriteToDouble2DArray( - file, root_path, "initialParameters", - initialParameters); + file, root_path, "initialParameters", initialParameters); flushResultWriter(); } } @@ -189,53 +199,76 @@ void OptimizationResultWriter::flushResultWriter() const { } void OptimizationResultWriter::saveOptimizerResults( - double finalNegLogLikelihood, - gsl::span optimalParameters, - double wallSec, - double cpuSec, - int exitStatus) const { + double finalNegLogLikelihood, + gsl::span optimalParameters, + double wallSec, + double cpuSec, + int exitStatus) const { std::string const& optimPath = getRootPath(); hdf5EnsureGroupExists(file, optimPath); std::string fullGroupPath; - hsize_t dimensions[1] = { 1 }; + hsize_t dimensions[1] = {1}; [[maybe_unused]] auto lock = hdf5MutexGetLock(); fullGroupPath = (optimPath + "/finalCost"); - H5LTmake_dataset(file.getId(), fullGroupPath.c_str(), 1, dimensions, - H5T_NATIVE_DOUBLE, &finalNegLogLikelihood); + H5LTmake_dataset( + file.getId(), + 
fullGroupPath.c_str(), + 1, + dimensions, + H5T_NATIVE_DOUBLE, + &finalNegLogLikelihood); fullGroupPath = (optimPath + "/wallSec"); - H5LTmake_dataset(file.getId(), fullGroupPath.c_str(), 1, dimensions, - H5T_NATIVE_DOUBLE, &wallSec); + H5LTmake_dataset( + file.getId(), + fullGroupPath.c_str(), + 1, + dimensions, + H5T_NATIVE_DOUBLE, + &wallSec); fullGroupPath = (optimPath + "/cpuSec"); - H5LTmake_dataset(file.getId(), fullGroupPath.c_str(), 1, dimensions, - H5T_NATIVE_DOUBLE, &cpuSec); + H5LTmake_dataset( + file.getId(), + fullGroupPath.c_str(), + 1, + dimensions, + H5T_NATIVE_DOUBLE, + &cpuSec); fullGroupPath = (optimPath + "/exitStatus"); - H5LTmake_dataset(file.getId(), fullGroupPath.c_str(), 1, dimensions, - H5T_NATIVE_INT, &exitStatus); + H5LTmake_dataset( + file.getId(), + fullGroupPath.c_str(), + 1, + dimensions, + H5T_NATIVE_INT, + &exitStatus); if (!optimalParameters.empty()) { fullGroupPath = (optimPath + "/finalParameters"); dimensions[0] = optimalParameters.size(); - H5LTmake_dataset(file.getId(), fullGroupPath.c_str(), 1, dimensions, - H5T_NATIVE_DOUBLE, optimalParameters.data()); + H5LTmake_dataset( + file.getId(), + fullGroupPath.c_str(), + 1, + dimensions, + H5T_NATIVE_DOUBLE, + optimalParameters.data()); } flushResultWriter(); } -void OptimizationResultWriter::setRootPath(const std::string &path) { +void OptimizationResultWriter::setRootPath(std::string const& path) { rootPath = path; hdf5EnsureGroupExists(file, rootPath); } -const H5::H5File &OptimizationResultWriter::getH5File() const { - return file; -} +const H5::H5File& OptimizationResultWriter::getH5File() const { return file; } } // namespace parpe diff --git a/templates/main.cpp b/templates/main.cpp index 304e6b1a5..0930275f4 100644 --- a/templates/main.cpp +++ b/templates/main.cpp @@ -1,10 +1,10 @@ -#include #include #include -#include +#include #include -#include #include +#include +#include #include @@ -12,20 +12,21 @@ // to avoid including model-specific header files namespace amici::generic_model { - std::unique_ptr getModel(); +std::unique_ptr getModel(); } class MyOptimizationApplication : public parpe::OptimizationApplication { -public: + public: using OptimizationApplication::OptimizationApplication; - virtual void initProblem(std::string const& inFileArgument, - std::string const& outFileArgument) override - { + virtual void initProblem( + std::string const& inFileArgument, + std::string const& outFileArgument) override { if (!isWorker()) - parpe::logmessage(parpe::loglevel::info, - "Reading options and data from '%s'.", - inFileArgument.c_str()); + parpe::logmessage( + parpe::loglevel::info, + "Reading options and data from '%s'.", + inFileArgument.c_str()); auto h5Outfile = parpe::hdf5CreateFile(outFileArgument, true); logParPEVersion(h5Outfile); @@ -36,24 +37,24 @@ class MyOptimizationApplication : public parpe::OptimizationApplication { // read options from file auto h5Infile = dataProvider->getHdf5File(); - auto optimizationOptions = parpe::OptimizationOptions::fromHDF5(h5Infile); + auto optimizationOptions = + parpe::OptimizationOptions::fromHDF5(h5Infile); - // Create one instance for the problem, one for the application for clear ownership + // Create one instance for the problem, one for the application for + // clear ownership auto multiCondProb = new parpe::MultiConditionProblem( - dataProvider.get(), &loadBalancer, - std::make_unique(), - // TODO remove this resultwriter - std::make_unique( - h5Outfile, - std::string("/multistarts/")) - ); + dataProvider.get(), + &loadBalancer, + 
std::make_unique(), + // TODO remove this resultwriter + std::make_unique( + h5Outfile, std::string("/multistarts/"))); // hierarchical optimization? - if(optimizationOptions->hierarchicalOptimization) { + if (optimizationOptions->hierarchicalOptimization) { problem.reset(new parpe::HierarchicalOptimizationProblemWrapper( - std::unique_ptr(multiCondProb), - dataProvider.get()) - ); + std::unique_ptr(multiCondProb), + dataProvider.get())); } else { problem.reset(multiCondProb); } @@ -61,29 +62,26 @@ class MyOptimizationApplication : public parpe::OptimizationApplication { problem->setOptimizationOptions(*optimizationOptions); // On master, copy input data to result file - if(parpe::getMpiRank() < 1) + if (parpe::getMpiRank() < 1) dataProvider->copyInputData(h5Outfile); auto ms = new parpe::MultiConditionProblemMultiStartOptimizationProblem( - dataProvider.get(), - problem->getOptimizationOptions(), - multiCondProb->getResultWriter(), - &loadBalancer, - std::make_unique() - ); + dataProvider.get(), + problem->getOptimizationOptions(), + multiCondProb->getResultWriter(), + &loadBalancer, + std::make_unique()); multiStartOptimizationProblem.reset(ms); } - virtual ~MyOptimizationApplication() override { - parpe::logProcessStats(); - } + virtual ~MyOptimizationApplication() override { parpe::logProcessStats(); } -private: + private: /** DataProvider as interface to HDF5 data */ std::unique_ptr dataProvider; }; -int main(int argc, char **argv) { +int main(int argc, char** argv) { #ifndef NDEBUG // Set stdout to unbuffered when debugging setbuf(stdout, NULL); diff --git a/templates/main_debug.cpp b/templates/main_debug.cpp index 64129de14..de70e11cb 100644 --- a/templates/main_debug.cpp +++ b/templates/main_debug.cpp @@ -1,10 +1,10 @@ +#include +#include +#include #include #include #include #include -#include -#include -#include // to avoid including model-specific header files namespace amici::generic_model { @@ -12,33 +12,38 @@ std::unique_ptr getModel(); } using namespace parpe; -int main(int argc, char **argv) { +int main(int argc, char** argv) { #ifndef NDEBUG // Set stdout to unbuffered when debugging setbuf(stdout, NULL); #endif - std::string inFileArgument = "/home/dweindl/src/benchmarkProblem/20190205221009_Speedy_v4_Jan2019_generic_degradation_r415549/Speedy_v4_Jan2019_generic_degradation_r415549.bak.h5"; + std::string inFileArgument = + "/home/dweindl/src/benchmarkProblem/" + "20190205221009_Speedy_v4_Jan2019_generic_degradation_r415549/" + "Speedy_v4_Jan2019_generic_degradation_r415549.bak.h5"; - parpe::logmessage(parpe::loglevel::info, - "Reading options and data from '%s'.", - inFileArgument.c_str()); + parpe::logmessage( + parpe::loglevel::info, + "Reading options and data from '%s'.", + inFileArgument.c_str()); // setup data and problem MultiConditionDataProviderHDF5 dataProvider( amici::generic_model::getModel(), inFileArgument); auto options = OptimizationOptions::fromHDF5(dataProvider.getHdf5File()); - auto model = dataProvider.getModel(); - //model->setTimepoints({1e3}); + // model->setTimepoints({1e3}); model->requireSensitivitiesForAllParameters(); int condition_idx = 0; int start_idx = 0; - auto optimizationParams = options->getStartingPoint(dataProvider.getHdf5File(), start_idx); - dataProvider.updateSimulationParametersAndScale(condition_idx, optimizationParams, *model); + auto optimizationParams = + options->getStartingPoint(dataProvider.getHdf5File(), start_idx); + dataProvider.updateSimulationParametersAndScale( + condition_idx, optimizationParams, *model); auto edata = 
dataProvider.getExperimentalDataForCondition(condition_idx); auto solver = dataProvider.getSolver(); @@ -51,9 +56,9 @@ int main(int argc, char **argv) { WallTimer timer; auto rdata = amici::runAmiciSimulation(*solver, edata.get(), *model); - std::cout<numsteps<numstepsB<numsteps << std::endl; + std::cout << "NumstepsB: " << rdata->numstepsB << std::endl; + // logProcessStats(); } diff --git a/templates/main_nominal.cpp b/templates/main_nominal.cpp index 0c0be6047..fd131146a 100644 --- a/templates/main_nominal.cpp +++ b/templates/main_nominal.cpp @@ -1,12 +1,12 @@ +#include +#include +#include +#include #include #include #include #include #include -#include -#include -#include -#include // to avoid including model-specific header files namespace amici::generic_model { @@ -14,7 +14,7 @@ std::unique_ptr getModel(); } using namespace parpe; -int main(int argc, char **argv) { +int main(int argc, char** argv) { #ifndef NDEBUG // Set stdout to unbuffered when debugging setbuf(stdout, NULL); @@ -31,16 +31,16 @@ int main(int argc, char **argv) { outFileArgument = argv[2]; } else { std::stringstream ss; - ss << "Error: USAGE: "<< argv[0] + ss << "Error: USAGE: " << argv[0] << " HDF5_INPUT_FILE [HDF5_OUTPUT_FILE]\n"; fprintf(stderr, "%s", ss.str().c_str()); return 1; } - - parpe::logmessage(parpe::loglevel::info, - "Reading options and data from '%s'.", - inFileArgument.c_str()); + parpe::logmessage( + parpe::loglevel::info, + "Reading options and data from '%s'.", + inFileArgument.c_str()); // setup data and problem MultiConditionDataProviderHDF5 dataProvider( @@ -48,29 +48,35 @@ int main(int argc, char **argv) { auto options = OptimizationOptions::fromHDF5(dataProvider.getHdf5File()); std::unique_ptr rw; - if(!outFileArgument.empty()) { + if (!outFileArgument.empty()) { rw = std::make_unique( outFileArgument, true, "/"); } - MultiConditionProblem problem { &dataProvider, nullptr, nullptr, - std::move(rw)}; + MultiConditionProblem problem{ + &dataProvider, nullptr, nullptr, std::move(rw)}; // Read nominal parameters auto optimizationParams = amici::hdf5::getDoubleDataset1D( - dataProvider.getHdf5File(), "/parameters/nominalValues"); + dataProvider.getHdf5File(), "/parameters/nominalValues"); double fval = NAN; std::vector gradient(optimizationParams.size(), NAN); problem.cost_fun_->evaluate(optimizationParams, fval, gradient); - std::cout< getModel(); } void printUsage() { - std::cerr<<"Error: wrong number of arguments.\n"; - std::cerr<<"Usage: ... CONDITION_FILE_NAME CONDITION_FILE_PATH " - "PARAMETER_FILE_NAME PARAMETER_FILE_PATH " - "OUTFILENAME OUTFILEPATH " - "--at-optimum|--along-trajectory|--nominal " - "--mpi|--nompi --compute-inner|--nocompute-inner\n"; + std::cerr << "Error: wrong number of arguments.\n"; + std::cerr << "Usage: ... 
CONDITION_FILE_NAME CONDITION_FILE_PATH " + "PARAMETER_FILE_NAME PARAMETER_FILE_PATH " + "OUTFILENAME OUTFILEPATH " + "--at-optimum|--along-trajectory|--nominal " + "--mpi|--nompi --compute-inner|--nocompute-inner\n"; // |--parameter-matrix=PATH-UNSUPPORTED } -int main(int argc, char **argv) { +int main(int argc, char** argv) { int status = EXIT_SUCCESS; - if(argc != 10) { + if (argc != 10) { printUsage(); return EXIT_FAILURE; } bool computeInner; - if(std::string(argv[argc -1]) == "--compute-inner") { + if (std::string(argv[argc - 1]) == "--compute-inner") { computeInner = true; - } else if(std::string(argv[argc -1]) == "--nocompute-inner") { + } else if (std::string(argv[argc - 1]) == "--nocompute-inner") { computeInner = false; } else { printUsage(); return EXIT_FAILURE; } - if(std::string(argv[argc -2]) == "--mpi") { + if (std::string(argv[argc - 2]) == "--mpi") { #ifdef PARPE_ENABLE_MPI MPI_Init(&argc, &argv); #else throw std::runtime_error("parPE was built without MPI support."); #endif - } else if(std::string(argv[argc -2]) == "--nompi") { + } else if (std::string(argv[argc - 2]) == "--nompi") { ; } else { printUsage(); @@ -66,14 +66,18 @@ int main(int argc, char **argv) { std::string simulationMode = argv[7]; parpe::MultiConditionDataProviderHDF5 dp( - amici::generic_model::getModel(), - conditionFileName, - conditionFilePath); + amici::generic_model::getModel(), conditionFileName, conditionFilePath); - status = parpe::runSimulator(dp, simulationMode, - conditionFileName, conditionFilePath, - parameterFileName, parameterFilePath, - resultFileName, resultPath, computeInner); + status = parpe::runSimulator( + dp, + simulationMode, + conditionFileName, + conditionFilePath, + parameterFileName, + parameterFilePath, + resultFileName, + resultPath, + computeInner); parpe::finalizeMpiIfNeeded(); diff --git a/tests/parpeamici/amiciSimulationRunnerTest.cpp b/tests/parpeamici/amiciSimulationRunnerTest.cpp index 4a7ebff70..32d3dd87d 100644 --- a/tests/parpeamici/amiciSimulationRunnerTest.cpp +++ b/tests/parpeamici/amiciSimulationRunnerTest.cpp @@ -7,8 +7,7 @@ #include -TEST(SimulationWorkerAmici, SerializeResultPackageMessage) -{ +TEST(SimulationWorkerAmici, SerializeResultPackageMessage) { parpe::AmiciSimulationRunner::AmiciResultPackageSimple results = { 1.1, 2.345, @@ -16,8 +15,7 @@ TEST(SimulationWorkerAmici, SerializeResultPackageMessage) std::vector(3, 4.0), std::vector(3, 4.0), std::vector(1, 2.0), - 10 - }; + 10}; int msgSize = 0; auto buffer = @@ -25,8 +23,8 @@ TEST(SimulationWorkerAmici, SerializeResultPackageMessage) parpe::AmiciSimulationRunner::AmiciResultPackageSimple resultsAct = amici::deserializeFromChar< - parpe::AmiciSimulationRunner::AmiciResultPackageSimple>(buffer.get(), - msgSize); + parpe::AmiciSimulationRunner::AmiciResultPackageSimple>( + buffer.get(), msgSize); EXPECT_EQ(resultsAct, results); } diff --git a/tests/parpeamici/hierarchicalOptimizationTest.cpp b/tests/parpeamici/hierarchicalOptimizationTest.cpp index f0f2435a9..46044f2de 100644 --- a/tests/parpeamici/hierarchicalOptimizationTest.cpp +++ b/tests/parpeamici/hierarchicalOptimizationTest.cpp @@ -1,74 +1,79 @@ -#include #include +#include #include -#include "../parpeoptimization/quadraticTestProblem.h" #include "../parpecommon/testingMisc.h" +#include "../parpeoptimization/quadraticTestProblem.h" #include #include #include - // created by hierarchicalOptimizationTest.py form CMake #define TESTFILE "testhierarchical.h5" -using ::testing::Mock; using ::testing::_; -using ::testing::Ne; +using ::testing::DoAll; 
using ::testing::Eq; +using ::testing::Mock; +using ::testing::Ne; using ::testing::Return; using ::testing::ReturnRef; using ::testing::ReturnRefOfCopy; using ::testing::SetArgReferee; -using ::testing::DoAll; TEST(HierarchicalOptimization1, Reader) { - // mappingToObservable = np.array([[ 0, 1, 0], [ 0, 2, 0], [ 1, 1, 1], [1, 2, 1], [1, 3, 1]]) + // mappingToObservable = np.array([[ 0, 1, 0], [ 0, 2, 0], [ 1, 1, 1], [1, + // 2, 1], [1, 3, 1]]) - parpe::AnalyticalParameterHdf5Reader r(H5::H5File(TESTFILE, H5F_ACC_RDONLY), - "/scalingParameterIndices", - "/scalingParametersMapToObservables"); + parpe::AnalyticalParameterHdf5Reader r( + H5::H5File(TESTFILE, H5F_ACC_RDONLY), + "/scalingParameterIndices", + "/scalingParametersMapToObservables"); - auto exp1 = std::vector {1, 2}; + auto exp1 = std::vector{1, 2}; EXPECT_TRUE(exp1 == r.getConditionsForParameter(0)); - auto exp2 = std::vector {1, 2, 3}; + auto exp2 = std::vector{1, 2, 3}; EXPECT_TRUE(exp2 == r.getConditionsForParameter(1)); EXPECT_THROW(r.getObservablesForParameter(0, 0), std::out_of_range); - auto exp3 = std::vector {1}; + auto exp3 = std::vector{1}; EXPECT_TRUE(exp3 == r.getObservablesForParameter(1, 1)); - auto exp4 = std::vector {0, 1}; + auto exp4 = std::vector{0, 1}; EXPECT_TRUE(exp4 == r.getOptimizationParameterIndices()); } - class AnalyticalParameterProviderMock - : public parpe::AnalyticalParameterProvider { -public: + : public parpe::AnalyticalParameterProvider { + public: AnalyticalParameterProviderMock() = default; - MOCK_CONST_METHOD1(getConditionsForParameter, - std::vector(int parameterIndex)); - MOCK_CONST_METHOD2(getObservablesForParameter, - std::vector const&(int parameterIndex, int conditionIdx)); + MOCK_CONST_METHOD1( + getConditionsForParameter, + std::vector(int parameterIndex)); + MOCK_CONST_METHOD2( + getObservablesForParameter, + std::vector const&(int parameterIndex, int conditionIdx)); MOCK_CONST_METHOD0(getOptimizationParameterIndices, std::vector()); }; +class AmiciSummedGradientFunctionMock + : public parpe::AmiciSummedGradientFunction { + public: + MOCK_CONST_METHOD5( + getModelOutputsAndSigmas, + parpe::FunctionEvaluationStatus( + gsl::span parameters, + std::vector>& modelOutput, + std::vector>& modelSigmas, + parpe::Logger* logger, + double* cpuTime)); -class AmiciSummedGradientFunctionMock : public parpe::AmiciSummedGradientFunction { -public: - MOCK_CONST_METHOD5(getModelOutputsAndSigmas, parpe::FunctionEvaluationStatus( - gsl::span parameters, - std::vector > &modelOutput, - std::vector > &modelSigmas, - parpe::Logger *logger, double *cpuTime)); - - MOCK_CONST_METHOD0(getAllMeasurements, std::vector>()); + MOCK_CONST_METHOD0(getAllMeasurements, std::vector>()); // MOCK_CONST_METHOD6(evaluate, parpe::FunctionEvaluationStatus( // gsl::span parameters, @@ -77,132 +82,134 @@ class AmiciSummedGradientFunctionMock : public parpe::AmiciSummedGradientFunctio // gsl::span gradient, // parpe::Logger *logger, double *cpuTime)); - MOCK_CONST_METHOD6(evaluate, parpe::FunctionEvaluationStatus( - gsl::span parameters, - std::vector datasets, - double &fval, - gsl::span gradient, - parpe::Logger *logger, double *cpuTime)); - - MOCK_CONST_METHOD1(getParameterScaling, amici::ParameterScaling(int parameterIndex)); - - MOCK_CONST_METHOD0(numParameters, int()); + MOCK_CONST_METHOD6( + evaluate, + parpe::FunctionEvaluationStatus( + gsl::span parameters, + std::vector datasets, + double& fval, + gsl::span gradient, + parpe::Logger* logger, + double* cpuTime)); + + MOCK_CONST_METHOD1( + getParameterScaling, + 
amici::ParameterScaling(int parameterIndex)); + + MOCK_CONST_METHOD0(numParameters, int()); }; - class HierarchicalOptimization : public ::testing::Test { -protected: + protected: int numParameters_ = 4; int numConditions = 4; int numObservables = 3; int numTimepoints = 2; - std::vector > modelOutput = {{1.0, 1.0, 1.0, - 1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0, - 1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0, - 1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0, - 1.0, 1.0, 1.0},}; - std::vector> measurements = {{NAN, 1.0, 1.0, - 1.0, 1.0, 1.0}, - {2.0, 1.0, 1.0, - 2.0, 1.0, NAN}, - {2.0, 1.0, 1.0, - 2.0, NAN, 1.0}, - {1.0, 1.0, 1.0, - NAN, 1.0, 1.0},}; - std::vector> sigmas = {{NAN, 1.0, 1.0, - 1.0, 1.0, 1.0}, - {1.0, 1.0, 1.0, - 1.0, 1.0, NAN}, - {1.0, 1.0, 1.0, - 1.0, NAN, 1.0}, - {1.0, 1.0, 1.0, - NAN, 1.0, 1.0},}; + std::vector> modelOutput = { + {1.0, 1.0, 1.0, 1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0, 1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0, 1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0, 1.0, 1.0, 1.0}, + }; + std::vector> measurements = { + {NAN, 1.0, 1.0, 1.0, 1.0, 1.0}, + {2.0, 1.0, 1.0, 2.0, 1.0, NAN}, + {2.0, 1.0, 1.0, 2.0, NAN, 1.0}, + {1.0, 1.0, 1.0, NAN, 1.0, 1.0}, + }; + std::vector> sigmas = { + {NAN, 1.0, 1.0, 1.0, 1.0, 1.0}, + {1.0, 1.0, 1.0, 1.0, 1.0, NAN}, + {1.0, 1.0, 1.0, 1.0, NAN, 1.0}, + {1.0, 1.0, 1.0, NAN, 1.0, 1.0}, + }; }; - - TEST_F(HierarchicalOptimization, HierarchicalOptimization) { auto funUnqiue = std::make_unique(); auto fun = funUnqiue.get(); ON_CALL(*fun, numParameters()).WillByDefault(Return(numParameters_)); ON_CALL(*fun, getParameterScaling(_)) - .WillByDefault(Return(amici::ParameterScaling::log10)); + .WillByDefault(Return(amici::ParameterScaling::log10)); auto scalingReaderUnique = - std::make_unique( - H5::H5File(TESTFILE, H5F_ACC_RDONLY), - "/scalingParameterIndices", - "/scalingParametersMapToObservables"); + std::make_unique( + H5::H5File(TESTFILE, H5F_ACC_RDONLY), + "/scalingParameterIndices", + "/scalingParametersMapToObservables"); auto scalingReader = scalingReaderUnique.get(); auto offsetReaderUnique = - std::make_unique( - H5::H5File(TESTFILE, H5F_ACC_RDONLY), - "/offsetParameterIndices", - "/offsetParametersMapToObservables"); + std::make_unique( + H5::H5File(TESTFILE, H5F_ACC_RDONLY), + "/offsetParameterIndices", + "/offsetParametersMapToObservables"); auto sigmaReaderUnique = - std::make_unique( - H5::H5File(TESTFILE, H5F_ACC_RDONLY), - "/sigmaParameterIndices", - "/sigmaParametersMapToObservables"); + std::make_unique( + H5::H5File(TESTFILE, H5F_ACC_RDONLY), + "/sigmaParameterIndices", + "/sigmaParametersMapToObservables"); parpe::HierarchicalOptimizationWrapper hierarchicalOptimizationWrapper( - funUnqiue.get(), - std::move(scalingReaderUnique), std::move(offsetReaderUnique), std::move(sigmaReaderUnique), - numConditions, numObservables, - parpe::ErrorModel::normal); + funUnqiue.get(), + std::move(scalingReaderUnique), + std::move(offsetReaderUnique), + std::move(sigmaReaderUnique), + numConditions, + numObservables, + parpe::ErrorModel::normal); EXPECT_EQ(2, hierarchicalOptimizationWrapper.numProportionalityFactors()); - std::vector reducedParameters {3.0, 2.0}; + std::vector reducedParameters{3.0, 2.0}; // last 2 are scalings - std::vector fullParameters {3.0, 2.0, 1.5, 1.3}; + std::vector fullParameters{3.0, 2.0, 1.5, 1.3}; // scalings set to log10(1) // last 2 are scalings - std::vector onesFullParameters {0.0, 0.0, 3.0, 2.0}; + std::vector onesFullParameters{0.0, 0.0, 3.0, 2.0}; std::vector scalingDummy( - hierarchicalOptimizationWrapper.numProportionalityFactors(), 0.0); + 
hierarchicalOptimizationWrapper.numProportionalityFactors(), 0.0); std::vector offsetDummy( - hierarchicalOptimizationWrapper.numOffsetParameters(), 0.0); + hierarchicalOptimizationWrapper.numOffsetParameters(), 0.0); std::vector sigmaDummy( - hierarchicalOptimizationWrapper.numSigmaParameters(), 0.0); + hierarchicalOptimizationWrapper.numSigmaParameters(), 0.0); auto splicedParameter = parpe::spliceParameters( - gsl::make_span(reducedParameters.data(), - reducedParameters.size()), - hierarchicalOptimizationWrapper.getProportionalityFactorIndices(), - hierarchicalOptimizationWrapper.getOffsetParameterIndices(), - hierarchicalOptimizationWrapper.getSigmaParameterIndices(), - scalingDummy, offsetDummy, sigmaDummy); + gsl::make_span(reducedParameters.data(), reducedParameters.size()), + hierarchicalOptimizationWrapper.getProportionalityFactorIndices(), + hierarchicalOptimizationWrapper.getOffsetParameterIndices(), + hierarchicalOptimizationWrapper.getSigmaParameterIndices(), + scalingDummy, + offsetDummy, + sigmaDummy); EXPECT_EQ(onesFullParameters, splicedParameter); ON_CALL(*fun, getModelOutputsAndSigmas(_, _, _, _, _)) - .WillByDefault(DoAll(SetArgReferee<1>(modelOutput), - SetArgReferee<2>(sigmas), - Return(parpe::functionEvaluationSuccess))); + .WillByDefault(DoAll( + SetArgReferee<1>(modelOutput), + SetArgReferee<2>(sigmas), + Return(parpe::functionEvaluationSuccess))); // Ensure it is called with proper parameter vector: - EXPECT_CALL(*fun, getModelOutputsAndSigmas( - gsl::span(onesFullParameters), _, _, _, _)); + EXPECT_CALL( + *fun, + getModelOutputsAndSigmas( + gsl::span(onesFullParameters), _, _, _, _)); std::vector> outputs; std::vector> modelSigmas; std::tie(outputs, modelSigmas) = hierarchicalOptimizationWrapper.getUnscaledModelOutputsAndSigmas( - reducedParameters, nullptr, nullptr); + reducedParameters, nullptr, nullptr); Mock::VerifyAndClearExpectations(fun); auto s = parpe::getScaledParameter( - parpe::computeAnalyticalScalings( - 0, outputs, measurements, - *scalingReader, numObservables), - amici::ParameterScaling::log10); + parpe::computeAnalyticalScalings( + 0, outputs, measurements, *scalingReader, numObservables), + amici::ParameterScaling::log10); EXPECT_EQ(log10(2.0), s); applyOptimalScaling(0, 2.0, outputs, *scalingReader, numObservables); @@ -216,10 +223,11 @@ TEST_F(HierarchicalOptimization, HierarchicalOptimization) { // likelihood without offset must be 0 after scaling and if all other // measurements/observables agree std::vector> sigmas( - outputs.size(), std::vector(outputs[0].size(), 1.0)); - const auto llh = parpe::computeNegLogLikelihood(measurements, outputs, sigmas); - const double pi = atan(1) * 4; - const double llhOffset = 0.5 * log(2 * pi) * 20; + outputs.size(), std::vector(outputs[0].size(), 1.0)); + auto const llh = + parpe::computeNegLogLikelihood(measurements, outputs, sigmas); + double const pi = atan(1) * 4; + double const llhOffset = 0.5 * log(2 * pi) * 20; EXPECT_NEAR(llh - llhOffset, 0, 1e-10); // w.computeAnalyticalScalings(); @@ -250,18 +258,22 @@ TEST_F(HierarchicalOptimization, NoAnalyticalParameters) { EXPECT_CALL(*sigmaProvider, getOptimizationParameterIndices()); parpe::HierarchicalOptimizationWrapper w( - fun.get(), std::move(scalingProvider), - std::move(offsetProvider), std::move(sigmaProvider), - numConditions, numObservables, parpe::ErrorModel::normal); + fun.get(), + std::move(scalingProvider), + std::move(offsetProvider), + std::move(sigmaProvider), + numConditions, + numObservables, + parpe::ErrorModel::normal); - 
EXPECT_CALL(*funNonOwning, evaluate(_, _, _, Eq(gsl::span()), _, _)); + EXPECT_CALL( + *funNonOwning, evaluate(_, _, _, Eq(gsl::span()), _, _)); std::vector parameters{3.0, 2.0, 1.5, 1.3}; double fval; w.evaluate(parameters, fval, gsl::span(), nullptr, nullptr); } - TEST_F(HierarchicalOptimization, ComputeAnalyticalScalings) { /* data * measurement = data * 10 @@ -271,28 +283,29 @@ TEST_F(HierarchicalOptimization, ComputeAnalyticalScalings) { // constexpr int numTimepoints = 2; constexpr int scalingIdx = 0; - std::vector > - modelOutputsUnscaled { {1.0, 2.0, 3.0, 4.0} }; - std::vector > - measurements { {10.0, 20.0, 30.0, 40.0} }; + std::vector> modelOutputsUnscaled{{1.0, 2.0, 3.0, 4.0}}; + std::vector> measurements{{10.0, 20.0, 30.0, 40.0}}; AnalyticalParameterProviderMock scalingProvider; // TEST LIN - std::vector res {0}; + std::vector res{0}; ON_CALL(scalingProvider, getConditionsForParameter(0)) - .WillByDefault(Return(res)); + .WillByDefault(Return(res)); ON_CALL(scalingProvider, getObservablesForParameter(0, 0)) - .WillByDefault(ReturnRef(res)); + .WillByDefault(ReturnRef(res)); EXPECT_CALL(scalingProvider, getConditionsForParameter(0)); EXPECT_CALL(scalingProvider, getObservablesForParameter(0, 0)); - const auto scaling = parpe::computeAnalyticalScalings( - scalingIdx, modelOutputsUnscaled, measurements, - scalingProvider, numObservables); - const auto scaledScaling = parpe::getScaledParameter( - scaling, amici::ParameterScaling::none); + auto const scaling = parpe::computeAnalyticalScalings( + scalingIdx, + modelOutputsUnscaled, + measurements, + scalingProvider, + numObservables); + auto const scaledScaling = + parpe::getScaledParameter(scaling, amici::ParameterScaling::none); EXPECT_EQ(10.0, scaledScaling); @@ -300,11 +313,14 @@ TEST_F(HierarchicalOptimization, ComputeAnalyticalScalings) { EXPECT_CALL(scalingProvider, getConditionsForParameter(0)); EXPECT_CALL(scalingProvider, getObservablesForParameter(0, 0)); - const auto scaling2 = parpe::computeAnalyticalScalings( - scalingIdx, modelOutputsUnscaled, measurements, - scalingProvider, numObservables); - const auto scaledScaling2 = parpe::getScaledParameter( - scaling2, amici::ParameterScaling::log10); + auto const scaling2 = parpe::computeAnalyticalScalings( + scalingIdx, + modelOutputsUnscaled, + measurements, + scalingProvider, + numObservables); + auto const scaledScaling2 = + parpe::getScaledParameter(scaling2, amici::ParameterScaling::log10); EXPECT_EQ(1.0, scaledScaling2); @@ -315,16 +331,18 @@ TEST_F(HierarchicalOptimization, ComputeAnalyticalScalings) { EXPECT_CALL(scalingProvider, getConditionsForParameter(0)); EXPECT_CALL(scalingProvider, getObservablesForParameter(0, 0)); - const auto scaling3 = parpe::computeAnalyticalScalings( - scalingIdx, modelOutputsUnscaled, measurements, - scalingProvider, numObservables); - const auto scaledScaling3 = parpe::getScaledParameter( - scaling3, amici::ParameterScaling::log10); + auto const scaling3 = parpe::computeAnalyticalScalings( + scalingIdx, + modelOutputsUnscaled, + measurements, + scalingProvider, + numObservables); + auto const scaledScaling3 = + parpe::getScaledParameter(scaling3, amici::ParameterScaling::log10); EXPECT_EQ(1.0, scaledScaling3); } - TEST_F(HierarchicalOptimization, ComputeAnalyticalOffsets) { /* data * measurement = data + 10 @@ -334,39 +352,45 @@ TEST_F(HierarchicalOptimization, ComputeAnalyticalOffsets) { // constexpr int numTimepoints = 2; constexpr int scalingIdx = 0; - const std::vector > - modelOutputsUnscaled { {1.0, 2.0, 3.0, 4.0} }; - const 
std::vector > - measurements { {11.0, 12.0, 13.0, 14.0} }; + std::vector> const modelOutputsUnscaled{ + {1.0, 2.0, 3.0, 4.0}}; + std::vector> const measurements{ + {11.0, 12.0, 13.0, 14.0}}; AnalyticalParameterProviderMock scalingProvider; // TEST LIN - std::vector res {0}; + std::vector res{0}; ON_CALL(scalingProvider, getConditionsForParameter(0)) - .WillByDefault(Return(res)); + .WillByDefault(Return(res)); ON_CALL(scalingProvider, getObservablesForParameter(0, 0)) - .WillByDefault(ReturnRefOfCopy(res)); + .WillByDefault(ReturnRefOfCopy(res)); EXPECT_CALL(scalingProvider, getConditionsForParameter(0)); EXPECT_CALL(scalingProvider, getObservablesForParameter(0, 0)); - const auto offset = parpe::computeAnalyticalOffsets( - scalingIdx, modelOutputsUnscaled, measurements, - scalingProvider, numObservables); - const auto scaledOffset = parpe::getScaledParameter( - offset, amici::ParameterScaling::none); + auto const offset = parpe::computeAnalyticalOffsets( + scalingIdx, + modelOutputsUnscaled, + measurements, + scalingProvider, + numObservables); + auto const scaledOffset = + parpe::getScaledParameter(offset, amici::ParameterScaling::none); EXPECT_EQ(10.0, scaledOffset); // TEST LOG10 EXPECT_CALL(scalingProvider, getConditionsForParameter(0)); EXPECT_CALL(scalingProvider, getObservablesForParameter(0, 0)); - const auto offset2 = parpe::computeAnalyticalOffsets( - scalingIdx, modelOutputsUnscaled, measurements, - scalingProvider, numObservables); - const auto scaledOffset2 = parpe::getScaledParameter( - offset2, amici::ParameterScaling::log10); + auto const offset2 = parpe::computeAnalyticalOffsets( + scalingIdx, + modelOutputsUnscaled, + measurements, + scalingProvider, + numObservables); + auto const scaledOffset2 = + parpe::getScaledParameter(offset2, amici::ParameterScaling::log10); EXPECT_EQ(1.0, scaledOffset2); } @@ -375,135 +399,141 @@ TEST_F(HierarchicalOptimization, ApplyOptimalScaling) { // constexpr int numTimepoints = 2; constexpr int scalingIdx = 0; constexpr double scaling = 0.5; - const std::vector > - modelOutputsScaledExpected { {1.0, 4.0, 3.0, 8.0} }; - std::vector > modelOutputs { {2.0, 4.0, 6.0, 8.0} }; + std::vector> const modelOutputsScaledExpected{ + {1.0, 4.0, 3.0, 8.0}}; + std::vector> modelOutputs{{2.0, 4.0, 6.0, 8.0}}; AnalyticalParameterProviderMock scalingProvider; - std::vector res {0}; + std::vector res{0}; ON_CALL(scalingProvider, getConditionsForParameter(0)) - .WillByDefault(Return(res)); + .WillByDefault(Return(res)); // applies to all timepoints for observable 0 (== element 0 and 2) ON_CALL(scalingProvider, getObservablesForParameter(0, 0)) - .WillByDefault(ReturnRefOfCopy(res)); + .WillByDefault(ReturnRefOfCopy(res)); EXPECT_CALL(scalingProvider, getConditionsForParameter(0)); EXPECT_CALL(scalingProvider, getObservablesForParameter(0, 0)); - parpe::applyOptimalScaling(scalingIdx, scaling, modelOutputs, - scalingProvider, numObservables); + parpe::applyOptimalScaling( + scalingIdx, scaling, modelOutputs, scalingProvider, numObservables); EXPECT_EQ(modelOutputsScaledExpected, modelOutputs); } - TEST_F(HierarchicalOptimization, ApplyOptimalOffset) { constexpr int numObservables = 2; // constexpr int numTimepoints = 2; constexpr int offsetIdx = 0; constexpr double offset = 5; - const std::vector > modelOutputsScaledExpected { {1.0, 4.0, 3.0, 8.0} }; - std::vector > modelOutputs { {-4.0, 4.0, -2.0, 8.0} }; - + std::vector> const modelOutputsScaledExpected{ + {1.0, 4.0, 3.0, 8.0}}; + std::vector> modelOutputs{{-4.0, 4.0, -2.0, 8.0}}; 
AnalyticalParameterProviderMock offsetProvider; - std::vector res {0}; + std::vector res{0}; ON_CALL(offsetProvider, getConditionsForParameter(0)) - .WillByDefault(Return(res)); + .WillByDefault(Return(res)); // applies to all timepoints for observable 0 (== element 0 and 2) ON_CALL(offsetProvider, getObservablesForParameter(0, 0)) - .WillByDefault(ReturnRefOfCopy(res)); + .WillByDefault(ReturnRefOfCopy(res)); EXPECT_CALL(offsetProvider, getConditionsForParameter(0)); EXPECT_CALL(offsetProvider, getObservablesForParameter(0, 0)); - parpe::applyOptimalOffset(offsetIdx, offset, modelOutputs, - offsetProvider, numObservables); + parpe::applyOptimalOffset( + offsetIdx, offset, modelOutputs, offsetProvider, numObservables); EXPECT_EQ(modelOutputsScaledExpected, modelOutputs); } - TEST_F(HierarchicalOptimization, Scaling) { - EXPECT_EQ(42.0, - amici::getUnscaledParameter(42.0, amici::ParameterScaling::none)); - EXPECT_EQ(42.0, - parpe::getScaledParameter(42.0, amici::ParameterScaling::none)); - - EXPECT_EQ(2.0, - parpe::getScaledParameter(100.0, amici::ParameterScaling::log10)); - EXPECT_EQ(100.0, - amici::getUnscaledParameter(2.0, amici::ParameterScaling::log10)); - - EXPECT_DOUBLE_EQ(1.0, - parpe::getScaledParameter(std::exp(1.0), - amici::ParameterScaling::ln)); - EXPECT_DOUBLE_EQ(std::exp(1), - amici::getUnscaledParameter(1.0, - amici::ParameterScaling::ln)); + EXPECT_EQ( + 42.0, amici::getUnscaledParameter(42.0, amici::ParameterScaling::none)); + EXPECT_EQ( + 42.0, parpe::getScaledParameter(42.0, amici::ParameterScaling::none)); + + EXPECT_EQ( + 2.0, parpe::getScaledParameter(100.0, amici::ParameterScaling::log10)); + EXPECT_EQ( + 100.0, + amici::getUnscaledParameter(2.0, amici::ParameterScaling::log10)); + + EXPECT_DOUBLE_EQ( + 1.0, + parpe::getScaledParameter(std::exp(1.0), amici::ParameterScaling::ln)); + EXPECT_DOUBLE_EQ( + std::exp(1), + amici::getUnscaledParameter(1.0, amici::ParameterScaling::ln)); } TEST(HierarchicalOptimization1, SpliceParameters) { - const std::vector - fullParametersExp {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; + std::vector const fullParametersExp{ + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}; - const std::vector reducedParameters {1.0, 5.0, 8.0}; + std::vector const reducedParameters{1.0, 5.0, 8.0}; - const std::vector proportionalityFactorIndices {2, 3, 7}; - const std::vector scalings {2.0, 3.0, 7.0}; + std::vector const proportionalityFactorIndices{2, 3, 7}; + std::vector const scalings{2.0, 3.0, 7.0}; - const std::vector offsetParameterIndices {0, 4}; - const std::vector offsets {0.0, 4.0}; + std::vector const offsetParameterIndices{0, 4}; + std::vector const offsets{0.0, 4.0}; - const std::vector sigmaParameterIndices {6}; - const std::vector sigmas {6.0}; + std::vector const sigmaParameterIndices{6}; + std::vector const sigmas{6.0}; - auto fullParametersAct = parpe::spliceParameters(gsl::make_span(reducedParameters.data(), reducedParameters.size()), - proportionalityFactorIndices, offsetParameterIndices, sigmaParameterIndices, - scalings, offsets, sigmas); + auto fullParametersAct = parpe::spliceParameters( + gsl::make_span(reducedParameters.data(), reducedParameters.size()), + proportionalityFactorIndices, + offsetParameterIndices, + sigmaParameterIndices, + scalings, + offsets, + sigmas); EXPECT_EQ(fullParametersExp, fullParametersAct); } TEST(HierarchicalOptimization1, SpliceParametersNothingToDo) { - const std::vector fullParametersExp {0.0, 1.0, 2.0}; + std::vector const fullParametersExp{0.0, 1.0, 2.0}; - const std::vector reducedParameters 
{0.0, 1.0, 2.0}; + std::vector const reducedParameters{0.0, 1.0, 2.0}; - const std::vector proportionalityFactorIndices; - const std::vector scalings; + std::vector const proportionalityFactorIndices; + std::vector const scalings; - const std::vector offsetParameterIndices; - const std::vector offsets; + std::vector const offsetParameterIndices; + std::vector const offsets; - const std::vector sigmaParameterIndices; - const std::vector sigmas; + std::vector const sigmaParameterIndices; + std::vector const sigmas; auto fullParametersAct = parpe::spliceParameters( - reducedParameters, proportionalityFactorIndices, - offsetParameterIndices, sigmaParameterIndices, - scalings, offsets, sigmas); + reducedParameters, + proportionalityFactorIndices, + offsetParameterIndices, + sigmaParameterIndices, + scalings, + offsets, + sigmas); EXPECT_EQ(fullParametersExp, fullParametersAct); } - TEST(HierarchicalOptimization1, FillFilteredParams) { - const std::vector resultExp {1.0, 2.0, 3.0, 4.0, 5.0}; - const std::vector - valuesToFilter {9.0, 1.0, 2.0, 9.0, 9.0, 3.0, 4.0, 5.0, 9.0}; - const std::vector sortedIndicesToExclude {0, 3, 4, 8}; + std::vector const resultExp{1.0, 2.0, 3.0, 4.0, 5.0}; + std::vector const valuesToFilter{ + 9.0, 1.0, 2.0, 9.0, 9.0, 3.0, 4.0, 5.0, 9.0}; + std::vector const sortedIndicesToExclude{0, 3, 4, 8}; auto resultAct = std::vector( - valuesToFilter.size() - sortedIndicesToExclude.size()); - parpe::fillFilteredParams(valuesToFilter, sortedIndicesToExclude, - resultAct); + valuesToFilter.size() - sortedIndicesToExclude.size()); + parpe::fillFilteredParams( + valuesToFilter, sortedIndicesToExclude, resultAct); EXPECT_EQ(resultExp, resultAct); } - TEST_F(HierarchicalOptimization, WrappedFunIsCalledWithGradient) { // setup auto fun = std::make_unique(); @@ -514,20 +544,21 @@ TEST_F(HierarchicalOptimization, WrappedFunIsCalledWithGradient) { auto sigmaProvider = std::make_unique(); auto scalingProviderNonOwning = scalingProvider.get(); - std::vector res {0}; - std::vector idxs {3}; + std::vector res{0}; + std::vector idxs{3}; ON_CALL(*scalingProvider, getOptimizationParameterIndices()) - .WillByDefault(Return(idxs)); + .WillByDefault(Return(idxs)); ON_CALL(*scalingProvider, getConditionsForParameter(0)) - .WillByDefault(Return(res)); + .WillByDefault(Return(res)); // applies to all timepoints for observable 0 (== element 0 and 2) ON_CALL(*scalingProvider, getObservablesForParameter(0, 0)) - .WillByDefault(ReturnRefOfCopy(res)); + .WillByDefault(ReturnRefOfCopy(res)); ON_CALL(*fun, numParameters()).WillByDefault(Return(numParameters_)); ON_CALL(*fun, getModelOutputsAndSigmas(_, _, _, _, _)) - .WillByDefault(DoAll(SetArgReferee<1>(modelOutput), - SetArgReferee<2>(sigmas), - Return(parpe::functionEvaluationSuccess))); + .WillByDefault(DoAll( + SetArgReferee<1>(modelOutput), + SetArgReferee<2>(sigmas), + Return(parpe::functionEvaluationSuccess))); ON_CALL(*fun, getAllMeasurements()).WillByDefault(Return(measurements)); EXPECT_CALL(*fun, numParameters()).Times(2); @@ -536,9 +567,12 @@ TEST_F(HierarchicalOptimization, WrappedFunIsCalledWithGradient) { EXPECT_CALL(*sigmaProvider, getOptimizationParameterIndices()); parpe::HierarchicalOptimizationWrapper hierarchicalWrapper( - fun.get(), std::move(scalingProvider), - std::move(offsetProvider), std::move(sigmaProvider), - numConditions, numObservables, + fun.get(), + std::move(scalingProvider), + std::move(offsetProvider), + std::move(sigmaProvider), + numConditions, + numObservables, parpe::ErrorModel::normal); 
Mock::VerifyAndClearExpectations(funNonOwning); Mock::VerifyAndClearExpectations(scalingProviderNonOwning); @@ -547,18 +581,20 @@ TEST_F(HierarchicalOptimization, WrappedFunIsCalledWithGradient) { EXPECT_CALL(*funNonOwning, numParameters()).Times(1); - const std::vector parameters { 1.0, 2.0, 3.0, /*4.0*/ }; - EXPECT_EQ((unsigned) hierarchicalWrapper.numParameters(), - parameters.size()); + std::vector const parameters{1.0, 2.0, 3.0, /*4.0*/}; + EXPECT_EQ((unsigned)hierarchicalWrapper.numParameters(), parameters.size()); std::vector gradient(parameters.size()); // ensure fun::evaluate is called with gradient EXPECT_CALL(*funNonOwning, numParameters()); EXPECT_CALL(*funNonOwning, getModelOutputsAndSigmas(_, _, _, _, _)); - EXPECT_CALL(*scalingProviderNonOwning, getConditionsForParameter(0)).Times(2); - EXPECT_CALL(*scalingProviderNonOwning, getObservablesForParameter(0, 0)).Times(2); - EXPECT_CALL(*funNonOwning, evaluate(_, _, _, Ne(gsl::span()), _, _)); + EXPECT_CALL(*scalingProviderNonOwning, getConditionsForParameter(0)) + .Times(2); + EXPECT_CALL(*scalingProviderNonOwning, getObservablesForParameter(0, 0)) + .Times(2); + EXPECT_CALL( + *funNonOwning, evaluate(_, _, _, Ne(gsl::span()), _, _)); double fval; hierarchicalWrapper.evaluate(parameters, fval, gradient, nullptr, nullptr); @@ -570,32 +606,35 @@ TEST_F(HierarchicalOptimization, WrappedFunIsCalledWithGradient) { EXPECT_CALL(*funNonOwning, numParameters()); EXPECT_CALL(*funNonOwning, getModelOutputsAndSigmas(_, _, _, _, _)); EXPECT_CALL(*scalingProviderNonOwning, getConditionsForParameter(0)) - .Times(2); + .Times(2); EXPECT_CALL(*scalingProviderNonOwning, getObservablesForParameter(0, 0)) - .Times(2); + .Times(2); - hierarchicalWrapper.evaluate(parameters, fval, gsl::span(), nullptr, nullptr); + hierarchicalWrapper.evaluate( + parameters, fval, gsl::span(), nullptr, nullptr); } TEST(HierarchicalOptimization1, LikelihoodOfMatchingData) { - const std::vector data {1.0, 2.0, 3.0}; - const std::vector sigmas {1.0, 1.0, 1.0}; + std::vector const data{1.0, 2.0, 3.0}; + std::vector const sigmas{1.0, 1.0, 1.0}; - const double pi = atan(1)*4; - const double llhOffset = 0.5 * log(2 * pi); - const double expected = llhOffset * static_cast(data.size()); + double const pi = atan(1) * 4; + double const llhOffset = 0.5 * log(2 * pi); + double const expected = llhOffset * static_cast(data.size()); auto actual = parpe::computeNegLogLikelihood(data, data, sigmas); EXPECT_EQ(expected, actual); } - TEST_F(HierarchicalOptimization, ProblemWrapper) { - //std::unique_ptr problem(new parpe::QuadraticTestProblem()); - //auto hCost = std::make_unique(); - //auto wrappedFun = dynamic_cast*>(wrappedProblem->costFun.get()); - - //parpe::HierarchicalOptimizationProblemWrapper hw(std::move(problem), std::move(hCost)); + // std::unique_ptr problem(new + // parpe::QuadraticTestProblem()); auto hCost = + // std::make_unique(); auto + // wrappedFun = + // dynamic_cast*>(wrappedProblem->costFun.get()); + + // parpe::HierarchicalOptimizationProblemWrapper hw(std::move(problem), + // std::move(hCost)); // TODO test wrapper; need dataprovider?! 
// mock().ignoreOtherCalls(); diff --git a/tests/parpeamici/multiConditionProblemTest.cpp b/tests/parpeamici/multiConditionProblemTest.cpp index 20af6d627..9c47f59f9 100644 --- a/tests/parpeamici/multiConditionProblemTest.cpp +++ b/tests/parpeamici/multiConditionProblemTest.cpp @@ -3,4 +3,3 @@ #include "../parpecommon/testingMisc.h" #include - diff --git a/tests/parpeamici/simulationResultWriterTest.cpp b/tests/parpeamici/simulationResultWriterTest.cpp index 6733cd0e7..1d505c231 100644 --- a/tests/parpeamici/simulationResultWriterTest.cpp +++ b/tests/parpeamici/simulationResultWriterTest.cpp @@ -5,18 +5,17 @@ #include #include -#include #include #include +#include #include #include #include - TEST(SimulationResultWriter, ResultWriter) { // setup ResultWriter - const char* tmpName = "parpeTest_testResultWriter.h5"; + char const* tmpName = "parpeTest_testResultWriter.h5"; auto _ = gsl::finally([tmpName] { remove(tmpName); }); parpe::SimulationResultWriter rw(tmpName, "/testResultWriter/"); @@ -29,23 +28,55 @@ TEST(SimulationResultWriter, ResultWriter) { constexpr int numSimulations = 2; constexpr int nx = 3; constexpr int nytrue = 2; - const std::vector timepoints {1.0, 2.0}; + std::vector const timepoints{1.0, 2.0}; amici::ExpData edata(nytrue, 0, 0, timepoints); - std::vector measurements {1.1, 2.1, 3.1, 4.1}; - EXPECT_TRUE(measurements.size() == (unsigned) nytrue * timepoints.size()); + std::vector measurements{1.1, 2.1, 3.1, 4.1}; + EXPECT_TRUE(measurements.size() == (unsigned)nytrue * timepoints.size()); edata.setObservedData(measurements); amici::ReturnData rdata( - timepoints, - amici::ModelDimensions(nx, nx, nx, nx, 0, 0, 0, nytrue, nytrue, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - std::vector(), 0, 0, 0, 0, 0, 0), - 0, 0, timepoints.size(), 0, - std::vector(), - amici::SecondOrderMode::none, amici::SensitivityOrder::none, - amici::SensitivityMethod::none, amici::RDataReporting::full, - true, true, 50); + timepoints, + amici::ModelDimensions( + nx, + nx, + nx, + nx, + 0, + 0, + 0, + nytrue, + nytrue, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + std::vector(), + 0, + 0, + 0, + 0, + 0, + 0), + 0, + 0, + timepoints.size(), + 0, + std::vector(), + amici::SecondOrderMode::none, + amici::SensitivityOrder::none, + amici::SensitivityMethod::none, + amici::RDataReporting::full, + true, + true, + 50); std::iota(rdata.x.begin(), rdata.x.end(), 0); rdata.llh = 1.2345; rdata.y.resize(measurements.size()); @@ -68,14 +99,17 @@ TEST(SimulationResultWriter, ResultWriter) { // verify hsize_t m, n; auto xAct = amici::hdf5::getDoubleDataset2D(file, rw.xPath + "/1", m, n); - parpe::checkEqualArray(rdata.x.data(), xAct.data(), xAct.size(), 1e-16, 1e-16); + parpe::checkEqualArray( + rdata.x.data(), xAct.data(), xAct.size(), 1e-16, 1e-16); - auto yMesAct = amici::hdf5::getDoubleDataset2D(file, rw.yMesPath + "/1", m, n); - parpe::checkEqualArray(measurements.data(), yMesAct.data(), yMesAct.size(), 1e-16, 1e-16); + auto yMesAct = + amici::hdf5::getDoubleDataset2D(file, rw.yMesPath + "/1", m, n); + parpe::checkEqualArray( + measurements.data(), yMesAct.data(), yMesAct.size(), 1e-16, 1e-16); } TEST(SimulationResultWriter, ResultWriterNewExistingFile) { - const char* tmpName = "parpeTest_testResultWriterNewExistingFile.h5"; + char const* tmpName = "parpeTest_testResultWriterNewExistingFile.h5"; auto _ = gsl::finally([tmpName] { remove(tmpName); }); // create file diff --git a/tests/parpecommon/commonTests.cpp b/tests/parpecommon/commonTests.cpp index a3f57de22..98dc0550d 100644 --- 
a/tests/parpecommon/commonTests.cpp +++ b/tests/parpecommon/commonTests.cpp @@ -1,14 +1,14 @@ -#include -#include +#include #include +#include +#include #include -#include #include "testingMisc.h" #include -#include #include // strlen +#include #include @@ -18,95 +18,94 @@ using namespace parpe; -TEST(Testing, TenToMinusInf) { - ASSERT_EQ(0.0, pow(10, -INFINITY)); -} +TEST(Testing, TenToMinusInf) { ASSERT_EQ(0.0, pow(10, -INFINITY)); } TEST(Testing, WithinTolerance) { - captureStreamToString([](){ - double atol = 0.1; - double rtol = 0.1; - - EXPECT_TRUE(withinTolerance(1.0, 1.0, atol, rtol, 0)); - // abs false, rel true - EXPECT_TRUE(withinTolerance(2.0, 2.15, atol, rtol, 0)); - // abs true, rel false - EXPECT_TRUE(withinTolerance(0, 0.05, atol, rtol, 0)); - - EXPECT_TRUE(withinTolerance(NAN, NAN, atol, rtol, 0)); - EXPECT_FALSE(withinTolerance(NAN, 1, atol, rtol, 0)); - EXPECT_FALSE(withinTolerance(1, NAN, atol, rtol, 0)); - - EXPECT_TRUE(withinTolerance(INFINITY, INFINITY, atol, rtol, 0)); - EXPECT_FALSE(withinTolerance(1, INFINITY, atol, rtol, 0)); - EXPECT_FALSE(withinTolerance(INFINITY, 1, atol, rtol, 0)); - }, stderr, STDERR_FILENO); + captureStreamToString( + []() { + double atol = 0.1; + double rtol = 0.1; + + EXPECT_TRUE(withinTolerance(1.0, 1.0, atol, rtol, 0)); + // abs false, rel true + EXPECT_TRUE(withinTolerance(2.0, 2.15, atol, rtol, 0)); + // abs true, rel false + EXPECT_TRUE(withinTolerance(0, 0.05, atol, rtol, 0)); + + EXPECT_TRUE(withinTolerance(NAN, NAN, atol, rtol, 0)); + EXPECT_FALSE(withinTolerance(NAN, 1, atol, rtol, 0)); + EXPECT_FALSE(withinTolerance(1, NAN, atol, rtol, 0)); + + EXPECT_TRUE(withinTolerance(INFINITY, INFINITY, atol, rtol, 0)); + EXPECT_FALSE(withinTolerance(1, INFINITY, atol, rtol, 0)); + EXPECT_FALSE(withinTolerance(INFINITY, 1, atol, rtol, 0)); + }, + stderr, + STDERR_FILENO); } TEST(Testing, CheckEqualArray) { - const double expected[] = {1.0, 2.0, 3.0}; - const double actual[] = {1.0, 2.0, 3.0}; + double const expected[] = {1.0, 2.0, 3.0}; + double const actual[] = {1.0, 2.0, 3.0}; checkEqualArray(expected, actual, 3, 1e-16, 1e-16); checkEqualArray(nullptr, nullptr, 3, 1e-16, 1e-16); } TEST(Testing, RandInt) { - const int numTests = 100; - const int min = -1; - const int max = 1; + int const numTests = 100; + int const min = -1; + int const max = 1; - for(int i = 0; i < numTests; ++i) { + for (int i = 0; i < numTests; ++i) { int r = randInt(min, max); EXPECT_TRUE(r >= min && r <= max); } } TEST(Common, Backtrace) { - std::string output = captureStreamToString([]() { - parpe::printBacktrace(5); - }, stderr, STDERR_FILENO); + std::string output = captureStreamToString( + []() { parpe::printBacktrace(5); }, stderr, STDERR_FILENO); EXPECT_TRUE(100 < output.size()); } TEST(Common, RandDouble) { - const int numTests = 100; - const double min = -1.0; - const double max = 1.0; + int const numTests = 100; + double const min = -1.0; + double const max = 1.0; - for(int i = 0; i < numTests; ++i) { + for (int i = 0; i < numTests; ++i) { double r = parpe::randDouble(min, max); EXPECT_TRUE(r >= min && r <= max); } } TEST(Common, FillArrayRandomDoubleSameInterval) { - const int numTests = 100; - const double min = -1.0; - const double max = 1.0; + int const numTests = 100; + double const min = -1.0; + double const max = 1.0; double buf[numTests]; parpe::fillArrayRandomDoubleSameInterval(min, max, buf); - for(int i = 0; i < numTests; ++i) { + for (int i = 0; i < numTests; ++i) { EXPECT_TRUE(buf[i] >= min && buf[i] <= max); } } TEST(Common, 
FillArrayRandomDoubleIndividualInterval) { - const int numTests = 100; - const double min[numTests] = {-1.0, 0.0, 1.0}; - const double max[numTests] = {-0.5, 0.5, 1.5}; + int const numTests = 100; + double const min[numTests] = {-1.0, 0.0, 1.0}; + double const max[numTests] = {-0.5, 0.5, 1.5}; double buf[numTests]; parpe::fillArrayRandomDoubleIndividualInterval(min, max, buf); - for(int i = 0; i < numTests; ++i) { + for (int i = 0; i < numTests; ++i) { EXPECT_TRUE(buf[i] >= min[i] && buf[i] <= max[i]); } } - #ifdef PARPE_ENABLE_MPI TEST(Common, Mpi) { // Before MPI initialized @@ -125,7 +124,6 @@ TEST(Common, Mpi) { } #endif - TEST(Common, StrFormatCurrentLocaltime) { int buflen = 10; char buf[buflen]; @@ -133,36 +131,29 @@ TEST(Common, StrFormatCurrentLocaltime) { EXPECT_EQ(3UL, std::strlen(buf)); } - TEST(Logging, PrintDebugInfoAndWait) { - captureStreamToString([](){ - parpe::printDebugInfoAndWait(0); - }, stdout); + captureStreamToString([]() { parpe::printDebugInfoAndWait(0); }, stdout); } TEST(Logging, MessageIsPrinted) { - captureStreamToString([](){ - parpe::logmessage(parpe::loglevel::error, "error"); - }, stdout); + captureStreamToString( + []() { parpe::logmessage(parpe::loglevel::error, "error"); }, stdout); } TEST(Logging, PrintMPIInfo) { - std::string s = captureStreamToString([](){ - parpe::printMPIInfo(); - }, stdout); + std::string s = + captureStreamToString([]() { parpe::printMPIInfo(); }, stdout); EXPECT_TRUE(s.size() > 20); } TEST(Logging, LogProcessStats) { - std::string s = captureStreamToString([](){ - parpe::logProcessStats(); - }, stdout); + std::string s = + captureStreamToString([]() { parpe::logProcessStats(); }, stdout); EXPECT_TRUE(s.size() > 200); } - #include #include @@ -176,19 +167,22 @@ TEST(CostFunction, MseZero) { EXPECT_EQ(costExp, costAct); std::vector predictionGradient0 = {1.0, 1.0}; - std::vector predictionGradient = { &predictionGradient0[0], - &predictionGradient0[1]}; + std::vector predictionGradient = { + &predictionGradient0[0], &predictionGradient0[1]}; std::vector costGradientExp = {0.0}; std::vector costGradientAct = {NAN}; - mse.evaluate(label, prediction, - 1, predictionGradient, - costAct, costGradientAct.data()); + mse.evaluate( + label, + prediction, + 1, + predictionGradient, + costAct, + costGradientAct.data()); EXPECT_EQ(costExp, costAct); EXPECT_TRUE(costGradientExp == costGradientAct); - } TEST(CostFunction, MseNonzero) { @@ -201,26 +195,29 @@ TEST(CostFunction, MseNonzero) { EXPECT_EQ(costExp, costAct); std::vector predictionGradient0 = {5.0, 3.0}; - std::vector predictionGradient = { &predictionGradient0[0], &predictionGradient0[1]}; + std::vector predictionGradient = { + &predictionGradient0[0], &predictionGradient0[1]}; std::vector costGradientExp = {3.0}; std::vector costGradientAct = {NAN}; - mse.evaluate(label, prediction, - 1, predictionGradient, - costAct, costGradientAct.data()); + mse.evaluate( + label, + prediction, + 1, + predictionGradient, + costAct, + costGradientAct.data()); EXPECT_EQ(costExp, costAct); EXPECT_TRUE(costGradientExp == costGradientAct); - } - TEST(CostFunction, LinearModel) { - std::vector parameters = { 3.0, 2.0 }; // x = 3.0, b = 2.0 - std::vector> features = { { 4.0 } }; + std::vector parameters = {3.0, 2.0}; // x = 3.0, b = 2.0 + std::vector> features = {{4.0}}; // y = A x + b = 4.0 * 3.0 + 2.0 = 14.0 - std::vector outputsExp {14.0}; + std::vector outputsExp{14.0}; std::vector outputsAct(features.size(), NAN); LinearModel lm; @@ -230,19 +227,18 @@ TEST(CostFunction, LinearModel) { std::vector> 
gradExp = {{4.0, 1.0}}; - auto gradAct = std::vector >( - features.size(), - std::vector(parameters.size(), NAN)); + auto gradAct = std::vector>( + features.size(), std::vector(parameters.size(), NAN)); lm.evaluate(parameters, features, outputsAct, gradAct); EXPECT_TRUE(gradExp == gradAct); } TEST(CostFunction, LinearModel2) { - std::vector parameters = { 3.0, 1.0, 2.0 }; - std::vector> features = { { 4.0, 1.0 } }; + std::vector parameters = {3.0, 1.0, 2.0}; + std::vector> features = {{4.0, 1.0}}; // y = A x + b = 4.0 * 3.0 + 1.0*1.0 + 2.0 = 15.0 - std::vector outputsExp {15.0}; + std::vector outputsExp{15.0}; std::vector outputsAct(features.size(), NAN); LinearModel lm; @@ -252,20 +248,19 @@ TEST(CostFunction, LinearModel2) { std::vector> gradExp = {{4.0, 1.0, 1.0}}; - auto gradAct = std::vector >( - features.size(), - std::vector(parameters.size(), NAN)); + auto gradAct = std::vector>( + features.size(), std::vector(parameters.size(), NAN)); lm.evaluate(parameters, features, outputsAct, gradAct); EXPECT_TRUE(gradExp == gradAct); } TEST(CostFunction, LinearModel3) { - std::vector parameters = { 3.0, 1.0, 2.0 }; - std::vector> features = { { 4.0, 1.0 }, { 8.0, 2.0 }}; + std::vector parameters = {3.0, 1.0, 2.0}; + std::vector> features = {{4.0, 1.0}, {8.0, 2.0}}; // y = A x + b = 4.0 * 3.0 + 1.0*1.0 + 2.0 = 15.0 // y2= 8.0 * 3.0 + 2.0*1.0 + 2.0 = 28 - std::vector outputsExp {15.0, 28.0}; + std::vector outputsExp{15.0, 28.0}; std::vector outputsAct(features.size(), NAN); parpe::LinearModel lm; @@ -273,10 +268,11 @@ TEST(CostFunction, LinearModel3) { EXPECT_TRUE(outputsExp == outputsAct); - std::vector> gradExp = {{4.0, 1.0, 1.0}, {8.0, 2.0, 1.0}}; + std::vector> gradExp = { + {4.0, 1.0, 1.0}, {8.0, 2.0, 1.0}}; - auto gradAct = std::vector >( - features.size(), std::vector(parameters.size(), NAN)); + auto gradAct = std::vector>( + features.size(), std::vector(parameters.size(), NAN)); lm.evaluate(parameters, features, outputsAct, gradAct); EXPECT_TRUE(gradExp == gradAct); diff --git a/tests/parpecommon/hdf5MiscTests.cpp b/tests/parpecommon/hdf5MiscTests.cpp index 06e2dfff0..62d6f6194 100644 --- a/tests/parpecommon/hdf5MiscTests.cpp +++ b/tests/parpecommon/hdf5MiscTests.cpp @@ -4,12 +4,12 @@ #include "testingMisc.h" -#include #include +#include class HDF5 : public ::testing::Test { -protected: + protected: void SetUp() override { // avoid memory problems H5::H5Library::dontAtExit(); @@ -23,50 +23,47 @@ class HDF5 : public ::testing::Test { std::remove(tempFileName.c_str()); } - std::string tempFileName {"parpeTest_hdf5Misc.h5"}; + std::string tempFileName{"parpeTest_hdf5Misc.h5"}; H5::H5File file; }; - - TEST_F(HDF5, OpenExistingFileNoOverwrite) { - EXPECT_THROW(parpe::hdf5CreateFile(tempFileName, false), - parpe::HDF5Exception); + EXPECT_THROW( + parpe::hdf5CreateFile(tempFileName, false), parpe::HDF5Exception); } - TEST_F(HDF5, OpenExistingFileOverwrite) { file.close(); file = parpe::hdf5CreateFile(tempFileName, true); } - -TEST_F(HDF5, MutexGetLock) { - parpe::hdf5MutexGetLock(); -} - +TEST_F(HDF5, MutexGetLock) { parpe::hdf5MutexGetLock(); } TEST_F(HDF5, ErrorStackWalker) { H5_SAVE_ERROR_HANDLER; // provoke error by asking to truncate a file that is already open - hid_t fileId = H5Fcreate(tempFileName.c_str(), H5F_ACC_TRUNC, - H5P_DEFAULT, H5P_DEFAULT); + hid_t fileId = H5Fcreate( + tempFileName.c_str(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); EXPECT_TRUE(fileId <= 0); - auto s = parpe::captureStreamToString([](){ - H5Ewalk2(H5E_DEFAULT, H5E_WALK_DOWNWARD, - 
parpe::hdf5ErrorStackWalker_cb, nullptr); - }, stdout); + auto s = parpe::captureStreamToString( + []() { + H5Ewalk2( + H5E_DEFAULT, + H5E_WALK_DOWNWARD, + parpe::hdf5ErrorStackWalker_cb, + nullptr); + }, + stdout); H5_RESTORE_ERROR_HANDLER; EXPECT_TRUE(200 < s.size()); } - TEST_F(HDF5, CreateGroup) { - const char *groupName = "/test"; + char const* groupName = "/test"; EXPECT_FALSE(parpe::hdf5GroupExists(file, groupName)); @@ -75,19 +72,17 @@ TEST_F(HDF5, CreateGroup) { EXPECT_TRUE(parpe::hdf5GroupExists(file, groupName)); } - TEST_F(HDF5, CreateExistingGroup) { parpe::hdf5CreateGroup(file, "/test", false); H5_SAVE_ERROR_HANDLER; - EXPECT_THROW(parpe::hdf5CreateGroup(file, "/test", false), - parpe::HDF5Exception); + EXPECT_THROW( + parpe::hdf5CreateGroup(file, "/test", false), parpe::HDF5Exception); H5_RESTORE_ERROR_HANDLER; } - TEST_F(HDF5, EnsureGroupExists) { - const char *groupName = "/test"; + char const* groupName = "/test"; EXPECT_FALSE(parpe::hdf5GroupExists(file, groupName)); @@ -99,9 +94,9 @@ TEST_F(HDF5, EnsureGroupExists) { } TEST_F(HDF5, StringAttribute) { - const char *groupName = "/"; - const char *attrName = "testA"; - const char *expAttrValue = "adsf"; + char const* groupName = "/"; + char const* attrName = "testA"; + char const* expAttrValue = "adsf"; EXPECT_FALSE(parpe::hdf5AttributeExists(file, groupName, attrName)); @@ -111,8 +106,8 @@ TEST_F(HDF5, StringAttribute) { H5T_class_t type_class; size_t size = 0; - int ret = H5LTget_attribute_info(file.getId(), groupName, attrName, nullptr, - &type_class, &size); + int ret = H5LTget_attribute_info( + file.getId(), groupName, attrName, nullptr, &type_class, &size); EXPECT_TRUE(ret >= 0); char actValue[size]; @@ -120,27 +115,28 @@ TEST_F(HDF5, StringAttribute) { EXPECT_EQ(std::string(expAttrValue), std::string(actValue)); } - TEST_F(HDF5, DatasetDimensions) { - const char datasetName[] = "bla"; - const int rank = 3; - const hsize_t dims[rank] = {1,2,3}; - const int buffer[6] = {1}; + char const datasetName[] = "bla"; + int const rank = 3; + hsize_t const dims[rank] = {1, 2, 3}; + int const buffer[6] = {1}; EXPECT_FALSE(file.nameExists(datasetName)); - EXPECT_THROW(parpe::hdf5GetDatasetDimensions( - file, datasetName, rank, - nullptr, nullptr, nullptr, nullptr), - H5::Exception); + EXPECT_THROW( + parpe::hdf5GetDatasetDimensions( + file, datasetName, rank, nullptr, nullptr, nullptr, nullptr), + H5::Exception); - EXPECT_TRUE(H5LTmake_dataset_int(file.getId(), datasetName, rank, dims, buffer) >= 0); + EXPECT_TRUE( + H5LTmake_dataset_int(file.getId(), datasetName, rank, dims, buffer) >= + 0); EXPECT_TRUE(file.nameExists(datasetName)); int d0 = 0, d1 = 0, d2 = 0, d3 = 0; - parpe::hdf5GetDatasetDimensions(file, datasetName, rank, - &d0, &d1, &d2, &d3); + parpe::hdf5GetDatasetDimensions( + file, datasetName, rank, &d0, &d1, &d2, &d3); EXPECT_EQ((signed)dims[0], d0); EXPECT_EQ((signed)dims[1], d1); EXPECT_EQ((signed)dims[2], d2); diff --git a/tests/parpecommon/testingMisc.cpp b/tests/parpecommon/testingMisc.cpp index e3cc80d4d..c23e34a40 100644 --- a/tests/parpecommon/testingMisc.cpp +++ b/tests/parpecommon/testingMisc.cpp @@ -1,59 +1,76 @@ #include "testingMisc.h" -#include -#include +#include +#include #include +#include +#include +#include // O_WRONLY +#include #include +#include #include -#include #include -#include // O_WRONLY -#include -#include -#include #include #include namespace parpe { -bool withinTolerance(double expected, double actual, double atol, double rtol, - int index) { - bool withinTol = fabs(expected - actual) 
<= atol - || fabs((expected - actual) / (rtol + expected)) <= rtol; +bool withinTolerance( + double expected, + double actual, + double atol, + double rtol, + int index) { + bool withinTol = fabs(expected - actual) <= atol || + fabs((expected - actual) / (rtol + expected)) <= rtol; - if(!withinTol && std::isnan(expected) && std::isnan(actual)) + if (!withinTol && std::isnan(expected) && std::isnan(actual)) withinTol = true; - if(!withinTol && std::isinf(expected) && std::isinf(actual)) + if (!withinTol && std::isinf(expected) && std::isinf(actual)) withinTol = true; - if(!withinTol) { - fprintf(stderr, "ERROR: Expected value %e, but was %e at index %d.\n", - expected, actual, index); - fprintf(stderr, " Relative error: %e (tolerance was %e)\n", - fabs((expected - actual) / (rtol + expected)), rtol); - fprintf(stderr, " Absolute error: %e (tolerance was %e)\n", - fabs(expected - actual), atol); - //printBacktrace(12); + if (!withinTol) { + fprintf( + stderr, + "ERROR: Expected value %e, but was %e at index %d.\n", + expected, + actual, + index); + fprintf( + stderr, + " Relative error: %e (tolerance was %e)\n", + fabs((expected - actual) / (rtol + expected)), + rtol); + fprintf( + stderr, + " Absolute error: %e (tolerance was %e)\n", + fabs(expected - actual), + atol); + // printBacktrace(12); } return withinTol; } -void checkEqualArray(const double *expected, const double *actual, int length, - double atol, double rtol) { - if(!expected && !actual) +void checkEqualArray( + double const* expected, + double const* actual, + int length, + double atol, + double rtol) { + if (!expected && !actual) return; EXPECT_TRUE(expected && actual); - if(!(expected && actual)) { + if (!(expected && actual)) { // in case EXPECT_TRUE does not exit return; } - for(int i = 0; i < length; ++i) - { + for (int i = 0; i < length; ++i) { bool withinTol = withinTolerance(expected[i], actual[i], atol, rtol, i); EXPECT_TRUE(withinTol); } @@ -67,8 +84,8 @@ int randInt(int min, int max) { return dis(gen); } -std::string captureStreamToString(const std::function& f, - std::ostream &os) { +std::string +captureStreamToString(std::function const& f, std::ostream& os) { std::streambuf* oldOStreamBuf = os.rdbuf(); os.flush(); @@ -78,15 +95,16 @@ std::string captureStreamToString(const std::function& f, f(); strOs.flush(); - os.rdbuf( oldOStreamBuf ); + os.rdbuf(oldOStreamBuf); return strOs.str(); } -std::string captureStreamToString(const std::function& f, - std::FILE* captureStream, - int captureStreamFd) { - char tempFileName [] = "parpeTestCaptureXXXXXX"; +std::string captureStreamToString( + std::function const& f, + std::FILE* captureStream, + int captureStreamFd) { + char tempFileName[] = "parpeTestCaptureXXXXXX"; int newStreamFd = mkstemp(tempFileName); Expects(newStreamFd >= 0); @@ -95,23 +113,24 @@ std::string captureStreamToString(const std::function& f, fflush(captureStream); dup2(newStreamFd, captureStreamFd); // replace original fd by tmp file - close(newStreamFd); // close remaining copy + close(newStreamFd); // close remaining copy f(); fflush(captureStream); dup2(oldStreamFd, captureStreamFd); // restore (closes tmp file) - close(oldStreamFd); // close remainingv copy + close(oldStreamFd); // close remainingv copy std::ifstream ifs(tempFileName); - return std::string ((std::istreambuf_iterator(ifs)), - std::istreambuf_iterator()); + return std::string( + (std::istreambuf_iterator(ifs)), + std::istreambuf_iterator()); } double getLogLikelihoodOffset(int n) { - const double pi = atan(1) * 4.0; - return - n * 0.5 
* log(2.0 * pi); + double const pi = atan(1) * 4.0; + return -n * 0.5 * log(2.0 * pi); } } // namespace parpe diff --git a/tests/parpecommon/testingMisc.h b/tests/parpecommon/testingMisc.h index fde0709d2..81ec33459 100644 --- a/tests/parpecommon/testingMisc.h +++ b/tests/parpecommon/testingMisc.h @@ -1,10 +1,10 @@ #ifndef PARPE_TESTING_MISC_H #define PARPE_TESTING_MISC_H +#include #include -#include #include -#include +#include #include namespace parpe { @@ -18,13 +18,28 @@ double getLogLikelihoodOffset(int n); int randInt(int min, int max); -bool withinTolerance(double expected, double actual, double atol, double rtol, int index); - -void checkEqualArray(const double *expected, const double *actual, int length, double atol, double rtol); - -std::string captureStreamToString(const std::function& f, std::ostream &os = std::cout); - -std::string captureStreamToString(const std::function& f, std::FILE* captureStream = stdout, int captureStreamFd = STDOUT_FILENO); +bool withinTolerance( + double expected, + double actual, + double atol, + double rtol, + int index); + +void checkEqualArray( + double const* expected, + double const* actual, + int length, + double atol, + double rtol); + +std::string captureStreamToString( + std::function const& f, + std::ostream& os = std::cout); + +std::string captureStreamToString( + std::function const& f, + std::FILE* captureStream = stdout, + int captureStreamFd = STDOUT_FILENO); } // namespace parpe diff --git a/tests/parpeloadbalancer/loadBalancerMasterTest.cpp b/tests/parpeloadbalancer/loadBalancerMasterTest.cpp index 73a75a3d0..1f36dfb9f 100644 --- a/tests/parpeloadbalancer/loadBalancerMasterTest.cpp +++ b/tests/parpeloadbalancer/loadBalancerMasterTest.cpp @@ -1,8 +1,8 @@ -#include #include +#include -#include #include +#include #include @@ -13,60 +13,83 @@ using ::testing::_; -static std::function _MPI_Comm_size; +static std::function _MPI_Comm_size; // mock for MPI_Comm_size -int MPI_Comm_size(MPI_Comm comm, int *size) { +int MPI_Comm_size(MPI_Comm comm, int* size) { _MPI_Comm_size(comm, size); *size = 10; return MPI_SUCCESS; } -int MPI_Testany(int /*count*/, MPI_Request /*array_of_requests*/[], int */*index*/, - int */*flag*/, MPI_Status */*status*/) { +int MPI_Testany( + int /*count*/, + MPI_Request /*array_of_requests*/[], + int* /*index*/, + int* /*flag*/, + MPI_Status* /*status*/) { sleep(1); return 0; } -int MPI_Iprobe(int /*source*/, int /*tag*/, MPI_Comm /*comm*/, int */*flag*/, - MPI_Status */*status*/) { +int MPI_Iprobe( + int /*source*/, + int /*tag*/, + MPI_Comm /*comm*/, + int* /*flag*/, + MPI_Status* /*status*/) { return 0; } -int MPI_Isend(const void */*buf*/, int /*count*/, MPI_Datatype /*datatype*/, - int /*dest*/, int /*tag*/, MPI_Comm /*comm*/, - MPI_Request */*request*/) { +int MPI_Isend( + void const* /*buf*/, + int /*count*/, + MPI_Datatype /*datatype*/, + int /*dest*/, + int /*tag*/, + MPI_Comm /*comm*/, + MPI_Request* /*request*/) { sleep(1); return 0; } class MockMPI { -public: - MOCK_CONST_METHOD2(MPI_Comm_size, int(MPI_Comm comm, int *size)); - MOCK_CONST_METHOD5(MPI_Testany, int(int count, - MPI_Request array_of_requests[], - int *index, - int *flag, MPI_Status *status)); - MOCK_CONST_METHOD5(MPI_Iprobe, int(int source, int tag, MPI_Comm comm, - int *flag, MPI_Status *status)); - - MOCK_CONST_METHOD7(MPI_Isend, int(const void *buf, int count, - MPI_Datatype datatype, int dest, - int tag, MPI_Comm comm, - MPI_Request *request)); + public: + MOCK_CONST_METHOD2(MPI_Comm_size, int(MPI_Comm comm, int* size)); + 
MOCK_CONST_METHOD5( + MPI_Testany, + int(int count, + MPI_Request array_of_requests[], + int* index, + int* flag, + MPI_Status* status)); + MOCK_CONST_METHOD5( + MPI_Iprobe, + int(int source, int tag, MPI_Comm comm, int* flag, MPI_Status* status)); + + MOCK_CONST_METHOD7( + MPI_Isend, + int(void const* buf, + int count, + MPI_Datatype datatype, + int dest, + int tag, + MPI_Comm comm, + MPI_Request* request)); MockMPI() { - _MPI_Comm_size = [this](MPI_Comm comm, int *size){ return MPI_Comm_size(comm, size); }; + _MPI_Comm_size = [this](MPI_Comm comm, int* size) { + return MPI_Comm_size(comm, size); + }; } }; class LoadBalancer : public ::testing::Test { -protected: + protected: MockMPI mockMpi; }; - #include TEST_F(LoadBalancer, QueueInited) { diff --git a/tests/parpeoptimization/localOptimizationCeresTest.cpp b/tests/parpeoptimization/localOptimizationCeresTest.cpp index 07c832db1..60a9ed36f 100644 --- a/tests/parpeoptimization/localOptimizationCeresTest.cpp +++ b/tests/parpeoptimization/localOptimizationCeresTest.cpp @@ -2,18 +2,16 @@ #include -#include "quadraticTestProblem.h" #include "../parpecommon/testingMisc.h" +#include "quadraticTestProblem.h" -#include #include - +#include using ::testing::_; +using ::testing::AtLeast; using ::testing::Eq; using ::testing::Ne; -using ::testing::AtLeast; - TEST(LocalOptimizationCeres, Optimization) { parpe::QuadraticTestProblem problem; @@ -57,13 +55,22 @@ TEST(LocalOptimizationCeres, IsReporterCalled) { problem.setOptimizationOptions(o); EXPECT_CALL(*problem.reporter, starting(_)); - EXPECT_CALL(*dynamic_cast(problem.cost_fun_.get()), - numParameters()).Times(3); - EXPECT_CALL(*problem.reporter, beforeCostFunctionCall(_)).Times(1 + o.maxOptimizerIterations); - EXPECT_CALL(*dynamic_cast(problem.cost_fun_.get()), - evaluate_impl(_, _, Ne(gsl::span()), _, _)).Times(1 + o.maxOptimizerIterations); - EXPECT_CALL(*problem.reporter, iterationFinished(_, _, _)).Times(1 + o.maxOptimizerIterations); - EXPECT_CALL(*problem.reporter, afterCostFunctionCall(_, _, _)).Times(1 + o.maxOptimizerIterations); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + numParameters()) + .Times(3); + EXPECT_CALL(*problem.reporter, beforeCostFunctionCall(_)) + .Times(1 + o.maxOptimizerIterations); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + evaluate_impl(_, _, Ne(gsl::span()), _, _)) + .Times(1 + o.maxOptimizerIterations); + EXPECT_CALL(*problem.reporter, iterationFinished(_, _, _)) + .Times(1 + o.maxOptimizerIterations); + EXPECT_CALL(*problem.reporter, afterCostFunctionCall(_, _, _)) + .Times(1 + o.maxOptimizerIterations); EXPECT_CALL(*problem.reporter, finished(_, _, _)); diff --git a/tests/parpeoptimization/localOptimizationFidesTest.cpp b/tests/parpeoptimization/localOptimizationFidesTest.cpp index d41d9b34c..8e14324fb 100644 --- a/tests/parpeoptimization/localOptimizationFidesTest.cpp +++ b/tests/parpeoptimization/localOptimizationFidesTest.cpp @@ -13,8 +13,7 @@ using ::testing::AtLeast; using ::testing::Eq; using ::testing::Ne; -TEST(LocalOptimizationFides, FindsOptimum) -{ +TEST(LocalOptimizationFides, FindsOptimum) { parpe::QuadraticTestProblem problem; // should trigger termination @@ -26,7 +25,6 @@ TEST(LocalOptimizationFides, FindsOptimum) auto gatol = -1.0; auto grtol = -1.0; - auto optimization_options = problem.getOptimizationOptions(); optimization_options.setOption("xtol", xtol); optimization_options.setOption("fatol", fatol); @@ -37,22 +35,25 @@ TEST(LocalOptimizationFides, FindsOptimum) 
problem.setOptimizationOptions(optimization_options); EXPECT_CALL(*problem.reporter, starting(_)); - EXPECT_CALL(*problem.reporter, - finished(_, _, static_cast(fides::ExitStatus::ftol))); + EXPECT_CALL( + *problem.reporter, + finished(_, _, static_cast(fides::ExitStatus::ftol))); // No calls without gradient - EXPECT_CALL(*dynamic_cast( - problem.cost_fun_.get()), - evaluate_impl(_, _, Eq(gsl::span()), _, _)) - .Times(0); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + evaluate_impl(_, _, Eq(gsl::span()), _, _)) + .Times(0); // At least one gradient evaluation - EXPECT_CALL(*dynamic_cast( - problem.cost_fun_.get()), - evaluate_impl(_, _, Ne(gsl::span()), _, _)) - .Times(AtLeast(1)); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + evaluate_impl(_, _, Ne(gsl::span()), _, _)) + .Times(AtLeast(1)); parpe::OptimizerFides optimizer; - auto [status, fval, parameters]= optimizer.optimize(&problem); + auto [status, fval, parameters] = optimizer.optimize(&problem); // check status, cost, parameter EXPECT_EQ(0, status); diff --git a/tests/parpeoptimization/localOptimizationFsqpTest.cpp b/tests/parpeoptimization/localOptimizationFsqpTest.cpp index ac38383d1..768c5aee3 100644 --- a/tests/parpeoptimization/localOptimizationFsqpTest.cpp +++ b/tests/parpeoptimization/localOptimizationFsqpTest.cpp @@ -22,32 +22,33 @@ TEST_GROUP(localOptimizationFsqp){ }; // clang-format on - TEST(localOptimizationFsqp, testOptimizationGetlocalOptimum) { parpe::QuadraticTestProblem problem; mock().ignoreOtherCalls(); parpe::OptimizerFsqp optimizer; - //auto result = optimizer.optimize(&problem); + // auto result = optimizer.optimize(&problem); auto result = optimizer.optimize(&problem); // check status, cost, parameter -// CHECK_EQUAL(0, std::get<0>(result)); + // CHECK_EQUAL(0, std::get<0>(result)); DOUBLES_EQUAL(42.0, std::get<1>(result), 1e-12); - DOUBLES_EQUAL(-1.0, std::get<2>(result).at(0), 1e-8); // TODO adapt to optimizer tolerances + DOUBLES_EQUAL( + -1.0, + std::get<2>(result).at(0), + 1e-8); // TODO adapt to optimizer tolerances } - - TEST(localOptimizationFsqp, testParallelMultistart) { /* Test if thread-safe * Test with: - * while ./build/optimization/tests/unittests_optimization_fsqp; do :; done + * while ./build/optimization/tests/unittests_optimization_fsqp; do :; + * done */ mock().disable(); // mock() is not thread-safe - constexpr int numStarts {10}; + constexpr int numStarts{10}; parpe::QuadraticOptimizationMultiStartProblem msp(numStarts, false); msp.options.optimizer = parpe::optimizerName::OPTIMIZER_FSQP; @@ -57,8 +58,6 @@ TEST(localOptimizationFsqp, testParallelMultistart) { mock().enable(); } - - TEST(localOptimizationFsqp, testReporterCalled) { parpe::QuadraticTestProblem problem; auto o = problem.getOptimizationOptions(); @@ -78,16 +77,23 @@ TEST(localOptimizationFsqp, testReporterCalled) { // "normal" iterations // "before" and "after" are called for f and g, but g is already cached - mock().expectNCalls(o.maxOptimizerIterations * 2, "OptimizationReporterTest::beforeCostFunctionCall"); + mock().expectNCalls( + o.maxOptimizerIterations * 2, + "OptimizationReporterTest::beforeCostFunctionCall"); mock().expectNCalls(o.maxOptimizerIterations, "testObjGrad"); - mock().expectNCalls(o.maxOptimizerIterations * 2, "OptimizationReporterTest::afterCostFunctionCall"); - mock().expectNCalls(o.maxOptimizerIterations, "OptimizationReporterTest::iterationFinished"); + mock().expectNCalls( + o.maxOptimizerIterations * 2, + "OptimizationReporterTest::afterCostFunctionCall"); + 
mock().expectNCalls( + o.maxOptimizerIterations, + "OptimizationReporterTest::iterationFinished"); - mock().expectOneCall("OptimizationReporterTest::finished").ignoreOtherParameters(); + mock() + .expectOneCall("OptimizationReporterTest::finished") + .ignoreOtherParameters(); parpe::OptimizerFsqp optimizer; optimizer.optimize(&problem); // don't check results. could be anywhere, due to low iteration limit } - diff --git a/tests/parpeoptimization/localOptimizationIpoptTest.cpp b/tests/parpeoptimization/localOptimizationIpoptTest.cpp index 29f1870f6..e85670208 100644 --- a/tests/parpeoptimization/localOptimizationIpoptTest.cpp +++ b/tests/parpeoptimization/localOptimizationIpoptTest.cpp @@ -3,13 +3,13 @@ #include #include -#include "quadraticTestProblem.h" #include "../parpecommon/testingMisc.h" +#include "quadraticTestProblem.h" using ::testing::_; +using ::testing::AtLeast; using ::testing::Eq; using ::testing::Ne; -using ::testing::AtLeast; TEST(localOptimizationIpopt, testOptimizationResult) { parpe::QuadraticTestProblem problem; @@ -17,10 +17,16 @@ TEST(localOptimizationIpopt, testOptimizationResult) { EXPECT_CALL(*problem.reporter, starting(_)); EXPECT_CALL(*problem.reporter, finished(_, _, 0)); - EXPECT_CALL(*dynamic_cast(problem.cost_fun_.get()), - evaluate_impl(_, _, Eq(gsl::span()), _, _)).Times(AtLeast(1)); - EXPECT_CALL(*dynamic_cast(problem.cost_fun_.get()), - evaluate_impl(_, _, Ne(gsl::span()), _, _)).Times(AtLeast(1)); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + evaluate_impl(_, _, Eq(gsl::span()), _, _)) + .Times(AtLeast(1)); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + evaluate_impl(_, _, Ne(gsl::span()), _, _)) + .Times(AtLeast(1)); // TODO mock().ignoreOtherCalls(); @@ -43,15 +49,27 @@ TEST(localOptimizationIpopt, testReporterCalled) { EXPECT_CALL(*problem.reporter, starting(_)); - EXPECT_CALL(*problem.reporter, beforeCostFunctionCall(_)).Times(3 + o.maxOptimizerIterations * 2); - EXPECT_CALL(*dynamic_cast(problem.cost_fun_.get()), - evaluate_impl(_, _, Ne(gsl::span()), _, _)).Times(1 + o.maxOptimizerIterations); - EXPECT_CALL(*dynamic_cast(problem.cost_fun_.get()), - evaluate_impl(_, _, Eq(gsl::span()), _, _)).Times(o.maxOptimizerIterations); - EXPECT_CALL(*dynamic_cast(problem.cost_fun_.get()), - numParameters()).Times(2 + 0*o.maxOptimizerIterations); - EXPECT_CALL(*problem.reporter, iterationFinished(_, _, _)).Times(1 + o.maxOptimizerIterations); - EXPECT_CALL(*problem.reporter, afterCostFunctionCall(_, _, _)).Times(3 + o.maxOptimizerIterations * 2); + EXPECT_CALL(*problem.reporter, beforeCostFunctionCall(_)) + .Times(3 + o.maxOptimizerIterations * 2); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + evaluate_impl(_, _, Ne(gsl::span()), _, _)) + .Times(1 + o.maxOptimizerIterations); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + evaluate_impl(_, _, Eq(gsl::span()), _, _)) + .Times(o.maxOptimizerIterations); + EXPECT_CALL( + *dynamic_cast( + problem.cost_fun_.get()), + numParameters()) + .Times(2 + 0 * o.maxOptimizerIterations); + EXPECT_CALL(*problem.reporter, iterationFinished(_, _, _)) + .Times(1 + o.maxOptimizerIterations); + EXPECT_CALL(*problem.reporter, afterCostFunctionCall(_, _, _)) + .Times(3 + o.maxOptimizerIterations * 2); EXPECT_CALL(*problem.reporter, finished(_, _, _)); @@ -60,4 +78,3 @@ TEST(localOptimizationIpopt, testReporterCalled) { // don't check results. 
could be anywhere, due to low iteration limit } - diff --git a/tests/parpeoptimization/localOptimizationToms611Test.cpp b/tests/parpeoptimization/localOptimizationToms611Test.cpp index ac0fa2e42..6b9ad6eaa 100644 --- a/tests/parpeoptimization/localOptimizationToms611Test.cpp +++ b/tests/parpeoptimization/localOptimizationToms611Test.cpp @@ -23,19 +23,28 @@ TEST_GROUP(localOptimizationToms611){ }; // clang-format on - - -void calcf(integer &n, doublereal *x, integer &nf, doublereal &f, - integer *uiparm, doublereal *urparm, void *ufparm) { +void calcf( + integer& n, + doublereal* x, + integer& nf, + doublereal& f, + integer* uiparm, + doublereal* urparm, + void* ufparm) { f = pow(x[0] + 1.0, 2) + 42.0; - std::cout<(calcf), (S_fp)calcg, - iv, liv, - lv, v, - nullptr, nullptr, nullptr); + sumsl_( + numOptimizationVariables2, + scaling, + startingPoint, + reinterpret_cast(calcf), + (S_fp)calcg, + iv, + liv, + lv, + v, + nullptr, + nullptr, + nullptr); CHECK_EQUAL(relative_function_convergence, iv[0]); DOUBLES_EQUAL(-1.0, startingPoint[0], 1e-7); - //std::cout<(result)); DOUBLES_EQUAL(42.0, std::get<1>(result), 1e-12); - DOUBLES_EQUAL(-1.0, std::get<2>(result).at(0), 1e-8); // TODO adapt to optimizer tolerances + DOUBLES_EQUAL( + -1.0, + std::get<2>(result).at(0), + 1e-8); // TODO adapt to optimizer tolerances } - TEST(localOptimizationToms611, testReporterCalled) { - const int allowedLineSearchSteps = 3; // only true when iteration limit == 1 + int const allowedLineSearchSteps = 3; // only true when iteration limit == 1 parpe::QuadraticTestProblem problem; auto o = problem.getOptimizationOptions(); o.maxOptimizerIterations = 1; - o.setOption("mxfcal", allowedLineSearchSteps + 1); // +1 for initial function evaluation + o.setOption( + "mxfcal", + allowedLineSearchSteps + 1); // +1 for initial function evaluation problem.setOptimizationOptions(o); // iteration 0 @@ -107,13 +127,22 @@ TEST(localOptimizationToms611, testReporterCalled) { mock().expectOneCall("OptimizationReporterTest::iterationFinished"); // others - mock().expectNCalls(o.maxOptimizerIterations * allowedLineSearchSteps + 1, "OptimizationReporterTest::beforeCostFunctionCall"); - mock().expectNCalls(o.maxOptimizerIterations * allowedLineSearchSteps, "testObj"); + mock().expectNCalls( + o.maxOptimizerIterations * allowedLineSearchSteps + 1, + "OptimizationReporterTest::beforeCostFunctionCall"); + mock().expectNCalls( + o.maxOptimizerIterations * allowedLineSearchSteps, "testObj"); mock().expectNCalls(o.maxOptimizerIterations, "testObjGrad"); - mock().expectNCalls(o.maxOptimizerIterations, "OptimizationReporterTest::iterationFinished"); - mock().expectNCalls(o.maxOptimizerIterations * allowedLineSearchSteps + 1, "OptimizationReporterTest::afterCostFunctionCall"); - - mock().expectOneCall("OptimizationReporterTest::finished").ignoreOtherParameters(); + mock().expectNCalls( + o.maxOptimizerIterations, + "OptimizationReporterTest::iterationFinished"); + mock().expectNCalls( + o.maxOptimizerIterations * allowedLineSearchSteps + 1, + "OptimizationReporterTest::afterCostFunctionCall"); + + mock() + .expectOneCall("OptimizationReporterTest::finished") + .ignoreOtherParameters(); parpe::OptimizerToms611TrustRegionSumsl optimizer; optimizer.optimize(&problem); diff --git a/tests/parpeoptimization/main.cpp b/tests/parpeoptimization/main.cpp index f0b2ee417..6c0277a4b 100644 --- a/tests/parpeoptimization/main.cpp +++ b/tests/parpeoptimization/main.cpp @@ -1,10 +1,10 @@ #include -#include #include -#include +#include #include #include +#include 
#ifdef PARPE_ENABLE_IPOPT #include "localOptimizationIpoptTest.h" @@ -31,9 +31,7 @@ #include #include - -int main(int argc, char *argv[]) -{ +int main(int argc, char* argv[]) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); } diff --git a/tests/parpeoptimization/minibatchOptimizationTest.cpp b/tests/parpeoptimization/minibatchOptimizationTest.cpp index fe8b12d99..6061fa865 100644 --- a/tests/parpeoptimization/minibatchOptimizationTest.cpp +++ b/tests/parpeoptimization/minibatchOptimizationTest.cpp @@ -1,18 +1,17 @@ #include +#include #include #include -#include #include -#include "quadraticTestProblem.h" #include "../parpecommon/testingMisc.h" +#include "quadraticTestProblem.h" #include +#include #include #include -#include - TEST(MinibatchOptimization, CreatesBatches) { @@ -30,24 +29,32 @@ TEST(MinibatchOptimization, CreatesBatches) { batchSize = 5; batchesAct = parpe::getBatches(input, batchSize); EXPECT_EQ(2UL, batchesAct.size()); - EXPECT_TRUE(std::vector(input.begin(), input.begin() + batchSize) == batchesAct[0]); - EXPECT_TRUE(std::vector(input.begin() + batchSize, input.end()) == batchesAct[1]); + EXPECT_TRUE( + std::vector(input.begin(), input.begin() + batchSize) == + batchesAct[0]); + EXPECT_TRUE( + std::vector(input.begin() + batchSize, input.end()) == + batchesAct[1]); // 2 batches, unequal batchSize = 6; batchesAct = parpe::getBatches(input, batchSize); EXPECT_EQ(2UL, batchesAct.size()); - EXPECT_TRUE(std::vector(input.begin(), input.begin() + batchSize) == batchesAct[0]); - EXPECT_TRUE(std::vector(input.begin() + batchSize, input.end()) == batchesAct[1]); + EXPECT_TRUE( + std::vector(input.begin(), input.begin() + batchSize) == + batchesAct[0]); + EXPECT_TRUE( + std::vector(input.begin() + batchSize, input.end()) == + batchesAct[1]); } TEST(MinibatchOptimization, UpdatesParameters) { // Test whether the most simple parameter updater works reliably - std::vector gradient {3.0, 4.0}; - std::vector parameters {2.0, 3.0}; - std::vector lowerBounds {-5.0, -5.0}; - std::vector upperBounds {5.0, 5.0}; - std::vector parametersExp {1.7, 2.6}; + std::vector gradient{3.0, 4.0}; + std::vector parameters{2.0, 3.0}; + std::vector lowerBounds{-5.0, -5.0}; + std::vector upperBounds{5.0, 5.0}; + std::vector parametersExp{1.7, 2.6}; double learningRate = 0.5; int iteration = 1; @@ -57,23 +64,33 @@ TEST(MinibatchOptimization, UpdatesParameters) { parpe::ParameterUpdaterVanilla pu; pu.initialize(2); - pu.updateParameters(learningRate, iteration, gradient, parameters, lowerBounds, upperBounds); - - std::transform(parametersExp.begin( ), parametersExp.end( ), parameters.begin( ), parametersExp.begin( ), std::minus( )); + pu.updateParameters( + learningRate, + iteration, + gradient, + parameters, + lowerBounds, + upperBounds); + + std::transform( + parametersExp.begin(), + parametersExp.end(), + parameters.begin(), + parametersExp.begin(), + std::minus()); for (int i = 0; i < numParameters; i++) if (!errored) - if (parametersExp[i] > toleratedError or parametersExp[i] < -toleratedError) + if (parametersExp[i] > toleratedError or + parametersExp[i] < -toleratedError) errored = true; EXPECT_TRUE(!errored); } - - class MinibatchOptimizationLinearModel : public ::testing::Test { -protected: + protected: void SetUp() override { generateRandomFeatures(); @@ -85,22 +102,24 @@ class MinibatchOptimizationLinearModel : public ::testing::Test { lm.evaluate(trueParameters, data, labels); } - void generateRandomFeatures() { // generate data or feature vector std::uniform_real_distribution 
unif(0, 10); std::mt19937 rng(rd()); - data.assign(numDatasets, std::vector(trueParameters.size() - 1)); + data.assign( + numDatasets, std::vector(trueParameters.size() - 1)); for (int i = 0; i < numDatasets; ++i) { - std::generate(data[i].begin(), data[i].end(), [&unif, &rng]() {return unif(rng);}); - //std::cout< getLinearModelMSE() { // prepare model for optimization - auto lm2 = std::make_unique < parpe::LinearModelMSE > (trueParameters.size()); + auto lm2 = + std::make_unique(trueParameters.size()); lm2->datasets = data; lm2->labels = labels; return lm2; @@ -109,8 +128,9 @@ class MinibatchOptimizationLinearModel : public ::testing::Test { std::unique_ptr getOptimizationProblem() { auto lm2 = getLinearModelMSE(); - auto sgf = std::make_unique < parpe::SummedGradientFunctionGradientFunctionAdapter - > (std::move(lm2), dataIndices); + auto sgf = std::make_unique< + parpe::SummedGradientFunctionGradientFunctionAdapter>( + std::move(lm2), dataIndices); auto p = std::make_unique(); p->cost_fun_ = std::move(sgf); p->setParametersMin(std::vector(trueParameters.size(), 0.0)); @@ -119,11 +139,9 @@ class MinibatchOptimizationLinearModel : public ::testing::Test { return p; } + void TearDown() override {} - void TearDown() override { - } - - std::vector trueParameters = { 3.0, 2.0 }; + std::vector trueParameters = {3.0, 2.0}; int numDatasets = 10; int batchSize = 2; @@ -136,16 +154,18 @@ class MinibatchOptimizationLinearModel : public ::testing::Test { std::vector dataIndices; }; - -TEST_F(MinibatchOptimizationLinearModel, CostWithTrueParametersIsZeroIndivdually) { +TEST_F( + MinibatchOptimizationLinearModel, + CostWithTrueParametersIsZeroIndivdually) { // verify cost gradient with true parameters is 0 auto lm2 = getLinearModelMSE(); double mse = NAN; std::vector gradient(trueParameters.size()); - for(int i = 0; i < numDatasets; ++i) { + for (int i = 0; i < numDatasets; ++i) { lm2->evaluate(trueParameters, i, mse, gradient, nullptr, nullptr); EXPECT_EQ(0.0, mse); - EXPECT_TRUE(std::vector(trueParameters.size(), 0.0) == gradient); + EXPECT_TRUE( + std::vector(trueParameters.size(), 0.0) == gradient); } } @@ -166,8 +186,17 @@ TEST_F(MinibatchOptimizationLinearModel, MinibatchSucceedFromOptimum) { mb.maxEpochs = 20; mb.batchSize = 2; std::vector startingPoint = {3.0, 2.0}; - auto result = mb.optimize(*lm2, dataIndices, startingPoint, gsl::span(), gsl::span(), nullptr, nullptr); - EXPECT_EQ((int)parpe::minibatchExitStatus::gradientNormConvergence, std::get<0>(result)); + auto result = mb.optimize( + *lm2, + dataIndices, + startingPoint, + gsl::span(), + gsl::span(), + nullptr, + nullptr); + EXPECT_EQ( + (int)parpe::minibatchExitStatus::gradientNormConvergence, + std::get<0>(result)); EXPECT_EQ(0.0, std::get<1>(result)); EXPECT_TRUE(trueParameters == std::get<2>(result)); } @@ -176,7 +205,7 @@ TEST_F(MinibatchOptimizationLinearModel, LinearModelCheckCostGradient) { // use gradient checker auto p = getOptimizationProblem(); - for(int i = 0; i < 10; ++i) + for (int i = 0; i < 10; ++i) parpe::optimizationProblemGradientCheck(p.get(), 10, 1e-1); // TODO: check results automatically @@ -195,15 +224,16 @@ TEST_F(MinibatchOptimizationLinearModel, linearModelDoesBatchOptimizerSucceed) { auto resultBatchOpt = o.optimize(p.get()); EXPECT_NEAR(0.0, std::get<1>(resultBatchOpt), 1e-8); - for(int i = 0; (unsigned) i < trueParameters.size(); ++i) - EXPECT_NEAR(trueParameters[i], std::get<2>(resultBatchOpt)[i], 1e-6); + for (int i = 0; (unsigned)i < trueParameters.size(); ++i) + EXPECT_NEAR(trueParameters[i], 
std::get<2>(resultBatchOpt)[i], 1e-6); // -> is identifiable and gradient okay } #endif TEST_F(MinibatchOptimizationLinearModel, LinearModel) { - // optimization/tests/unittests_optimization -sg minibatchOptimizationLinearModel -sn linearModel - std::cout<<"True parameters "< startingPoint = {2.0, 4}; @@ -216,16 +246,22 @@ TEST_F(MinibatchOptimizationLinearModel, LinearModel) { parpe::MinibatchOptimizer mb; mb.maxEpochs = 100; - //mb.parameterUpdater = std::make_unique(0.02); + // mb.parameterUpdater = + // std::make_unique(0.02); mb.parameterUpdater = std::make_unique(); mb.batchSize = batchSize; - auto result = mb.optimize(*lm3, dataIndices, startingPoint, - gsl::span(), gsl::span(), - nullptr, nullptr); + auto result = mb.optimize( + *lm3, + dataIndices, + startingPoint, + gsl::span(), + gsl::span(), + nullptr, + nullptr); // TODO add some gaussian noise // std::normal_distribution norm(0.0, 1.0); - //for(auto &e : labels) - //e += norm(rng); + // for(auto &e : labels) + // e += norm(rng); // TODO: add test with mockReporter (also for other optimizers) } diff --git a/tests/parpeoptimization/multiStartOptimizationTest.cpp b/tests/parpeoptimization/multiStartOptimizationTest.cpp index a2b1991fa..a3b65a0b0 100644 --- a/tests/parpeoptimization/multiStartOptimizationTest.cpp +++ b/tests/parpeoptimization/multiStartOptimizationTest.cpp @@ -3,8 +3,8 @@ #include #include -#include "quadraticTestProblem.h" #include "../parpecommon/testingMisc.h" +#include "quadraticTestProblem.h" #ifdef PARPE_ENABLE_IPOPT #include diff --git a/tests/parpeoptimization/optimizationOptionsTest.cpp b/tests/parpeoptimization/optimizationOptionsTest.cpp index a05c65b06..6298769f0 100644 --- a/tests/parpeoptimization/optimizationOptionsTest.cpp +++ b/tests/parpeoptimization/optimizationOptionsTest.cpp @@ -2,31 +2,30 @@ #include +#include #include #include -#include #include "../parpecommon/testingMisc.h" - #ifdef PARPE_ENABLE_IPOPT #include #include #endif #ifdef PARPE_ENABLE_CERES -#include #include +#include // need prototype here, otherwise mess with headers // (including ceres.h causes some errors with EIGEN) namespace parpe { -void setCeresOption(const std::pair &pair, - ceres::GradientProblemSolver::Options* options); +void setCeresOption( + std::pair const& pair, + ceres::GradientProblemSolver::Options* options); } // namespace parpe #endif - TEST(OptimizationOptions, setGetOptionStr) { parpe::OptimizationOptions o; std::string key = "str"; @@ -57,7 +56,6 @@ TEST(OptimizationOptions, setGetOptionDouble) { EXPECT_EQ(expVal, actVal); } - TEST(OptimizationOptions, getNonExistingOption) { parpe::OptimizationOptions o; @@ -74,8 +72,8 @@ TEST(OptimizationOptions, setIpOptOptions) { parpe::OptimizationOptions o; o.setOption(key, expVal); - o.for_each*>(parpe::setIpOptOption, - &options); + o.for_each*>( + parpe::setIpOptOption, &options); int actVal = 0; EXPECT_EQ(true, options->GetIntegerValue(key, actVal, "")); @@ -92,8 +90,8 @@ TEST(OptimizationOptions, setCeresOptions) { parpe::OptimizationOptions o; o.setOption(key, expVal); - o.for_each(parpe::setCeresOption, - &options); + o.for_each( + parpe::setCeresOption, &options); int actVal = options.max_num_iterations; @@ -104,28 +102,34 @@ TEST(OptimizationOptions, setCeresOptions) { #endif TEST(OptimizationOptions, fromHDF5) { - const char* tmpName = "parpeTest_fromHDF5.h5"; + char const* tmpName = "parpeTest_fromHDF5.h5"; auto _ = gsl::finally([tmpName] { remove(tmpName); }); // fail on non-existing file (hide hdf5 errors) - parpe::captureStreamToString([tmpName](){ - 
        EXPECT_THROW(parpe::OptimizationOptions::fromHDF5(tmpName),
-                     parpe::HDF5Exception);
-    }, stdout);
+    parpe::captureStreamToString(
+        [tmpName]() {
+            EXPECT_THROW(
+                parpe::OptimizationOptions::fromHDF5(tmpName),
+                parpe::HDF5Exception);
+        },
+        stdout);
     // create file
     auto file = parpe::hdf5CreateFile(tmpName, false);
     parpe::hdf5CreateGroup(file, "/optimizationOptions/ceres", true);
     int optimizer = 1;
-    H5LTset_attribute_int(file.getId(), "/optimizationOptions", "optimizer",
-                          &optimizer, 1);
-    H5LTset_attribute_int(file.getId(), "/optimizationOptions/ceres", "someOption",
-                          &optimizer, 1);
+    H5LTset_attribute_int(
+        file.getId(), "/optimizationOptions", "optimizer", &optimizer, 1);
+    H5LTset_attribute_int(
+        file.getId(),
+        "/optimizationOptions/ceres",
+        "someOption",
+        &optimizer,
+        1);
     hsize_t dims[] = {2, 3};
-    double buf[] = {1, 2, 3,
-                    4, 5, 6};
-    H5LTmake_dataset_double(file.getId(), "/optimizationOptions/randomStarts", 2,
-                            dims, buf);
+    double buf[] = {1, 2, 3, 4, 5, 6};
+    H5LTmake_dataset_double(
+        file.getId(), "/optimizationOptions/randomStarts", 2, dims, buf);
     auto startingPoint = parpe::OptimizationOptions::getStartingPoint(file, 0);
     EXPECT_EQ(1, startingPoint[0]);
diff --git a/tests/parpeoptimization/optimizationProblemTest.cpp b/tests/parpeoptimization/optimizationProblemTest.cpp
index 942373093..1c64528b0 100644
--- a/tests/parpeoptimization/optimizationProblemTest.cpp
+++ b/tests/parpeoptimization/optimizationProblemTest.cpp
@@ -1,8 +1,8 @@
 #include
+#include
 #include
 #include
-#include
 #include "quadraticTestProblem.h"
@@ -12,7 +12,6 @@
 #include
-
 /**
  * @brief The SummedGradientFunctionLinearModelTest class is a linear model with
  * mean squared error cost function.
  *
  * y = slope * x + intercept
  *
  * cost = MSE = 1/N \sum_i^N (\bar{y} - y)^2
  */
 class SummedGradientFunctionLinearModelTest
-    : public parpe::SummedGradientFunction {
-public:
+    : public parpe::SummedGradientFunction {
+  public:
     parpe::FunctionEvaluationStatus evaluate(
-        gsl::span parameters,
-        double dataset,
-        double &fval,
-        gsl::span gradient,
-        parpe::Logger* logger, double *cpuTime) const override
-    {
+        gsl::span parameters,
+        double dataset,
+        double& fval,
+        gsl::span gradient,
+        parpe::Logger* logger,
+        double* cpuTime) const override {
         fval = parameters[0] * dataset + parameters[1];
-        if(!gradient.empty()) {
+        if (!gradient.empty()) {
             gradient[0] = dataset;
             gradient[1] = 0;
         }
@@ -52,83 +51,76 @@ class SummedGradientFunctionLinearModelTest
      * @return
      */
     parpe::FunctionEvaluationStatus evaluate(
-        gsl::span parameters,
-        std::vector datasets,
-        double &fval,
-        gsl::span gradient,
-        parpe::Logger* logger, double* cpuTime) const override
-    {
+        gsl::span parameters,
+        std::vector datasets,
+        double& fval,
+        gsl::span gradient,
+        parpe::Logger* logger,
+        double* cpuTime) const override {
         fval = 0;
-        if(!gradient.empty())
+        if (!gradient.empty())
             std::fill(gradient.begin(), gradient.end(), 0);
         double tmpFVal = 0;
         std::vector tmpGradient(parameters.size());
-        for(auto& d : datasets) {
-            auto status = evaluate(parameters, d, tmpFVal, tmpGradient,
-                                   nullptr, nullptr);
-            if(status != parpe::functionEvaluationSuccess)
+        for (auto& d : datasets) {
+            auto status =
+                evaluate(parameters, d, tmpFVal, tmpGradient, nullptr, nullptr);
+            if (status != parpe::functionEvaluationSuccess)
                 return status;
             fval += tmpFVal;
-            if(!gradient.empty()) {
-                for(int i = 0; i < numParameters(); ++i)
+            if (!gradient.empty()) {
+                for (int i = 0; i < numParameters(); ++i)
                     gradient[i] += tmpGradient[i];
             }
         }
         return parpe::functionEvaluationSuccess;
     }
-    int numParameters() const override
-    {
-        return numParameters_;
-    }
+    int numParameters() const override { return numParameters_; }
     std::vector getParameterIds() const override {
-        return std::vector {"p1", "p2"};
+        return std::vector{"p1", "p2"};
     }
     int numParameters_ = 2;
-
 };
-
 TEST(OptimizationProblem, quadraticTestFunction) {
     // Test QuadraticGradientFunction for f(-1) = 42
-    parpe::QuadraticGradientFunction f {};
+    parpe::QuadraticGradientFunction f{};
     double parameter = -1;
     double fValExp = 42.0;
     double gradientExp = 0;
     double fValAct = NAN;
     double gradientAct = NAN;
-    f.evaluate(gsl::span(&parameter, 1), fValAct,
-               gsl::span(&gradientAct, 1));
+    f.evaluate(
+        gsl::span(&parameter, 1),
+        fValAct,
+        gsl::span(&gradientAct, 1));
     EXPECT_EQ(fValExp, fValAct);
     EXPECT_EQ(gradientExp, gradientAct);
 }
-
-
 TEST(OptimizationProblem, gradientChecker) {
-    parpe::QuadraticTestProblem problem {};
-    constexpr int numParameterIndices {1};
-    int parameterIndices[numParameterIndices] {0};
+    parpe::QuadraticTestProblem problem{};
+    constexpr int numParameterIndices{1};
+    int parameterIndices[numParameterIndices]{0};
     parpe::optimizationProblemGradientCheck(&problem, parameterIndices, 1e-6);
-    parpe::optimizationProblemGradientCheckMultiEps(&problem,
-                                                    numParameterIndices
-                                                    );
+    parpe::optimizationProblemGradientCheckMultiEps(
+        &problem, numParameterIndices);
 }
-
 TEST(OptimizationProblem, linearModel) {
     // Test if the linear model produces correct results
     SummedGradientFunctionLinearModelTest model;
-    std::vector parameters {1.0, 2.0};
+    std::vector parameters{1.0, 2.0};
     double fval = NAN;
     std::vector gradient(model.numParameters(), NAN);
@@ -137,24 +129,23 @@ TEST(OptimizationProblem, linearModel) {
     EXPECT_EQ(1.0, gradient[0]);
     EXPECT_EQ(0.0, gradient[1]);
-    std::vector dataset {2.0, 3.0};
+    std::vector dataset{2.0, 3.0};
     model.evaluate(parameters, dataset, fval, gradient, nullptr, nullptr);
     EXPECT_EQ(9.0, fval);
     EXPECT_EQ(5.0, gradient[0]);
     EXPECT_EQ(0.0, gradient[1]);
 }
-
 TEST(OptimizationProblem, linearModelToGradientFun) {
     // Test that the SummedGradientFunction <-> GradientFunction Adapter works
     // with the linear model
-    std::vector dataset {2.0, 3.0};
-    auto model = std::unique_ptr >(
-        new SummedGradientFunctionLinearModelTest());
+    std::vector dataset{2.0, 3.0};
+    auto model = std::unique_ptr>(
+        new SummedGradientFunctionLinearModelTest());
     parpe::SummedGradientFunctionGradientFunctionAdapter gradFun(
-        std::move(model), dataset);
+        std::move(model), dataset);
-    std::vector parameters {1.0, 2.0};
+    std::vector parameters{1.0, 2.0};
     double fval = NAN;
     std::vector gradient(gradFun.numParameters(), NAN);
@@ -164,22 +155,21 @@ TEST(OptimizationProblem, linearModelToGradientFun) {
     EXPECT_EQ(0.0, gradient[1]);
 }
-
 #ifdef PARPE_ENABLE_IPOPT
 TEST(OptimizationProblem, linearModelToGradientFunOptimization) {
     // create optimization problem for the linear model
     // does not do anything meaningful yet
-    std::vector dataset {2.0, 3.0};
-    auto model = std::unique_ptr >(
-        new SummedGradientFunctionLinearModelTest());
+    std::vector dataset{2.0, 3.0};
+    auto model = std::unique_ptr>(
+        new SummedGradientFunctionLinearModelTest());
     auto gradFun = std::unique_ptr(
-        new parpe::SummedGradientFunctionGradientFunctionAdapter(
-            std::move(model), dataset));
+        new parpe::SummedGradientFunctionGradientFunctionAdapter(
+            std::move(model), dataset));
-    parpe::OptimizationProblemImpl problem {std::move(gradFun),
-                                            std::make_unique()};
+    parpe::OptimizationProblemImpl problem{
+        std::move(gradFun), std::make_unique()};
-    std::vector parametersMin {-100, -100};
-    std::vector parametersMax {100, 100};
+    std::vector parametersMin{-100, -100};
+    std::vector parametersMax{100, 100};
     problem.setParametersMin(parametersMin);
     problem.setParametersMax(parametersMax);
diff --git a/tests/parpeoptimization/optimizationResultWriterTest.cpp b/tests/parpeoptimization/optimizationResultWriterTest.cpp
index f6f9f6544..a56c9967c 100644
--- a/tests/parpeoptimization/optimizationResultWriterTest.cpp
+++ b/tests/parpeoptimization/optimizationResultWriterTest.cpp
@@ -3,11 +3,11 @@
 #include
 #include
-#include
 #include
+#include
 TEST(OptimizationResultWriter, ResultWriter) {
-    const char* tmpFilename = "deleteme.h5";
+    char const* tmpFilename = "deleteme.h5";
     parpe::OptimizationResultWriter w(tmpFilename, true, "/bla/");
@@ -17,15 +17,15 @@ TEST(OptimizationResultWriter, ResultWriter) {
     w.setRootPath("/bla2");
-    w.logOptimizerIteration(1, gsl::span(), 0.0, gsl::span(),
-                            1.0, 2.0);
+    w.logOptimizerIteration(
+        1, gsl::span(), 0.0, gsl::span(), 1.0, 2.0);
     // should it be possible to have the same iteration twice?
-    w.logOptimizerIteration(1, gsl::span(), 0.0, gsl::span(),
-                            1.0, 2.0);
+    w.logOptimizerIteration(
+        1, gsl::span(), 0.0, gsl::span(), 1.0, 2.0);
-    w.logObjectiveFunctionEvaluation(gsl::span(), 1.0,
-                                     gsl::span(), 1, 2, 3.0);
+    w.logObjectiveFunctionEvaluation(
+        gsl::span(), 1.0, gsl::span(), 1, 2, 3.0);
     w.saveOptimizerResults(1.0, gsl::span(), 12.0, 17.0, 0);
diff --git a/tests/parpeoptimization/quadraticTestProblem.cpp b/tests/parpeoptimization/quadraticTestProblem.cpp
index 5b212697a..c112f7f28 100644
--- a/tests/parpeoptimization/quadraticTestProblem.cpp
+++ b/tests/parpeoptimization/quadraticTestProblem.cpp
@@ -1,5 +1,5 @@
-#include
 #include
+#include
 #include
@@ -15,8 +15,7 @@ namespace parpe {
 QuadraticTestProblem::QuadraticTestProblem(std::unique_ptr logger)
     : OptimizationProblem(
           std::make_unique>(),
-          std::move(logger)
-          ) {
+          std::move(logger)) {
     auto options = getOptimizationOptions();
     options.maxOptimizerIterations = 12;
@@ -24,31 +23,29 @@ QuadraticTestProblem::QuadraticTestProblem(std::unique_ptr logger)
     setOptimizationOptions(options);
     // will keep ref, but will be passed as unique pointer, so getReporter
     // must only be called once
-    reporter = new NiceMock(cost_fun_.get(),
-                            std::make_unique());
+    reporter = new NiceMock(
+        cost_fun_.get(), std::make_unique());
 }
-void QuadraticTestProblem::fillParametersMin(gsl::span buffer) const
-{
+void QuadraticTestProblem::fillParametersMin(gsl::span buffer) const {
     buffer[0] = -1e5;
 }
-void QuadraticTestProblem::fillParametersMax(gsl::span buffer) const
-{
+void QuadraticTestProblem::fillParametersMax(gsl::span buffer) const {
     buffer[0] = 1e5;
 }
-std::unique_ptr QuadraticTestProblem::getReporter() const
-{
+std::unique_ptr
+QuadraticTestProblem::getReporter() const {
     getReporterCalled = true;
     return std::unique_ptr(reporter);
 }
 std::unique_ptr
 QuadraticOptimizationMultiStartProblem::getLocalProblem(
-    int multiStartIndex) const {
-    auto loggerPrefix = std::string("[start ")
-            + std::to_string(multiStartIndex) + "]";
+    int multiStartIndex) const {
+    auto loggerPrefix =
+        std::string("[start ") + std::to_string(multiStartIndex) + "]";
     auto p = std::make_unique(
         std::make_unique(loggerPrefix));
     p->setOptimizationOptions(options);
@@ -56,8 +53,8 @@ QuadraticOptimizationMultiStartProblem::getLocalProblem(
 }
 QuadraticGradientFunctionMock::QuadraticGradientFunctionMock() {
-    using ::testing::Invoke;
     using ::testing::_;
+    using ::testing::Invoke;
     ON_CALL(*this, evaluate_impl(_, _, _, _, _))
         .WillByDefault(Invoke(&fun, &QuadraticGradientFunction::evaluate));
@@ -66,81 +63,84 @@ QuadraticGradientFunctionMock::QuadraticGradientFunctionMock() {
 }
 FunctionEvaluationStatus QuadraticGradientFunction::evaluate(
-    gsl::span parameters,
-    double &fval, gsl::span gradient, Logger */*logger*/,
-    double */*cpuTime*/) const
-{
+    gsl::span parameters,
+    double& fval,
+    gsl::span gradient,
+    Logger* /*logger*/,
+    double* /*cpuTime*/) const {
     fval = pow(parameters[0] + 1.0, 2) + 42.0;
     if (!gradient.empty()) {
-// mock().actualCall("testObjGrad");
+        // mock().actualCall("testObjGrad");
         gradient[0] = 2.0 * parameters[0] + 2.0;
     } else {
-// mock().actualCall("testObj");
+        // mock().actualCall("testObj");
     }
     return functionEvaluationSuccess;
 }
-int QuadraticGradientFunction::numParameters() const
-{
-// mock().actualCall("GradientFunction::numParameters");
+int QuadraticGradientFunction::numParameters() const {
+    // mock().actualCall("GradientFunction::numParameters");
     return 1;
 }
-std::vector QuadraticGradientFunction::getParameterIds() const
-{
-    return std::vector {"p1"};
+std::vector QuadraticGradientFunction::getParameterIds() const {
+    return std::vector{"p1"};
 }
-bool OptimizationReporterTest::starting(gsl::span /*parameters*/) const
-{
-// mock().actualCall("OptimizationReporterTest::starting");
+bool OptimizationReporterTest::starting(
+    gsl::span /*parameters*/) const {
+    // mock().actualCall("OptimizationReporterTest::starting");
     return false;
 }
-bool OptimizationReporterTest::iterationFinished(gsl::span /*parameters*/,
-                                                 double /*objectiveFunctionValue*/,
-                                                 gsl::span /*objectiveFunctionGradient*/) const
-{
-// mock().actualCall("OptimizationReporterTest::iterationFinished");
+bool OptimizationReporterTest::iterationFinished(
+    gsl::span /*parameters*/,
+    double /*objectiveFunctionValue*/,
+    gsl::span /*objectiveFunctionGradient*/) const {
+    // mock().actualCall("OptimizationReporterTest::iterationFinished");
     return false;
 }
-bool OptimizationReporterTest::beforeCostFunctionCall(gsl::span /*parameters*/) const
-{
-// mock().actualCall("OptimizationReporterTest::beforeCostFunctionCall");
+bool OptimizationReporterTest::beforeCostFunctionCall(
+    gsl::span /*parameters*/) const {
+    // mock().actualCall("OptimizationReporterTest::beforeCostFunctionCall");
     return false;
 }
-bool OptimizationReporterTest::afterCostFunctionCall(gsl::span parameters,
-                                                     double objectiveFunctionValue,
-                                                     gsl::span objectiveFunctionGradient) const
-{
-// mock().actualCall("OptimizationReporterTest::afterCostFunctionCall");
+bool OptimizationReporterTest::afterCostFunctionCall(
+    gsl::span parameters,
+    double objectiveFunctionValue,
+    gsl::span objectiveFunctionGradient) const {
+    // mock().actualCall("OptimizationReporterTest::afterCostFunctionCall");
-    if(printDebug) {
+    if (printDebug) {
         if (!objectiveFunctionGradient.empty()) {
-            printf("g: x: %f f(x): %f f'(x): %f\n", parameters[0],
-                   objectiveFunctionValue, objectiveFunctionGradient[0]);
+            printf(
+                "g: x: %f f(x): %f f'(x): %f\n",
+                parameters[0],
+                objectiveFunctionValue,
+                objectiveFunctionGradient[0]);
         } else {
-            printf("f: x: %f f(x): %f\n", parameters[0], objectiveFunctionValue);
+            printf(
+                "f: x: %f f(x): %f\n", parameters[0], objectiveFunctionValue);
         }
     }
-
     return false;
 }
-void OptimizationReporterTest::finished(double /*optimalCost*/,
-                                        gsl::span /*parameters*/,
-                                        int exitStatus) const
-{
-// mock().actualCall("OptimizationReporterTest::finished").withIntParameter("exitStatus", exitStatus);
+void OptimizationReporterTest::finished(
+    double /*optimalCost*/,
+    gsl::span /*parameters*/,
+    int exitStatus) const {
+    // mock().actualCall("OptimizationReporterTest::finished").withIntParameter("exitStatus",
+    // exitStatus);
 }
 } // namespace parpe
diff --git a/tests/parpeoptimization/quadraticTestProblem.h b/tests/parpeoptimization/quadraticTestProblem.h
index cccd76438..72fcdd710 100644
--- a/tests/parpeoptimization/quadraticTestProblem.h
+++ b/tests/parpeoptimization/quadraticTestProblem.h
@@ -16,72 +16,90 @@ namespace parpe {
  * OptimizationReporter
  */
 class OptimizationReporterTest : public OptimizationReporter {
-public:
+  public:
     using OptimizationReporter::OptimizationReporter;
-    bool starting(gsl::span parameters) const override;
+    bool starting(gsl::span parameters) const override;
-    bool iterationFinished(gsl::span parameters,
-                           double objectiveFunctionValue,
-                           gsl::span objectiveFunctionGradient) const override;
+    bool iterationFinished(
+        gsl::span parameters,
+        double objectiveFunctionValue,
+        gsl::span objectiveFunctionGradient) const override;
-    bool beforeCostFunctionCall(gsl::span parameters) const override;
+    bool
+    beforeCostFunctionCall(gsl::span parameters) const override;
-    bool afterCostFunctionCall(gsl::span parameters,
-                               double objectiveFunctionValue,
-                               gsl::span objectiveFunctionGradient) const override;
+    bool afterCostFunctionCall(
+        gsl::span parameters,
+        double objectiveFunctionValue,
+        gsl::span objectiveFunctionGradient) const override;
-    void finished(double optimalCost,
-                  gsl::span parameters, int exitStatus) const override;
+    void finished(
+        double optimalCost,
+        gsl::span parameters,
+        int exitStatus) const override;
     bool printDebug = false;
 };
-
-class OptimizationReporterMock: public OptimizationReporter {
-public:
-    OptimizationReporterMock(GradientFunction *gradFun,
-                             std::unique_ptr logger)
-        :OptimizationReporter(gradFun, std::make_unique(*logger))
-    {
-        testRep = std::make_unique(gradFun, std::move(logger));
+class OptimizationReporterMock : public OptimizationReporter {
+  public:
+    OptimizationReporterMock(
+        GradientFunction* gradFun,
+        std::unique_ptr logger)
+        : OptimizationReporter(gradFun, std::make_unique(*logger)) {
+        testRep = std::make_unique(
+            gradFun, std::move(logger));
         ON_CALL(*this, starting(_))
-            .WillByDefault(Invoke(testRep.get(), &OptimizationReporterTest::starting));
+            .WillByDefault(
+                Invoke(testRep.get(), &OptimizationReporterTest::starting));
         ON_CALL(*this, iterationFinished(_, _, _))
-            .WillByDefault(Invoke(testRep.get(), &OptimizationReporterTest::iterationFinished));
+            .WillByDefault(Invoke(
+                testRep.get(), &OptimizationReporterTest::iterationFinished));
         ON_CALL(*this, beforeCostFunctionCall(_))
-            .WillByDefault(Invoke(testRep.get(), &OptimizationReporterTest::beforeCostFunctionCall));
+            .WillByDefault(Invoke(
+                testRep.get(),
+                &OptimizationReporterTest::beforeCostFunctionCall));
         ON_CALL(*this, afterCostFunctionCall(_, _, _))
-            .WillByDefault(Invoke(testRep.get(), &OptimizationReporterTest::afterCostFunctionCall));
+            .WillByDefault(Invoke(
+                testRep.get(),
+                &OptimizationReporterTest::afterCostFunctionCall));
         ON_CALL(*this, finished(_, _, _))
-            .WillByDefault(Invoke(testRep.get(), &OptimizationReporterTest::finished));
+            .WillByDefault(
+                Invoke(testRep.get(), &OptimizationReporterTest::finished));
     }
-
-    MOCK_CONST_METHOD1(starting, bool(gsl::span parameters));
-
-    MOCK_CONST_METHOD3(iterationFinished,
-                       bool(gsl::span parameters,
-                            double objectiveFunctionValue,
-                            gsl::span objectiveFunctionGradient));
-
-    MOCK_CONST_METHOD1(beforeCostFunctionCall,
-                       bool(gsl::span parameters));
-
-    MOCK_CONST_METHOD3(afterCostFunctionCall,
-                       bool(gsl::span parameters,
-                            double objectiveFunctionValue,
-                            gsl::span objectiveFunctionGradient));
-
-    MOCK_CONST_METHOD3(finished,
-                       void(double optimalCost, gsl::span parameters,
-                            int exitStatus));
+    MOCK_CONST_METHOD1(starting, bool(gsl::span parameters));
+
+    MOCK_CONST_METHOD3(
+        iterationFinished,
+        bool(
+            gsl::span parameters,
+            double objectiveFunctionValue,
+            gsl::span objectiveFunctionGradient));
+
+    MOCK_CONST_METHOD1(
+        beforeCostFunctionCall,
+        bool(gsl::span parameters));
+
+    MOCK_CONST_METHOD3(
+        afterCostFunctionCall,
+        bool(
+            gsl::span parameters,
+            double objectiveFunctionValue,
+            gsl::span objectiveFunctionGradient));
+
+    MOCK_CONST_METHOD3(
+        finished,
+        void(
+            double optimalCost,
+            gsl::span parameters,
+            int exitStatus));
     std::unique_ptr testRep;
 };
-
 /**
  * @brief The QuadraticGradientFunction class is a simple function for testing
  * the optimization framework.
@@ -99,79 +117,84 @@ class OptimizationReporterMock: public OptimizationReporter {
  */
 class QuadraticGradientFunction : public GradientFunction {
-public:
-    FunctionEvaluationStatus evaluate(gsl::span parameters,
-                                      double &fval,
-                                      gsl::span gradient,
-                                      Logger *logger = nullptr,
-                                      double *cpuTime = nullptr) const override;
+  public:
+    FunctionEvaluationStatus evaluate(
+        gsl::span parameters,
+        double& fval,
+        gsl::span gradient,
+        Logger* logger = nullptr,
+        double* cpuTime = nullptr) const override;
     int numParameters() const override;
     std::vector getParameterIds() const override;
 };
 class QuadraticGradientFunctionMock : public GradientFunction {
-public:
+  public:
     QuadraticGradientFunctionMock();
-    MOCK_CONST_METHOD5(evaluate_impl, FunctionEvaluationStatus(
-                           gsl::span parameters,
-                           double &fval,
-                           gsl::span gradient,
-                           Logger *logger,
-                           double *cpuTime));
+    MOCK_CONST_METHOD5(
+        evaluate_impl,
+        FunctionEvaluationStatus(
+            gsl::span parameters,
+            double& fval,
+            gsl::span gradient,
+            Logger* logger,
+            double* cpuTime));
     virtual FunctionEvaluationStatus evaluate(
-        gsl::span parameters,
-        double &fval,
-        gsl::span gradient,
-        Logger *logger = nullptr,
-        double *cpuTime = nullptr) const {
+        gsl::span parameters,
+        double& fval,
+        gsl::span gradient,
+        Logger* logger = nullptr,
+        double* cpuTime = nullptr) const {
         return evaluate_impl(parameters, fval, gradient, logger, cpuTime);
     }
     MOCK_CONST_METHOD0(numParameters, int());
     MOCK_CONST_METHOD0(getParameterIds, std::vector());
-
-private:
+  private:
     QuadraticGradientFunction fun;
 };
-
 /**
  * @brief The QuadraticTestProblem class is a test optimization problem built
  * around QuadraticGradientFunction
 */
 class QuadraticTestProblem : public OptimizationProblem {
-public:
-    explicit QuadraticTestProblem(std::unique_ptr logger = std::make_unique());
+  public:
+    explicit QuadraticTestProblem(
+        std::unique_ptr logger = std::make_unique());
     void fillParametersMin(gsl::span buffer) const override;
     void fillParametersMax(gsl::span buffer) const override;
     ~QuadraticTestProblem() {
-        if(!getReporterCalled && reporter)
+        if (!getReporterCalled && reporter)
            // manual cleanup if not passed in unique_ptr
            delete reporter;
    }
    std::unique_ptr getReporter() const override;
-    OptimizationReporterMock *reporter;
+    OptimizationReporterMock* reporter;
    mutable bool getReporterCalled = false;
 };
-
-class QuadraticOptimizationMultiStartProblem : public MultiStartOptimizationProblem {
-public:
-    QuadraticOptimizationMultiStartProblem(int numberOfStarts, bool restartOnFailure = false)
-        : numberOfStarts(numberOfStarts), restartOnFailure_(restartOnFailure)
-    {
+class QuadraticOptimizationMultiStartProblem
+    : public MultiStartOptimizationProblem {
+  public:
+    QuadraticOptimizationMultiStartProblem(
+        int numberOfStarts,
+        bool restartOnFailure = false)
+        : numberOfStarts(numberOfStarts)
+        , restartOnFailure_(restartOnFailure) {
         QuadraticTestProblem p;
         options = p.getOptimizationOptions();
     }
-    std::unique_ptr getLocalProblem(int multiStartIndex) const override;
+    std::unique_ptr
+    getLocalProblem(int multiStartIndex) const override;
     int getNumberOfStarts() const override { return numberOfStarts; }
@@ -179,8 +202,7 @@ class QuadraticOptimizationMultiStartProblem : public MultiStartOptimizationProb
     OptimizationOptions options;
-private:
-
+  private:
     int numberOfStarts = 1;
     bool restartOnFailure_ = false;
 };
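
For reference, a minimal standalone sketch (editorial illustration, not part of this patch) of the idea behind the gradientChecker test: the analytic gradient of the quadratic test function f(x) = (x + 1)^2 + 42, i.e. f'(x) = 2x + 2, is compared against a finite-difference approximation. The step size and tolerance below are assumptions, not values taken from parPE.

    #include <cmath>
    #include <cstdio>

    // Quadratic test function and its analytic derivative.
    static double f(double x) { return std::pow(x + 1.0, 2) + 42.0; }
    static double fGrad(double x) { return 2.0 * x + 2.0; }

    int main() {
        double const x = -1.0;   // the tests evaluate at x = -1, where f(-1) = 42
        double const eps = 1e-6; // assumed finite-difference step size
        // Central difference approximation of f'(x).
        double const fd = (f(x + eps) - f(x - eps)) / (2.0 * eps);
        double const an = fGrad(x);
        std::printf("analytic %g vs central difference %g (|diff| %g)\n",
                    an, fd, std::fabs(an - fd));
        return std::fabs(an - fd) < 1e-4 ? 0 : 1; // assumed tolerance
    }

Central differences are used in this sketch because their truncation error shrinks quadratically with the step size, which keeps the comparison meaningful for moderately small steps.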
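Likewise, an editorial sketch (not part of this patch) of the accumulation performed by SummedGradientFunctionLinearModelTest: the model is evaluated once per dataset and cost and gradient contributions are summed, which is what TEST(OptimizationProblem, linearModel) checks for the dataset {2.0, 3.0}. The helper name evaluateOne is hypothetical.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // One-dataset model as coded in the test: fval = p[0] * d + p[1],
    // with d(fval)/d(p[0]) = d and d(fval)/d(p[1]) = 0.
    static void evaluateOne(std::vector<double> const& p, double d,
                            double& fval, std::vector<double>& grad) {
        fval = p[0] * d + p[1];
        grad[0] = d;
        grad[1] = 0.0;
    }

    int main() {
        std::vector<double> const parameters{1.0, 2.0};
        std::vector<double> const datasets{2.0, 3.0};

        double fval = 0.0;
        std::vector<double> gradient(parameters.size(), 0.0);
        double tmpFval = 0.0;
        std::vector<double> tmpGradient(parameters.size(), 0.0);

        for (double d : datasets) { // accumulate per-dataset contributions
            evaluateOne(parameters, d, tmpFval, tmpGradient);
            fval += tmpFval;
            for (std::size_t i = 0; i < gradient.size(); ++i)
                gradient[i] += tmpGradient[i];
        }
        // Matches the expectations in TEST(OptimizationProblem, linearModel):
        // fval = (1*2 + 2) + (1*3 + 2) = 9, gradient = {5, 0}.
        std::printf("fval = %g, gradient = {%g, %g}\n",
                    fval, gradient[0], gradient[1]);
        return 0;
    }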