diff --git a/corelib/src/libs/SireIO/moleculeparser.cpp b/corelib/src/libs/SireIO/moleculeparser.cpp
index 63f4b92bd..d3d7fb7b1 100644
--- a/corelib/src/libs/SireIO/moleculeparser.cpp
+++ b/corelib/src/libs/SireIO/moleculeparser.cpp
@@ -1929,8 +1929,19 @@ MoleculeParserPtr MoleculeParser::parse(const System &system, const QString &for
     cannot be recognised, or if there is an error in parsing. */
 MoleculeParserPtr MoleculeParser::parse(const QString &filename, const PropertyMap &map)
 {
-    MoleculeParserPtr parser = MoleculeParser::_pvt_parse(filename, map);
-    getFileCache()->clear();
+    MoleculeParserPtr parser;
+
+    try
+    {
+        parser = MoleculeParser::_pvt_parse(filename, map);
+        getFileCache()->clear();
+    }
+    catch (...)
+    {
+        getFileCache()->clear();
+        throw;
+    }
+
     return parser;
 }
 
@@ -1939,48 +1950,56 @@ QList<MoleculeParserPtr> MoleculeParser::parse(const QStringList &filenames, con
 {
     QList<MoleculeParserPtr> result;
 
-    if (filenames.count() == 1)
-    {
-        result.append(MoleculeParser::_pvt_parse(filenames[0], map));
-    }
-    else
+    try
     {
-        QVector<MoleculeParserPtr> parsers(filenames.count());
-
-        bool run_parallel = true;
-
-        if (map["parallel"].hasValue())
+        if (filenames.count() == 1)
         {
-            run_parallel = map["parallel"].value().asA<BooleanProperty>().value();
-        }
-
-        if (run_parallel)
-        {
-            // parse the files in parallel - we use a grain size of 1
-            // as each file can be pretty big, and there won't be many of them
-            tbb::parallel_for(
-                tbb::blocked_range<int>(0, filenames.count(), 1),
-                [&](tbb::blocked_range<int> r)
-                {
-                    for (int i = r.begin(); i < r.end(); ++i)
-                    {
-                        parsers[i] = MoleculeParser::_pvt_parse(filenames[i], map);
-                    }
-                },
-                tbb::simple_partitioner());
+            result.append(MoleculeParser::_pvt_parse(filenames[0], map));
         }
         else
         {
-            for (int i = 0; i < filenames.count(); ++i)
+            QVector<MoleculeParserPtr> parsers(filenames.count());
+
+            bool run_parallel = true;
+
+            if (map["parallel"].hasValue())
+            {
+                run_parallel = map["parallel"].value().asA<BooleanProperty>().value();
+            }
+
+            if (run_parallel)
             {
-                parsers[i] = MoleculeParser::_pvt_parse(filenames[i], map);
+                // parse the files in parallel - we use a grain size of 1
+                // as each file can be pretty big, and there won't be many of them
+                tbb::parallel_for(
+                    tbb::blocked_range<int>(0, filenames.count(), 1),
+                    [&](tbb::blocked_range<int> r)
+                    {
+                        for (int i = r.begin(); i < r.end(); ++i)
+                        {
+                            parsers[i] = MoleculeParser::_pvt_parse(filenames[i], map);
+                        }
+                    },
+                    tbb::simple_partitioner());
             }
+            else
+            {
+                for (int i = 0; i < filenames.count(); ++i)
+                {
+                    parsers[i] = MoleculeParser::_pvt_parse(filenames[i], map);
+                }
+            }
+
+            result = parsers.toList();
         }
 
-        result = parsers.toList();
+        getFileCache()->clear();
+    }
+    catch (...)
+    {
+        getFileCache()->clear();
+        throw;
     }
-
-    getFileCache()->clear();
 
     return result;
 }
diff --git a/doc/source/changelog.rst b/doc/source/changelog.rst
index 56a20a7ca..b53b75eaa 100644
--- a/doc/source/changelog.rst
+++ b/doc/source/changelog.rst
@@ -57,6 +57,15 @@ organisation on `GitHub `__.
   a zero sigma value. They will either be sigma=1/epsilon=0 for
   non-perturbable atoms, or sigma=1e-9/epsilon=1e-9 for perturbable atoms.
 
+* Optimised the ``LambdaLever`` class so that it caches the forcefield parameters
+  calculated at different lambda values. This means that we don't have to
+  re-calculate the parameters at each lambda update step. This is a
+  significant speed-up for alchemical free energy simulations.
+
+* Now catch ``std::bad_alloc`` and raise it as a ``MemoryError``. This
+  means that we can catch out-of-memory errors and raise a more
+  informative exception.
+
 * Please add an item to this changelog when you create your PR
 
 `2023.4.1 `__ - October 2023
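
The two MoleculeParser::parse overloads above only add try/catch so that the file cache is emptied on the failure path as well as on success, before the exception is re-thrown. A minimal sketch of the same guarantee written as a scope guard; the FileCache type, getFileCache() and FileCacheGuard names are stand-ins for illustration, not the real Sire classes:

    // Sketch only: clear a cache on every exit path (normal return or exception)
    // by tying the clean-up to an object's destructor.
    struct FileCache
    {
        void clear() { /* drop any cached file text */ }
    };

    FileCache *getFileCache()
    {
        static FileCache cache; // stand-in for Sire's real cache singleton
        return &cache;
    }

    class FileCacheGuard
    {
    public:
        explicit FileCacheGuard(FileCache *c) : cache(c) {}

        ~FileCacheGuard()
        {
            // runs whether the parse succeeded or threw
            if (cache)
                cache->clear();
        }

    private:
        FileCache *cache;
    };

    // usage mirroring MoleculeParser::parse(filename, map):
    //     FileCacheGuard guard(getFileCache());
    //     return MoleculeParser::_pvt_parse(filename, map);

The explicit try/catch used in the patch gives the same behaviour without introducing a new type, which keeps the change small.
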
diff --git a/wrapper/Convert/SireOpenMM/CMakeLists.txt b/wrapper/Convert/SireOpenMM/CMakeLists.txt
index bee36a1f6..ffde94ed2 100644
--- a/wrapper/Convert/SireOpenMM/CMakeLists.txt
+++ b/wrapper/Convert/SireOpenMM/CMakeLists.txt
@@ -24,6 +24,25 @@ if (${SIRE_USE_OPENMM})
   # Other python wrapping directories
   include_directories(${CMAKE_SOURCE_DIR})
 
+  # Check to see if we have support for updating some parameters in context
+  include(CheckCXXSourceCompiles)
+  check_cxx_source_compiles( "#include <OpenMM.h>
+                              int main() {
+                                  OpenMM::CustomNonbondedForce *force;
+                                  OpenMM::Context *context;
+                                  force->updateSomeParametersInContext(0, 0, *context);
+                                  return 0;
+                              }"
+                              SIREOPENMM_HAS_UPDATESOMEPARAMETERSINCONTEXT )
+
+  if ( ${SIREOPENMM_HAS_UPDATESOMEPARAMETERSINCONTEXT} )
+    message( STATUS "OpenMM has support for updating some parameters in context")
+    add_definitions("-DSIRE_HAS_UPDATE_SOME_IN_CONTEXT")
+  else()
+    message( STATUS "OpenMM does not have support for updating some parameters in context")
+    message( STATUS "The free energy code will be a little slower.")
+  endif()
+
   # Define the sources in SireOpenMM
   set ( SIREOPENMM_SOURCES
diff --git a/wrapper/Convert/SireOpenMM/lambdalever.cpp b/wrapper/Convert/SireOpenMM/lambdalever.cpp
index 0f014f58f..a3cfccff6 100644
--- a/wrapper/Convert/SireOpenMM/lambdalever.cpp
+++ b/wrapper/Convert/SireOpenMM/lambdalever.cpp
@@ -35,6 +35,125 @@
 using namespace SireOpenMM;
 using namespace SireCAS;
 
+//////
+////// Implementation of MolLambdaCache
+//////
+
+MolLambdaCache::MolLambdaCache() : lam_val(0)
+{
+}
+
+MolLambdaCache::MolLambdaCache(double lam) : lam_val(lam)
+{
+}
+
+MolLambdaCache::MolLambdaCache(const MolLambdaCache &other)
+    : lam_val(other.lam_val), cache(other.cache)
+{
+}
+
+MolLambdaCache::~MolLambdaCache()
+{
+}
+
+MolLambdaCache &MolLambdaCache::operator=(const MolLambdaCache &other)
+{
+    if (this != &other)
+    {
+        lam_val = other.lam_val;
+        cache = other.cache;
+    }
+
+    return *this;
+}
+
+const QVector<double> &MolLambdaCache::morph(const LambdaSchedule &schedule,
+                                             const QString &key,
+                                             const QVector<double> &initial,
+                                             const QVector<double> &final) const
+{
+    auto nonconst_this = const_cast<MolLambdaCache *>(this);
+
+    QReadLocker lkr(&(nonconst_this->lock));
+
+    auto it = cache.constFind(key);
+
+    if (it != cache.constEnd())
+        return it.value();
+
+    lkr.unlock();
+
+    QWriteLocker wkr(&(nonconst_this->lock));
+
+    // check that someone didn't beat us to create the values
+    it = cache.constFind(key);
+
+    if (it != cache.constEnd())
+        return it.value();
+
+    // create the values
+    nonconst_this->cache.insert(key, schedule.morph(key, initial, final, lam_val));
+
+    return cache.constFind(key).value();
+}
+
+//////
+////// Implementation of LeverCache
+//////
+
+LeverCache::LeverCache()
+{
+}
+
+LeverCache::LeverCache(const LeverCache &other) : cache(other.cache)
+{
+}
+
+LeverCache::~LeverCache()
+{
+}
+
+LeverCache &LeverCache::operator=(const LeverCache &other)
+{
+    if (this != &other)
+    {
+        cache = other.cache;
+    }
+
+    return *this;
+}
+
+const MolLambdaCache &LeverCache::get(int molidx, double lam_val) const
+{
+    auto nonconst_this = const_cast<LeverCache *>(this);
+
+    if (not this->cache.contains(molidx))
+    {
+        nonconst_this->cache.insert(molidx, QHash<double, MolLambdaCache>());
+    }
+
+    auto &mol_cache = nonconst_this->cache.find(molidx).value();
+
+    auto it = mol_cache.constFind(lam_val);
+
+    if (it == mol_cache.constEnd())
+    {
+        // need to create a new cache for this lambda value
+        it = mol_cache.insert(lam_val, MolLambdaCache(lam_val));
+    }
+
+    return it.value();
+}
+
+void LeverCache::clear()
+{
+    cache.clear();
+}
+
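
MolLambdaCache::morph above takes a read lock for the common case (the values for this lever are already cached) and only falls back to a write lock when the entry has to be created, re-checking the key after acquiring it because another thread may have created the entry in between. A self-contained sketch of that check/upgrade/re-check pattern with generic names; compute() stands in for the real LambdaSchedule::morph():

    #include <QHash>
    #include <QReadWriteLock>
    #include <QString>
    #include <QVector>

    // Sketch of the read-then-write locking used by MolLambdaCache::morph.
    class MorphCacheSketch
    {
    public:
        const QVector<double> &value(const QString &key) const
        {
            auto self = const_cast<MorphCacheSketch *>(this);

            {
                QReadLocker read_lock(&(self->lock));

                auto it = store.constFind(key);

                if (it != store.constEnd())
                    return it.value(); // fast path - many readers in parallel
            }

            // not cached yet - take the write lock and re-check, because another
            // thread may have created the entry between the two locks
            QWriteLocker write_lock(&(self->lock));

            auto it = store.constFind(key);

            if (it == store.constEnd())
                it = self->store.insert(key, compute(key));

            return it.value();
        }

    private:
        static QVector<double> compute(const QString &key)
        {
            // placeholder for the real (expensive) interpolation work
            return QVector<double>(3, double(key.size()));
        }

        QHash<QString, QVector<double>> store;
        QReadWriteLock lock;
    };

The read lock is released before the write lock is taken because QReadWriteLock is not recursive; acquiring the write lock while still holding the read lock in the same thread would deadlock, which is why the real code unlocks the QReadLocker explicitly.
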
+//////
+////// Implementation of LambdaLever
+//////
+
 LambdaLever::LambdaLever() : SireBase::ConcreteProperty<LambdaLever, Property>()
 {
 }
@@ -46,7 +165,8 @@ LambdaLever::LambdaLever(const LambdaLever &other)
       lambda_schedule(other.lambda_schedule),
       perturbable_mols(other.perturbable_mols),
       start_indicies(other.start_indicies),
-      perturbable_maps(other.perturbable_maps)
+      perturbable_maps(other.perturbable_maps),
+      lambda_cache(other.lambda_cache)
 {
 }
 
@@ -64,6 +184,7 @@ LambdaLever &LambdaLever::operator=(const LambdaLever &other)
         perturbable_mols = other.perturbable_mols;
         start_indicies = other.start_indicies;
         perturbable_maps = other.perturbable_maps;
+        lambda_cache = other.lambda_cache;
 
         Property::operator=(other);
     }
@@ -108,6 +229,7 @@ bool LambdaLever::hasLever(const QString &lever_name)
 void LambdaLever::addLever(const QString &lever_name)
 {
     this->lambda_schedule.addLever(lever_name);
+    this->lambda_cache.clear();
 }
 
 /** Get the index of the force called 'name'. Returns -1 if
@@ -253,84 +375,90 @@ double LambdaLever::setLambda(OpenMM::Context &context,
     std::vector<double> custom_params = {0.0, 0.0, 0.0, 0.0};
 
+    // record the range of indicies of the atoms which change
+    int start_change_atom = -1;
+    int end_change_atom = -1;
+
     // change the parameters for all of the perturbable molecules
     for (int i = 0; i < this->perturbable_mols.count(); ++i)
     {
         const auto &perturbable_mol = this->perturbable_mols[i];
         const auto &start_idxs = this->start_indicies[i];
 
+        const auto &cache = this->lambda_cache.get(i, lambda_value);
+
         // calculate the new parameters for this lambda value
-        const auto morphed_charges = this->lambda_schedule.morph(
+        const auto morphed_charges = cache.morph(
+            this->lambda_schedule,
             "charge",
             perturbable_mol.getCharges0(),
-            perturbable_mol.getCharges1(),
-            lambda_value);
+            perturbable_mol.getCharges1());
 
-        const auto morphed_sigmas = this->lambda_schedule.morph(
+        const auto morphed_sigmas = cache.morph(
+            this->lambda_schedule,
             "sigma",
             perturbable_mol.getSigmas0(),
-            perturbable_mol.getSigmas1(),
-            lambda_value);
+            perturbable_mol.getSigmas1());
 
-        const auto morphed_epsilons = this->lambda_schedule.morph(
+        const auto morphed_epsilons = cache.morph(
+            this->lambda_schedule,
             "epsilon",
             perturbable_mol.getEpsilons0(),
-            perturbable_mol.getEpsilons1(),
-            lambda_value);
+            perturbable_mol.getEpsilons1());
 
-        const auto morphed_alphas = this->lambda_schedule.morph(
+        const auto morphed_alphas = cache.morph(
+            this->lambda_schedule,
             "alpha",
             perturbable_mol.getAlphas0(),
-            perturbable_mol.getAlphas1(),
-            lambda_value);
+            perturbable_mol.getAlphas1());
 
-        const auto morphed_bond_k = this->lambda_schedule.morph(
+        const auto morphed_bond_k = cache.morph(
+            this->lambda_schedule,
             "bond_k",
             perturbable_mol.getBondKs0(),
-            perturbable_mol.getBondKs1(),
-            lambda_value);
+            perturbable_mol.getBondKs1());
 
-        const auto morphed_bond_length = this->lambda_schedule.morph(
+        const auto morphed_bond_length = cache.morph(
+            this->lambda_schedule,
             "bond_length",
             perturbable_mol.getBondLengths0(),
-            perturbable_mol.getBondLengths1(),
-            lambda_value);
+            perturbable_mol.getBondLengths1());
 
-        const auto morphed_angle_k = this->lambda_schedule.morph(
+        const auto morphed_angle_k = cache.morph(
+            this->lambda_schedule,
            "angle_k",
             perturbable_mol.getAngleKs0(),
-            perturbable_mol.getAngleKs1(),
-            lambda_value);
+            perturbable_mol.getAngleKs1());
 
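
Each of these cache.morph(...) calls (and the remaining ones below) returns the parameters for one lever at the current lambda value, computed once per molecule, lambda value and lever, and then served from the MolLambdaCache on every later call. As a rough sketch of the kind of work being cached, assuming the simplest possible lever equation, a straight linear interpolation between the two end states (the real LambdaSchedule supports user-defined equations per lever and per stage):

    #include <QVector>

    // Rough sketch of a default "morph" between end-state parameters.
    // Linear interpolation is only the simplest case, shown for illustration.
    QVector<double> morph_linear(const QVector<double> &initial,
                                 const QVector<double> &final,
                                 double lam)
    {
        const int n = qMin(initial.count(), final.count());

        QVector<double> morphed(n);

        for (int i = 0; i < n; ++i)
            morphed[i] = (1.0 - lam) * initial[i] + lam * final[i];

        return morphed;
    }

Because a free energy simulation revisits the same small set of lambda windows many times, caching these vectors removes this per-atom recomputation from every lambda update, which is the speed-up described in the changelog entry above.
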
-        const auto morphed_angle_size = this->lambda_schedule.morph(
+        const auto morphed_angle_size = cache.morph(
+            this->lambda_schedule,
             "angle_size",
             perturbable_mol.getAngleSizes0(),
-            perturbable_mol.getAngleSizes1(),
-            lambda_value);
+            perturbable_mol.getAngleSizes1());
 
-        const auto morphed_torsion_phase = this->lambda_schedule.morph(
+        const auto morphed_torsion_phase = cache.morph(
+            this->lambda_schedule,
             "torsion_phase",
             perturbable_mol.getTorsionPhases0(),
-            perturbable_mol.getTorsionPhases1(),
-            lambda_value);
+            perturbable_mol.getTorsionPhases1());
 
-        const auto morphed_torsion_k = this->lambda_schedule.morph(
+        const auto morphed_torsion_k = cache.morph(
+            this->lambda_schedule,
             "torsion_k",
             perturbable_mol.getTorsionKs0(),
-            perturbable_mol.getTorsionKs1(),
-            lambda_value);
+            perturbable_mol.getTorsionKs1());
 
-        const auto morphed_charge_scale = this->lambda_schedule.morph(
+        const auto morphed_charge_scale = cache.morph(
+            this->lambda_schedule,
             "charge_scale",
             perturbable_mol.getChargeScales0(),
-            perturbable_mol.getChargeScales1(),
-            lambda_value);
+            perturbable_mol.getChargeScales1());
 
-        const auto morphed_lj_scale = this->lambda_schedule.morph(
+        const auto morphed_lj_scale = cache.morph(
+            this->lambda_schedule,
             "lj_scale",
             perturbable_mol.getLJScales0(),
-            perturbable_mol.getLJScales1(),
-            lambda_value);
+            perturbable_mol.getLJScales1());
 
         // now update the forcefields
         int start_index = start_idxs.value("clj", -1);
@@ -339,6 +467,16 @@ double LambdaLever::setLambda(OpenMM::Context &context,
         {
             const int nparams = morphed_charges.count();
 
+            if (start_change_atom == -1)
+            {
+                start_change_atom = start_index;
+                end_change_atom = start_index + nparams;
+            }
+            else if (start_index >= end_change_atom)
+            {
+                end_change_atom = start_index + nparams;
+            }
+
             if (have_ghost_atoms)
             {
                 for (int j = 0; j < nparams; ++j)
@@ -532,17 +670,22 @@ double LambdaLever::setLambda(OpenMM::Context &context,
         cljff->updateParametersInContext(context);
 
     if (ghost_ghostff)
+#ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT
+        ghost_ghostff->updateSomeParametersInContext(start_change_atom, end_change_atom - start_change_atom, context);
+#else
         ghost_ghostff->updateParametersInContext(context);
+#endif
 
     if (ghost_nonghostff)
+#ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT
+        ghost_nonghostff->updateSomeParametersInContext(start_change_atom, end_change_atom - start_change_atom, context);
+#else
         ghost_nonghostff->updateParametersInContext(context);
+#endif
 
     if (ghost_14ff)
         ghost_14ff->updateParametersInContext(context);
 
-    // in OpenMM 8.1beta updating the bond parameters past lambda=0.25
-    // causes a "All Forces must have identical exclusions" error,
-    // when running minimisation without h-bond constraints...
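
The #ifdef blocks above use OpenMM's updateSomeParametersInContext() when the CMake probe earlier in this patch defined SIRE_HAS_UPDATE_SOME_IN_CONTEXT, so that only the contiguous block of perturbable atoms recorded in start_change_atom / end_change_atom is re-uploaded to the Context; on older OpenMM versions the full updateParametersInContext() is used instead. A sketch of that guarded call; the helper name and the defensive range check are illustrative and not part of the patch:

    #include <OpenMM.h>

    // Sketch of the guarded partial-update call used above. "start" and "count"
    // describe the contiguous block of perturbable atoms whose parameters changed;
    // nothing outside that block needs to be re-uploaded when the partial API exists.
    void update_changed_particles(OpenMM::CustomNonbondedForce &force,
                                  OpenMM::Context &context,
                                  int start, int count)
    {
    #ifdef SIRE_HAS_UPDATE_SOME_IN_CONTEXT
        if (start >= 0 and count > 0)
            force.updateSomeParametersInContext(start, count, context);
        else
            force.updateParametersInContext(context);
    #else
        // older OpenMM - re-upload every particle's parameters
        force.updateParametersInContext(context);
    #endif
    }
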
 
     if (bondff)
         bondff->updateParametersInContext(context);
 
@@ -767,6 +910,7 @@ int LambdaLever::addPerturbableMolecule(const OpenMMMolecule &molecule,
     this->perturbable_mols.append(PerturbableOpenMMMolecule(molecule));
     this->start_indicies.append(starts);
     this->perturbable_maps.insert(molecule.number, molecule.perturtable_map);
+    this->lambda_cache.clear();
 
     return this->perturbable_mols.count() - 1;
 }
@@ -805,4 +949,6 @@ void LambdaLever::setSchedule(const LambdaSchedule &sched)
     {
         lambda_schedule.addLever(lever);
     }
+
+    this->lambda_cache.clear();
 }
diff --git a/wrapper/Convert/SireOpenMM/lambdalever.h b/wrapper/Convert/SireOpenMM/lambdalever.h
index ee158ce25..f934c7d4a 100644
--- a/wrapper/Convert/SireOpenMM/lambdalever.h
+++ b/wrapper/Convert/SireOpenMM/lambdalever.h
@@ -33,10 +33,52 @@
 #include "SireCAS/lambdaschedule.h"
 
+#include <QReadWriteLock>
+
+#include <QHash>
+
 SIRE_BEGIN_HEADER
 
 namespace SireOpenMM
 {
+    class MolLambdaCache
+    {
+    public:
+        MolLambdaCache();
+        MolLambdaCache(double lam_val);
+        MolLambdaCache(const MolLambdaCache &other);
+        ~MolLambdaCache();
+
+        MolLambdaCache &operator=(const MolLambdaCache &other);
+
+        const QVector<double> &morph(const SireCAS::LambdaSchedule &schedule,
+                                     const QString &key,
+                                     const QVector<double> &initial,
+                                     const QVector<double> &final) const;
+
+    private:
+        QHash<QString, QVector<double>> cache;
+        QReadWriteLock lock;
+        double lam_val;
+    };
+
+    class LeverCache
+    {
+    public:
+        LeverCache();
+        LeverCache(const LeverCache &other);
+        ~LeverCache();
+
+        LeverCache &operator=(const LeverCache &other);
+
+        const MolLambdaCache &get(int molidx, double lam_val) const;
+
+        void clear();
+
+    private:
+        QHash<int, QHash<double, MolLambdaCache>> cache;
+    };
+
     /** This is a lever that is used to change the parameters in an OpenMM
      * context according to a lambda value. This is actually a collection
      * of levers, each of which is controlled by the main lever.
@@ -117,6 +159,9 @@ namespace SireOpenMM
 
         /** All of the property maps for the perturbable molecules */
         QHash<SireMol::MolNum, SireBase::PropertyMap> perturbable_maps;
+
+        /** Cache of the parameters for different lambda values */
+        LeverCache lambda_cache;
     };
 
 #ifndef SIRE_SKIP_INLINE_FUNCTION
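
The LeverCache declared above is keyed first by the perturbable molecule's index and then by the exact lambda value, so a lookup in setLambda() is just two hash probes. Using a double as a QHash key is safe here because the lever is always driven with the same discrete lambda window values, and anything that could invalidate the cached vectors (adding a lever or a perturbable molecule, or replacing the schedule) calls clear(). A minimal usage sketch, assuming lambdalever.h is included and using illustrative values:

    #include <QDebug>
    #include <QVector>

    // Illustrative only - repeated calls with the same (molecule, lambda) pair
    // return the same cached object, so the morphed vectors are computed once.
    void sketch_lever_cache_usage(const SireOpenMM::LeverCache &cache,
                                  const SireCAS::LambdaSchedule &schedule,
                                  const QVector<double> &charges0,
                                  const QVector<double> &charges1)
    {
        // first call computes and stores the morphed charges for molecule 0 at lambda=0.25
        const auto &first = cache.get(0, 0.25).morph(schedule, "charge", charges0, charges1);

        // second call at the same lambda value is a pure cache hit
        const auto &again = cache.get(0, 0.25).morph(schedule, "charge", charges0, charges1);

        qDebug() << (&first == &again); // same cached vector
    }
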
diff --git a/wrapper/Convert/SireOpenMM/openmmmolecule.cpp b/wrapper/Convert/SireOpenMM/openmmmolecule.cpp
index dbc1f3276..9655c73cf 100644
--- a/wrapper/Convert/SireOpenMM/openmmmolecule.cpp
+++ b/wrapper/Convert/SireOpenMM/openmmmolecule.cpp
@@ -500,11 +500,17 @@ void OpenMMMolecule::constructFromAmber(const Molecule &mol,
         double sig = lj.sigma().to(SireUnits::nanometer);
         double eps = lj.epsilon().to(SireUnits::kJ_per_mol);
 
-        if (std::abs(sig) < 1e-9)
+        if (sig == 0)
         {
             // this must be a null parameter
+            // Using eps=0 sig=1 causes instability though (NaN errors)
+            // so we will set sig=1e-9. This seems to be more stable
             eps = 0;
-            sig = 1;
+            sig = 1e-9;
+        }
+        else if (std::abs(sig) < 1e-9)
+        {
+            sig = 1e-9;
         }
 
         cljs_data[i] = std::make_tuple(chg, sig, eps);
diff --git a/wrapper/Error/wrap_exceptions.cpp b/wrapper/Error/wrap_exceptions.cpp
index fc366336d..3b9fb8b55 100644
--- a/wrapper/Error/wrap_exceptions.cpp
+++ b/wrapper/Error/wrap_exceptions.cpp
@@ -152,17 +152,33 @@ namespace SireError
     void exception_translator(const SireError::exception &ex)
     {
         boost::python::release_gil_policy::acquire_gil_no_raii();
-        PyErr_SetString(PyExc_UserWarning,
+        PyErr_SetString(PyExc_RuntimeError,
                         get_exception_string(ex).toUtf8());
     }
 
-    void std_exception_translator(const std::exception &ex)
+    void bad_alloc_exception_translator(const std::bad_alloc &ex)
     {
         boost::python::release_gil_policy::acquire_gil_no_raii();
-        PyErr_SetString(PyExc_UserWarning,
+        PyErr_SetString(PyExc_MemoryError,
                         QString("%1").arg(ex.what()).toUtf8());
     }
 
+    void std_exception_translator(const std::exception &ex)
+    {
+        boost::python::release_gil_policy::acquire_gil_no_raii();
+
+        if (dynamic_cast<const std::bad_alloc *>(&ex) != 0)
+        {
+            PyErr_SetString(PyExc_MemoryError,
+                            "Memory error - out of memory?");
+        }
+        else
+        {
+            PyErr_SetString(PyExc_RuntimeError,
+                            QString("%1").arg(ex.what()).toUtf8());
+        }
+    }
+
     SireError::FastExceptionFlag *fast_exception_flag(0);
 
     void enable_backtrace_exceptions()
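
The LJ-parameter handling in openmmmolecule.cpp above now treats a sigma of exactly zero as a null (dummy) parameter, zeroing epsilon and substituting sigma=1e-9, while a merely tiny non-zero sigma is clamped to 1e-9 without touching epsilon. A compact restatement of that rule as a free function; the helper name is illustrative, and values are in OpenMM units (sigma in nm, epsilon in kJ/mol):

    #include <cmath>
    #include <tuple>

    // Sketch of the null/tiny sigma handling above. A sigma of exactly zero marks
    // a dummy ("null") LJ site, so epsilon is zeroed too; an extremely small but
    // non-zero sigma is clamped so the softcore expressions stay finite.
    std::tuple<double, double> sanitise_lj(double sig, double eps)
    {
        if (sig == 0)
        {
            // null parameter - eps=0 with sig=1 was found to give NaNs,
            // so a tiny non-zero sigma is used instead
            return std::make_tuple(1e-9, 0.0);
        }
        else if (std::abs(sig) < 1e-9)
        {
            return std::make_tuple(1e-9, eps);
        }

        return std::make_tuple(sig, eps);
    }
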
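The translators above map C++ exceptions onto Python exception types: Sire and generic std::exception errors now surface as RuntimeError rather than UserWarning, and out-of-memory conditions as MemoryError. A sketch of how such translators are hooked up with Boost.Python's register_exception_translator; where and in what order Sire actually registers its translators is not shown in this patch, so treat the ordering note as an assumption (later-registered translators are given the first chance to match), and note that the real translators also re-acquire the GIL before calling into the Python C API:

    #include <boost/python.hpp>
    #include <new>
    #include <stdexcept>

    // Illustrative registration of the two translators described above.
    void sketch_register_translators()
    {
        // generic std::exception -> RuntimeError
        boost::python::register_exception_translator<std::exception>(
            [](const std::exception &ex) {
                PyErr_SetString(PyExc_RuntimeError, ex.what());
            });

        // specific std::bad_alloc -> MemoryError (registered last so it is tried first)
        boost::python::register_exception_translator<std::bad_alloc>(
            [](const std::bad_alloc &) {
                PyErr_SetString(PyExc_MemoryError, "Memory error - out of memory?");
            });
    }

Keeping the dynamic_cast check inside std_exception_translator, as the patch does, gives the same MemoryError mapping even if the dedicated bad_alloc translator is not reached first.
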