diff --git a/TIES_MD/TIES.py b/TIES_MD/TIES.py
index 0a3d302..4915795 100644
--- a/TIES_MD/TIES.py
+++ b/TIES_MD/TIES.py
@@ -52,7 +52,7 @@ class TIES(object):
:param exp_name: str, for the names of experiment i.e. complex -> complex.pdb/complex.prmtop
:param run_type: str, flag to say if we should run dynamics or not
:param devices: list, list of ints for which cuda devices to use
- :param node_id: float, id denoting what replica of this simulation this execution of TIES_MD should run
+ :param rep_id: int, id denoting which replica of this simulation this execution of TIES_MD should run
:param windows_mask: list containing ints for start and end range of windows to be run
:param periodic: boolean determines if the simulation will be periodic
:param lam: Lambda class, allow passing of custom lambda schedule
@@ -60,10 +60,11 @@ class TIES(object):
:param **kwargs: dict, containing setting from config file
'''
- def __init__(self, cwd, exp_name, run_type='class', devices=None, node_id=None, windows_mask=None, periodic=True,
- lam=None, platform='CUDA', **kwargs):
+ def __init__(self, cwd, exp_name='complex', run_type='class', devices=None, rep_id=None, windows_mask=None,
+ periodic=True, lam=None, platform='CUDA', **kwargs):
nice_print('TIES')
- if run_type == 'class' and kwargs is None:
+
+ if run_type == 'class' and kwargs == {}:
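+        #no settings passed programmatically; fall back to reading TIES.cfg from disk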
kwargs = read_config(os.path.join(cwd, 'TIES.cfg'))
print('If you use this software please cite:')
print('Wade, A.D., et al. 2022. Alchemical Free Energy Estimators and Molecular Dynamics Engines:'
@@ -77,9 +78,7 @@ def __init__(self, cwd, exp_name, run_type='class', devices=None, node_id=None,
#check all the config file args we need are present
args_list = ['engine', 'temperature', 'pressure', 'sampling_per_window', 'equili_per_window', 'methods',
'total_reps', 'split_run', 'elec_edges', 'ster_edges', 'global_lambdas', 'constraint_file',
- 'constraint_column', 'input_type', 'box_type']
-
- optional_args = ['cell_basis_vec1', 'cell_basis_vec2', 'cell_basis_vec3', 'edge_length']
+ 'constraint_column', 'input_type', 'cell_basis_vec1', 'cell_basis_vec2', 'cell_basis_vec3']
# check we have all required arguments
for argument in args_list:
@@ -88,14 +87,14 @@ def __init__(self, cwd, exp_name, run_type='class', devices=None, node_id=None,
# check we have no unexpected arguments
for argument in kwargs.keys():
- if argument not in args_list+optional_args:
+ if argument not in args_list:
raise ValueError('Argument {} not supported for this engine or at all.'
' Please remove from the TIES.cfg.'.format(argument))
- self.all_args = args_list+optional_args
-
+ self.all_args = args_list
#engine must be dealt with first to set namd_version, which other options may need.
- api_sensitive = ['engine', 'split_run', 'elec_edges', 'ster_edges', 'global_lambdas', 'box_type']
+ api_sensitive = ['engine', 'split_run', 'elec_edges', 'ster_edges', 'global_lambdas',
+ 'cell_basis_vec1', 'cell_basis_vec2', 'cell_basis_vec3']
#Iterate over our args_dict to set attributes of class to values in dict
print('Read arguments from file:')
@@ -108,11 +107,6 @@ def __init__(self, cwd, exp_name, run_type='class', devices=None, node_id=None,
setattr(self, full_k, v)
print('')
- #set any nonexistant optional args to None
- for option in optional_args:
- if option not in kwargs.keys():
- setattr(self, option, None)
-
#set any attr the api needs
self._split_run = bool(int(self._split_run))
if self._split_run:
@@ -144,6 +138,7 @@ def __init__(self, cwd, exp_name, run_type='class', devices=None, node_id=None,
self.run_type = run_type
self.methods = self.methods.split(',')
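+    #placeholders for the three box vectors, filled by the cell_basis_vec1/2/3 setters below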
+ self.basis_vectors = [[], [], []]
self.total_reps = int(self.total_reps)
self.reps_per_exec = int(self.reps_per_exec)
@@ -168,14 +163,15 @@ def __init__(self, cwd, exp_name, run_type='class', devices=None, node_id=None,
self.platform = platform
self.cwd = cwd
- self.node_id = node_id
+ self.rep_id = rep_id
self.periodic = periodic
#run through api logic
for prop in api_sensitive:
setattr(self, prop, self.__getattribute__('_'+prop))
- self.sub_header, self.sub_run_line = None, None
+ self.pre_run_line, self.run_line = None, None
+ self.sub_run_line, self.sub_header = None, None
# build schedule for lambdas, do this last so passed lam can overwrite if desired
print('Lambda schedule:')
@@ -209,51 +205,76 @@ def __init__(self, cwd, exp_name, run_type='class', devices=None, node_id=None,
nice_print('END')
@property
- def box_type(self):
+ def cell_basis_vec1(self):
+ """
+ What is the 1st basis vector of the simulation cell
+
+ :return: list of floats for x, y, z components of vector
+ """
+ return self._cell_basis_vec1
+
+ @cell_basis_vec1.setter
+ def cell_basis_vec1(self, value):
+ '''
+ Setter for cell_basis_vec1
+ :param value: list for x, y, z of box, updates basis_vectors
+
+ :return: None
+ '''
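+        #accept a comma separated string (as read from TIES.cfg) or a list of three floats (as set via the API)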
+ if isinstance(value, str):
+ self._cell_basis_vec1 = [float(x) for x in value.split(',')]
+ else:
+ assert len(value) == 3
+ self._cell_basis_vec1 = [x for x in value]
+ self.basis_vectors[0] = Vec3(*self._cell_basis_vec1) * unit.angstrom
+
+ @property
+ def cell_basis_vec2(self):
"""
- What type of simulation box is being used (cube, truncatedOctahedron, rhombicDodecahedron or na for manual)
+ What is the 2nd basis vector of the simulation cell
- :return: string for box type.
+ :return: list of floats for x, y, z components of vector
"""
- return self._box_type
+ return self._cell_basis_vec2
- @box_type.setter
- def box_type(self, value):
+ @cell_basis_vec2.setter
+ def cell_basis_vec2(self, value):
'''
- Setting function for box type, will build manual box from cell basis vectors if user passes box type na
- :param value: str, for what box type we want
+ Setter for cell_basis_vec2
+ :param value: list for x, y, z of box, updates basis_vectors
:return: None
'''
- self._box_type = value
- if self._box_type == 'na':
- vecs = ['cell_basis_vec1', 'cell_basis_vec2', 'cell_basis_vec3']
- for vec in vecs:
- if self.__getattribute__(vec) is None:
- raise ValueError(
- 'If box type is unspecified as na in TIES.cfg the box vectors must be manually specified.'
- ' Please add options {} {} {} to TIES.cfg'.format(*vecs))
- self.cell_basis_vec1 = [float(x) for x in self.cell_basis_vec1.split(',')]
- self.cell_basis_vec2 = [float(x) for x in self.cell_basis_vec2.split(',')]
- self.cell_basis_vec3 = [float(x) for x in self.cell_basis_vec3.split(',')]
-
- self.basis_vectors = [Vec3(*self.cell_basis_vec1) * unit.angstrom,
- Vec3(*self.cell_basis_vec2) * unit.angstrom,
- Vec3(*self.cell_basis_vec3) * unit.angstrom]
+ if isinstance(value, str):
+ self._cell_basis_vec2 = [float(x) for x in value.split(',')]
+ else:
+ assert len(value) == 3
+ self._cell_basis_vec2 = [x for x in value]
+ self.basis_vectors[1] = Vec3(*self._cell_basis_vec2) * unit.angstrom
+ @property
+ def cell_basis_vec3(self):
+ """
+ What is the 3rd basis vector of the simulation cell
+
+ :return: list of floats for x, y, z components of vector
+ """
+ return self._cell_basis_vec3
+
+ @cell_basis_vec3.setter
+ def cell_basis_vec3(self, value):
+ '''
+ Setter for cell_basis_vec3
+ :param value: list for x, y, z of box, updates basis_vectors
+
+ :return: None
+ '''
+ if isinstance(value, str):
+ self._cell_basis_vec3 = [float(x) for x in value.split(',')]
else:
- print('Getting box vectors for {} box. Ignoring cell basis vectors'.format(self.box_type))
- if self.edge_length is None:
- raise ValueError('Must provide edge_length option in TIES.cfg to compute box vectors. If custom box vectors'
- ' are desired set box_type = na in TIES.cfg.')
- self.edge_length = self.edge_length.split('*unit.')
- self.edge_length = unit.Quantity(float(self.edge_length[0]), getattr(unit, self.edge_length[1]))
- self.edge_length = self.edge_length.in_units_of(unit.angstrom) / unit.angstrom
- self.basis_vectors = get_box_vectors(self.box_type, self.edge_length)
-
- self.cell_basis_vec1 = [float(x) for x in self.basis_vectors[0] / unit.angstrom]
- self.cell_basis_vec2 = [float(x) for x in self.basis_vectors[1] / unit.angstrom]
- self.cell_basis_vec3 = [float(x) for x in self.basis_vectors[2] / unit.angstrom]
+ assert len(value) == 3
+ self._cell_basis_vec3 = [x for x in value]
+ self.basis_vectors[2] = Vec3(*self._cell_basis_vec3) * unit.angstrom
@property
def engine(self):
@@ -386,6 +407,8 @@ def split_run(self, value):
if self._split_run:
self.reps_per_exec = 1
else:
+ if self.rep_id is not None:
+ raise ValueError('Split run is off but rep_id has a set value of {}. Unset rep_id'.format(self.rep_id))
self.reps_per_exec = self.total_reps
def update_cfg(self):
@@ -417,8 +440,6 @@ def update_cfg(self):
cons_file='na' if self.constraint_file is None else self.constraint_file,
constraint_column='na' if self.constraint_column is None else self.constraint_column,
input_type=self.input_type,
- box_type='na' if self.box_type is None else self.box_type,
- edge_length='na' if self.edge_length is None else self.edge_length,
**solv_oct_box)
with open(os.path.join(self.cwd, 'TIES.cfg'), 'w') as f:
f.write(ties_script)
@@ -430,12 +451,14 @@ def setup(self):
:return: None
'''
- sub_header, sub_run_line = get_header_and_run(self.engine, self.namd_version, self.split_run,
- self.num_windows, self.total_reps, self.exp_name, self.devices)
+        sub_header, sub_run_line = get_header_and_run(self.engine, self.namd_version, self.split_run, self.num_windows,
+ self.total_reps, self.exp_name, self.devices)
if self.sub_header is None:
self.sub_header = sub_header
- if self.sub_run_line is None:
+ if self.run_line is None and self.pre_run_line is None:
self.sub_run_line = sub_run_line
+        else:
+            #treat an unset pre_run_line or run_line as an empty string
+            self.sub_run_line = (self.pre_run_line or '') + (self.run_line or '')
if self.engine == 'namd':
folders = ['equilibration', 'simulation']
@@ -511,7 +534,7 @@ def get_options(self):
:return: None
'''
- other_user_options = ['sub_header', 'sub_run_line']
+ other_user_options = ['sub_header', 'pre_run_line', 'run_line']
for arg in self.all_args+other_user_options:
print('{}: {}'.format(arg, self.__getattribute__(arg)))
@@ -522,8 +545,8 @@ def run(self):
:return: None
'''
if self.split_run:
- if self.node_id is None:
- raise ValueError('For a split run set --node_id on the command line, or pass node_id as an '
+ if self.rep_id is None:
+ raise ValueError('For a split run set --rep_id on the command line, or pass rep_id as an '
'argument to the TIES() class.')
system = AlchSys(self.cwd, self.exp_name, self.temperature, self.pressure, self.constraint_file,
@@ -531,7 +554,7 @@ def run(self):
self.periodic, self.platform)
if self.split_run:
- system_ids = [System_ID(self.devices[0], self.node_id)]
+ system_ids = [System_ID(self.devices[0], self.rep_id)]
else:
system_ids = [System_ID(self.devices[i % len(self.devices)], i) for i in range(self.total_reps)]
@@ -539,7 +562,7 @@ def run(self):
for rep in system_ids:
for lam in self.lam.str_lams:
lam_dir = 'LAMBDA_{}'.format(lam)
- path = os.path.join(self.cwd, lam_dir, 'rep{}'.format(rep.node_id))
+ path = os.path.join(self.cwd, lam_dir, 'rep{}'.format(rep.rep_id))
if not os.path.exists(path):
raise ValueError('Output dir {} missing.'.format(path))
@@ -1073,7 +1096,7 @@ def get_header_and_run(engine, namd_version, split_run, num_windows, reps, exp_n
#BSUB -e eLIGPAIR.%J""".format(int(np.ceil(num_jobs/gpus_per_node)))
sub_run_line = 'jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 TIES_MD --config_file=$ties_dir/TIES.cfg' \
' --exp_name={} --windows_mask=$lambda,$(expr $lambda + 1)' \
- ' --node_id=$i > $ties_dir/$lambda_$i.out&'.format(exp_name)
+ ' --rep_id=$i > $ties_dir/$lambda_$i.out&'.format(exp_name)
else:
#summit specific
gpus_per_node = 6
diff --git a/TIES_MD/alch.py b/TIES_MD/alch.py
index 2d2dc4e..13238db 100644
--- a/TIES_MD/alch.py
+++ b/TIES_MD/alch.py
@@ -608,12 +608,12 @@ class System_ID(object):
which repeat this simulation is out of some total number of repeats.
:param device_id: int, for OpenMM GPU device id
- :param node_id: str, id number denoting which replica this is
+ :param rep_id: str, id number denoting which replica this is
'''
- def __init__(self, device_id, node_id):
+ def __init__(self, device_id, rep_id):
self.device_id = str(device_id)
- self.node_id = str(node_id)
+ self.rep_id = str(rep_id)
def minimization(NVT, constraint):
@@ -787,7 +787,7 @@ def simulate_system(ids, alch_sys, Lam, mask, cwd, niter, equili_steps, steps_pe
beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * alch_sys.temp)
print('Running simulation on device {}'.format(ids.device_id))
- print('Running replica {} of windows {}'.format(ids.node_id, list(range(mask[0], mask[1]))))
+ print('Running replica {} of windows {}'.format(ids.rep_id, list(range(mask[0], mask[1]))))
nlambda = len(Lam.schedule[0])
nstates = len(Lam.schedule[mask[0]:mask[1]])
@@ -850,7 +850,7 @@ def simulate_system(ids, alch_sys, Lam, mask, cwd, niter, equili_steps, steps_pe
#If velocities are randomized is this still equilibrated?
NPT['sim'].context.setVelocitiesToTemperature(alch_sys.temp)
else:
- equili_file = os.path.join(cwd, 'LAMBDA_{}'.format(Lam.str_lams[i+mask[0]]), 'rep{}'.format(ids.node_id),
+ equili_file = os.path.join(cwd, 'LAMBDA_{}'.format(Lam.str_lams[i+mask[0]]), 'rep{}'.format(ids.rep_id),
'equilibration', 'state')
equili_state_f = equili_file+'_NPT.xml'
@@ -867,7 +867,7 @@ def simulate_system(ids, alch_sys, Lam, mask, cwd, niter, equili_steps, steps_pe
NPT['sim'].loadState(equili_state_f)
#add reporter to simulation
- log = os.path.join(cwd, 'LAMBDA_{}'.format(Lam.str_lams[i+mask[0]]), 'rep{}'.format(ids.node_id), 'simulation',
+ log = os.path.join(cwd, 'LAMBDA_{}'.format(Lam.str_lams[i+mask[0]]), 'rep{}'.format(ids.rep_id), 'simulation',
'log')
add_simulation_reporters(NPT['sim'], total_sim_NPT, save=log)
@@ -875,7 +875,7 @@ def simulate_system(ids, alch_sys, Lam, mask, cwd, niter, equili_steps, steps_pe
for iteration in range(niter):
print('Propagating iteration {}/{} in window {}/{} for replica {}'.format(iteration + 1,
niter, i+mask[0]+1,
- all_states, ids.node_id))
+ all_states, ids.rep_id))
# propogate system in current state
NPT['sim'].step(steps_per_iter)
@@ -904,13 +904,13 @@ def simulate_system(ids, alch_sys, Lam, mask, cwd, niter, equili_steps, steps_pe
print('Saving results')
for i, j in enumerate(Lam.str_lams[mask[0]: mask[1]]):
if 'TI' in alch_sys.methods:
- file = os.path.join(cwd, 'LAMBDA_{}'.format(j), 'rep{}'.format(ids.node_id), 'results',
+ file = os.path.join(cwd, 'LAMBDA_{}'.format(j), 'rep{}'.format(ids.rep_id), 'results',
'TI.npy')
#print('Saving {} result to disk'.format(file))
np.save(file, grads[i, :, :])
if 'FEP' in alch_sys.methods:
- file = os.path.join(cwd, 'LAMBDA_{}'.format(j), 'rep{}'.format(ids.node_id), 'results',
+ file = os.path.join(cwd, 'LAMBDA_{}'.format(j), 'rep{}'.format(ids.rep_id), 'results',
'FEP.npy')
#print('Saving {} result to disk'.format(file))
np.save(file, u_kln[i, :, :])
diff --git a/TIES_MD/cli.py b/TIES_MD/cli.py
index c0d1f48..bf3970b 100644
--- a/TIES_MD/cli.py
+++ b/TIES_MD/cli.py
@@ -31,7 +31,7 @@
TIES_MD
Command line input should be used as follows...
Usage:
-TIES_MD [--devices=LIST] [--run_type=STRING] [--config_file=STRING] [--node_id=INT] [--windows_mask=LIST] [--exp_name=STR]...
+TIES_MD [--devices=LIST] [--run_type=STRING] [--config_file=STRING] [--rep_id=INT] [--windows_mask=LIST] [--exp_name=STR]...
"""
def main(argv=None):
@@ -88,13 +88,13 @@ def main(argv=None):
else:
devices = None
- if args['--node_id']:
+ if args['--rep_id']:
if not_openmm:
- raise ValueError(not_openmm_msg.format('--node_id'))
- node_id = args['--node_id']
- node_id = int(node_id)
+ raise ValueError(not_openmm_msg.format('--rep_id'))
+ rep_id = args['--rep_id']
+ rep_id = int(rep_id)
else:
- node_id = None
+ rep_id = None
-        print(msg.format('node id string', 'None'))
+        print(msg.format('replica id string', 'None'))
if args['--windows_mask']:
@@ -109,5 +109,5 @@ def main(argv=None):
# removed this as an option there is no need to expose it for now
periodic=True
- TIES(input_folder, exp_name, run_type, devices, node_id, mask, periodic, **args_dict)
+ TIES(input_folder, exp_name, run_type, devices, rep_id, mask, periodic, **args_dict)
diff --git a/TIES_MD/doc/source/HPC_submissions.rst b/TIES_MD/doc/source/HPC_submissions.rst
index 5550b9d..9f9e3e2 100644
--- a/TIES_MD/doc/source/HPC_submissions.rst
+++ b/TIES_MD/doc/source/HPC_submissions.rst
@@ -4,7 +4,7 @@ HPC Submission scripts
Here we provide some example submission scripts for various HPC systems. ``TIES MD`` will attempt to automatically write sensible submission
scripts for ``NAMD2`` targeting `ARCHER 2 `_ and for ``OpenMM`` targeting `Summit `_.
In general the user can make their own script for whichever HPC or cluster they prefer. To aid with writing general
-scripts ``TIES MD`` exposes 2 options in the :ref:`API` called ``sub_header`` and ``sub_run_line``. The strings passed
+scripts ``TIES MD`` exposes 3 options in the :ref:`API` called ``sub_header``, ``pre_run_line`` and ``run_line``. The strings passed
with these options will be injected into a general template for a ``NAMD2`` or ``OpenMM`` submission. All generated
submission scripts are written to the base ``TIES MD`` directory as sub.sh. An example of this is provided in :ref:`Running`.
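+
+As a minimal sketch (the values are placeholders drawn from the Summit example below) these options can be set on a
+``TIES`` class instance ``md`` before calling ``setup()``::
+
+    md.sub_header = """#BSUB -J LIGPAIR
+    #BSUB -o oLIGPAIR.%J"""
+    md.pre_run_line = 'jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 '
+    md.run_line = 'ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --rep_id=$i'
+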
@@ -91,12 +91,12 @@ Here we provide an example of ``TIES MD`` running with ``OpenMM`` on `Summit $ties_dir/0.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --node_id="0" > $ties_dir/1.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --node_id="0" > $ties_dir/2.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --node_id="0" > $ties_dir/3.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --node_id="0" > $ties_dir/4.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --node_id="0" > $ties_dir/5.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --rep_id=0 > $ties_dir/0.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --rep_id=0 > $ties_dir/1.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --rep_id=0 > $ties_dir/2.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --rep_id=0 > $ties_dir/3.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --rep_id=0 > $ties_dir/4.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --rep_id=0 > $ties_dir/5.out&
wait
NAMD 3
diff --git a/TIES_MD/doc/source/binding_free_energies.rst b/TIES_MD/doc/source/binding_free_energies.rst
index 120453d..7725f25 100644
--- a/TIES_MD/doc/source/binding_free_energies.rst
+++ b/TIES_MD/doc/source/binding_free_energies.rst
@@ -35,9 +35,7 @@ hybrid ligands but also setup binding free energy calculations for the TIES prot
simulations as follows::
#ties20 imports
- from ties import Pair, Ligand
- from ties import Config
- from ties.protein import Protein
+ from ties import Pair, Config, Ligand, Protein
#Setting for system building
config = Config()
@@ -68,7 +66,7 @@ simulations as follows::
#now declare protein
config.protein = 'receptor.pdb'
config.protein_ff = 'leaprc.protein.ff14SB'
- protein = Protein(config.protein, config)
+ protein = Protein(config)
#re-prepare simulation input, now protein is declared and passed as argument com simulation is built
hybrid.prepare_inputs(protein=protein)
@@ -86,12 +84,12 @@ At this point we have prepped a simulation of one thermodynamic cycle with two l
set these legs up in the directories ``ties/ties-ligandA-ligandB/(lig/com)`` and these map to the
``system/ligand/thermodynamic_leg/`` directory structure that was discussed in the :ref:`Tutorial` section.
In ``ties/ties-ligandA-ligandB/(lig/com)`` there will be the ``build`` directory and ``TIES.cfg`` files as also seen in
-the :ref:`Tutorial`. The settings in ``TIES.cfg`` will be good for a default simulation but in general we may wish to
+the :ref:`Tutorial`. The automatic settings in ``TIES.cfg`` will be good for a default simulation but in general we may wish to
change these quickly and/or write submission scripts for these simulations. To do this we can use the ``TIES_MD`` API as
follows::
#tiesMD imports
- from TIES_MD import TIES, cli
+ from TIES_MD import TIES
import os
#iterate over both legs of BFE calculation
@@ -100,11 +98,10 @@ follows::
ties_dir = os.path.join(os.getcwd(), 'ties', 'ties-ligandA-ligandB', thermo_leg)
#read the default TIES.cfg to initialize
- args_dict = cli.read_config(os.path.join(ties_dir, 'TIES.cfg'))
- md = TIES.TIES(cwd=ties_dir, exp_name='complex', **args_dict)
+ md = TIES(ties_dir)
#change some settings in TIES.cfg
- md.reps_per_exec = 1
+ md.split_run = 1
md.total_reps = 6
#inspect all the options we can configure and change
@@ -120,41 +117,20 @@ follows::
#BSUB -o oLIGPAIR.%J
#BSUB -e eLIGPAIR.%J"""
- #run line in submission scripts can also be changed
- md.sub_run_line = 'jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --node_id=$i > $ties_dir/$lambda$i.out&'
+ #Setting HPC specific elements of run line (example here is Summit)
+ md.pre_run_line = 'jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 '
+
+ #Setting ties_md part of run line
+    md.run_line = 'ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --rep_id=$i'
#setup the new simulation with changed options (also writes submission script)
md.setup()
- #must make sure the TIES.cfg on disk is updated with new settings.
- md.update_cfg()
-
-This changes the TIES.cfg options ``reps_per_exec`` to 1 and ``total_reps`` to 6. To see all configurable options the user
+This changes the TIES.cfg options ``split_run`` to 1 (True) and ``total_reps`` to 6. To see all configurable options the user
can run ``md.get_options()`` as shown above. To generate a general submission script we are modifying the
-``sub_header`` and ``sub_run_line`` internal options and these set what ``TIES_MD`` writes into the submission script. The
-settings above yield the following script::
-
- #!/bin/bash
- #Example script for Summit OpenMM
- #BSUB -P CHM155_001
- #BSUB -W 120
- #BSUB -nnodes 13
- #BSUB -alloc_flags "gpudefault smt1"
- #BSUB -J LIGPAIR
- #BSUB -o oLIGPAIR.%J
- #BSUB -e eLIGPAIR.%J
-
- export ties_dir="ties/ties-ligandA-ligandB/lig"
- cd $ties_dir
-
- for lambda in 0 1 2 3 4 5 6 7 8 9 10 11 12; do
- for i in {0..5}; do
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --node_id=$i > $ties_dir/$lambda$i.out&
- done
- done
- wait
-
-These scripts can be summited to the HPC scheduler, once they finish the last step to get a :math:`{ΞΞ G}` is analysis.
+``sub_header``, ``pre_run_line`` and ``run_line`` internal options and these set what ``TIES_MD`` writes into the
+submission script; for more details see :ref:`API`. These scripts can be submitted to the HPC scheduler and, once they
+finish, the last step to get a :math:`{\Delta\Delta G}` is analysis.
BFE Analysis
------------
diff --git a/TIES_MD/doc/source/parallelization.rst b/TIES_MD/doc/source/parallelization.rst
index 4e650ac..0c98a1c 100644
--- a/TIES_MD/doc/source/parallelization.rst
+++ b/TIES_MD/doc/source/parallelization.rst
@@ -27,15 +27,13 @@ If we wanted to parallelize 3 repeats over 3 GPUs on one node we would run::
Each ``CUDA`` device will then run 8 windows of one replica. Equally this could be split into separate runs of ``TIES MD``
masked to only see one device::
- ties_md --exp_name=sys_solv --devices=0 --node_id=0
- ties_md --exp_name=sys_solv --devices=1 --node_id=1
- ties_md --exp_name=sys_solv --devices=3 --node_id=1
+ ties_md --exp_name=sys_solv --devices=0 --rep_id=0&
+ ties_md --exp_name=sys_solv --devices=1 --rep_id=1&
+ ties_md --exp_name=sys_solv --devices=2 --rep_id=2&
-To run in this configuration the options ``total_reps=3`` and ``reps_per_exec=1`` are set in TIES.cfg to tell ``TIES MD`` that
-there are a total of 3 replicas being run and that each execution of ``TIES MD`` should run only one. Also note we have set
-``--node_id`` to some different values for otherwise identical run lines and this ensures these parallel runs write output
-to unique locations. ``--node_id`` only needs to be set when identical replicas of a simulation are run in separate executions
-of ``TIES MD``.
+To run in this configuration the options ``total_reps=3`` and ``split_run=1`` are set in TIES.cfg to tell ``TIES MD`` that
+there are a total of 3 replicas being run and that each execution of ``TIES MD`` should run only one. ``--rep_id``
+determines which replica each instance will run. ``--rep_id`` only needs to be set when using ``split_run=1``.
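+
+A ``TIES.cfg`` excerpt matching this example would contain::
+
+    total_reps = 3
+    split_run = 1
+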
If we need further parallelization over alchemical windows we can use the command line option ``--windows_mask``.
This option takes a ``Python`` range (start inclusive and end exclusive) of the windows which that instance of
@@ -50,27 +48,27 @@ this option takes a ``Python`` range (start inclusive and end exclusive) of the
ties_md --exp_name=sys_solv --windows_mask=6,7 --devices=6&
ties_md --exp_name=sys_solv --windows_mask=7,8 --devices=7&
-Again using the configuration options ``total_reps=3`` and ``reps_per_exec=1`` the above runs 1 replica of each alchemical
+Now using the configuration options ``total_reps=3`` and ``split_run=0``, the above runs 3 replicas of each alchemical
window on a different GPU.
For maximum parallelism we combine parallelizing over replicas and alchemical windows. For clarity we now consider the
same example as above but now with 6 alchemical windows, 2 replica simulations and one simulation per GPU, so in
-TIES.cfg ``global_lambdas=0.0, 0.1, 0.4, 0.6, 0.9, 1.0``, ``total_reps=2`` and ``reps_per_exec=1``. To scale over multiple node
+TIES.cfg ``global_lambdas=0.0, 0.1, 0.4, 0.6, 0.9, 1.0``, ``total_reps=2`` and ``split_run=1``. To scale over multiple nodes
we could use the resource allocator of the HPC, for example `jsrun `_
on `Summit `_, which would allow us to run with 2 replicas of 6 windows as follows::
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --node_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --rep_id=1&
Note here we do not set ``--devices`` as the masking of GPUs is handled by the resource allocator; this is not the general case.
If a resource allocator is not available an alternative method to run multiple simulations across nodes is to use a message passing interface
@@ -82,9 +80,9 @@ TIES-NAMD
---------
The parallelization of TIES in ``NAMD2`` follows the same ideas as ``OpenMM`` above. We want to run independent simulations
-for all alchemical window and replica simulations. To achieve parallelization over replica simulations there are two options.
-If in TIES.cfg ``total_reps==reps_per_exec`` the submission script that ``TIES_MD`` writes will use the ``NAMD`` option
-``+replicas X`` this makes each ``NAMD`` run ``X`` replicas and the run lines in sub.sh will look something like::
+for all alchemical windows and replica simulations. If in TIES.cfg ``split_run=0`` the submission script that
+``TIES_MD`` writes will use the ``NAMD`` option ``+replicas X``; this makes each ``NAMD`` run ``X`` replicas, and the
+run lines in sub.sh will look something like::
for stage in {0..3}; do
for lambda in 0.00 0.05 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 0.95 1.0; do
@@ -94,7 +92,7 @@ If in TIES.cfg ``total_reps==reps_per_exec`` the submission script that ``TIES_M
wait
done
-Alternatively if ``total_reps=!reps_per_exec`` with ``reps_per_exec=1`` the run lines will look like::
+Alternatively if ``split_run=1`` the run lines will look like::
for stage in {0..3}; do
for lambda in 0.00 0.05 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 0.95 1.0; do
@@ -112,6 +110,6 @@ we have tested up to ``+replicas 135`` on `ARCHER 2
examples the parallelism over alchemical windows is achieved in the loop over lambda.
Using ``NAMD3`` parallelization can be achieved like so (:ref:`NAMD 3`). ``NAMD`` in general has extensive options to provision
-hardware and achieve parallelism, what have outlined here is not exhaustive and we would suggest consulting the `documentation `_
-for more a more comprehensive information.
+hardware and achieve parallelism; what we have outlined here is not exhaustive and we would suggest consulting
+the `documentation `_ for more comprehensive information.
diff --git a/TIES_MD/doc/source/tutorial.rst b/TIES_MD/doc/source/tutorial.rst
index 90f3a0d..067a124 100644
--- a/TIES_MD/doc/source/tutorial.rst
+++ b/TIES_MD/doc/source/tutorial.rst
@@ -4,7 +4,7 @@ Tutorial
Getting started
---------------
-``TIES MD`` is a package which is intended to be used on the command line and submitted to a HPC system. In this document
+``TIES MD`` is a package for the preparation, running and analysis of binding free energy calculations. In this document
we will outline what commands should be run to calculate binding free energies. Before starting any free energy
calculation we must first outline the expected input files to the ``TIES MD`` program.
@@ -13,22 +13,25 @@ In this tutorial we will refer to example systems which can be found in the
git clone https://github.com/UCL-CCS/TIES_MD.git
-and navigating to ``TIES_MD/TIES_MD/examples/``
+and found by navigating to ``TIES_MD/TIES_MD/examples/``
Input
------
``TIES MD`` expects a number of input files; there are two essential files, e.g. ``complex.pdb`` and ``complex.prmtop``.
These files contain information about the position, topology and parameters for the system. Currently we only support
-the ``AMBER`` based format ``prmtop`` but provide a utility to `build `_ them online. ``complex.pdb`` also
-contains the alchemical indexes denoting which atoms will appear and disappear during the simulation. There is also
-an optional input file, ``constraints.pdb``, and this contains indexes denoting which atoms, if any, are constrained
-during the pre-production simulation. This input should all be placed in a directory named build located
-where the user wishes to run the simulation. Examples of these files can be found `here `_.
+the ``AMBER`` based format ``prmtop``. ``complex.pdb`` also contains the alchemical indexes denoting which atoms will
+appear and disappear during the simulation. There is also an optional input file, ``constraints.pdb``, and this
+contains indexes denoting which atoms, if any, are constrained during the pre-production simulation. This input should
+all be placed in a directory named build located where the user wishes to run the simulation. Examples of these files
+can be found `here `_.
+
Please use a directory structure like ``study/system/ligand/thermodynamic_leg/build``; this will allow the analysis scripts to
understand the structure and perform analysis automatically. ``study``, ``system``, ``ligand`` and ``thermodynamic_leg``
can be renamed to anything but the name of the ``build`` directory is fixed. If input for novel ligand transformations is desired the
-`TIES20 `_ program can be used to generate all required inputs.
+`TIES20 `_ program can be used to generate all required inputs. ``TIES 20`` can be
+used via our online `service `_ or locally, and details of how to use it are provided
+later in these documents.
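+
+As an illustrative sketch (every name other than ``build`` is a placeholder) such a directory layout could look like::
+
+    study/
+        system/
+            ligandA-ligandB/
+                lig/
+                    build/
+                com/
+                    build/
+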
The only non-standard input to ``TIES MD`` is a configuration file (``TIES.cfg``) which specifies options which the user may wish to
occasionally change. This file must be placed alongside the build directory. Here we provide an example of such a file::
@@ -61,8 +64,8 @@ occasionally change. This file must be placed alongside the build directory. Her
#How many total replicas of each window are run (we recommend at least 5).
total_reps = 5
- #How many replicas should this evocation of TIES_MD run, used for parallelisation
- reps_per_exec = 5
+ #Boolean for if we will split all replicas into separate runs. (1 for maximum parallelism)
+ split_run = 0
#Where in lambda schedule (0->1) should the electrostatic potentials begin, stop appearing.
elec_edges = 0.5, 1.0
@@ -79,13 +82,7 @@ occasionally change. This file must be placed alongside the build directory. Her
#Which column in the pdb the constraints are provided in, valid options are occupancy/beta_factor. (beta_factor is standard)
constraint_column = beta_factor
- #What type of simulation cell is used valid options are cube, truncatedOctahedron, rhombicDodecahedron or na for manual.
- box_type = na
-
- #The edge length used to compute the cube or truncatedOctahedron or rhombicDodecahedron box vectors
- edge_length = 10*unit.nanometer
-
- #If box_type is na the manually specify box vectors of this simulation, unit Angstrom.
+ #Manually specify box vectors of this simulation, unit Angstrom.
cell_basis_vec1 = 50, 0.0, 0.0
cell_basis_vec2 = 0.0, 50, 0.0
cell_basis_vec3 = 0.0, 0.0, 50
@@ -93,9 +90,9 @@ occasionally change. This file must be placed alongside the build directory. Her
#What input type is provided, only AMBER supported.
input_type = AMBER
-``total_reps`` and ``reps_per_exec`` are options which can be used to achieve simple parallelism of the simulations.
+``total_reps`` and ``split_run`` are options which can be used to achieve simple parallelism of the simulations.
For example if you wished to run a total of 5 simulations on 5 GPUs in parallel one could use the settings
-``total_reps = 5`` and ``reps_per_exec = 1``. See the :ref:`Parallelization` section for more details of how to
+``total_reps = 5`` and ``split_run = 1``. See the :ref:`Parallelization` section for more details of how to
achieve this.
The following image shows ``TIES_MD`` applied to one alchemical transformation.
@@ -113,11 +110,10 @@ energy functions of the system and for more information these settings please se
Note the option ``constraint_column`` which determines if the constraint indexes will be read from the temperature factor
or occupancy column of the constraints PDB. The alchemical indexes are always read from the temperature factor column
-in the main PDB ``complex.pdb``. The ``edge_length`` option can be found in the ``leap.log`` file created during system
-preparation preformed by the users or ``TIES20``. ``TIES20`` will populate a TIES.cfg automatically with the correct box size.
+in the main PDB ``complex.pdb``. ``TIES20`` will populate a TIES.cfg automatically with the correct box size.
Typically a constraint file may be used during preproduction of simulations involving proteins but possibly not a small
-drug like molecule in only solvent. It will be show later in the Binding Free Energy Calculations section when and
+drug like molecule in only solvent. It will be shown later in the :ref:`Binding Free Energy Tutorial` section when and
why we use a constraints file.
Command Line
@@ -146,7 +142,7 @@ values are as follows::
A comma separated list of integers which tells TIES OpenMM which GPUs to run on. If multiple GPUs
are specified then TIES OpenMM will parallelize requested replicas over the available GPUs.
- [--node_id=0]
+ [--rep_id=0]
An int which will be used to generate the names of output files. Should be used if many independent replicas of the
same simulation are run on different nodes to ensure output is written to a unique location.
@@ -154,9 +150,6 @@ values are as follows::
Comma separated list of integers. These specify what alchemical windows the current instance of TIES OpenMM should
run. By default all windows will be run.
- [--periodic=1]
- A value of 1 sets the simulation box as periodic a value of 0 sets the simulation box as non-periodic.
-
Simulation Preparation
----------------------
@@ -199,7 +192,7 @@ to ``1``, there is therefore 6x1 = 6 total simulations to perform. If a HPC subm
ties_md --exp_name=sys_solv --windows_mask=4,5 --devices=4&
ties_md --exp_name=sys_solv --windows_mask=5,6 --devices=5&
-There are a lot of options for how these ``OpenMM`` calcualtions can be structured and parallelized with ``TIES_MD`` see our
+There are a lot of options for how these ``OpenMM`` calculations can be structured and parallelized with ``TIES_MD``; see our
:ref:`Parallelization` page for more information on this. For a ``NAMD`` calculation if the submission script requested 6 CPU
nodes each with 128 cores the run lines in the submission script might look like::
diff --git a/TIES_MD/eng_scripts/cfg_scripts/TIES.cfg b/TIES_MD/eng_scripts/cfg_scripts/TIES.cfg
index b7cf53b..ad0d2ec 100644
--- a/TIES_MD/eng_scripts/cfg_scripts/TIES.cfg
+++ b/TIES_MD/eng_scripts/cfg_scripts/TIES.cfg
@@ -46,11 +46,6 @@ constraint_column = {constraint_column}
#AMBER only supported input.
input_type = {input_type}
-#If cell basis vectors are given then below options are ignored
-#What type of simulation cell is used valid options are cube, truncatedOctahedron, rhombicDodecahedron or na for manual.
-box_type = {box_type}
-edge_length = {edge_length}
-
#simulation cell
cell_basis_vec1 = {cbv1}, {cbv2}, {cbv3}
cell_basis_vec2 = {cbv4}, {cbv5}, {cbv6}
diff --git a/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap0.npy b/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap0.npy
index 699a061..1c05e12 100644
Binary files a/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap0.npy and b/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap0.npy differ
diff --git a/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap1.npy b/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap1.npy
index c8af17f..1ed6c95 100644
Binary files a/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap1.npy and b/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap1.npy differ
diff --git a/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap2.npy b/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap2.npy
index b00a87d..914f6ba 100644
Binary files a/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap2.npy and b/TIES_MD/examples/analysis/OpenMM/FEP/ethane/zero_sum/leg1/overlap2.npy differ
diff --git a/TIES_MD/examples/analysis/OpenMM/TI/ethane/zero_sum/leg1/dUdlam.png b/TIES_MD/examples/analysis/OpenMM/TI/ethane/zero_sum/leg1/dUdlam.png
index 5e17772..9f15bc7 100644
Binary files a/TIES_MD/examples/analysis/OpenMM/TI/ethane/zero_sum/leg1/dUdlam.png and b/TIES_MD/examples/analysis/OpenMM/TI/ethane/zero_sum/leg1/dUdlam.png differ
diff --git a/TIES_MD/examples/ethane/zero_sum/leg1/TIES.cfg b/TIES_MD/examples/ethane/zero_sum/leg1/TIES.cfg
index c1fe13f..738a39c 100644
--- a/TIES_MD/examples/ethane/zero_sum/leg1/TIES.cfg
+++ b/TIES_MD/examples/ethane/zero_sum/leg1/TIES.cfg
@@ -25,8 +25,8 @@ methods = FEP,TI
#How many total replicas of each window are run (we recommend at least 5).
total_reps = 3
-#How many replicas should each instance of TIES_MD run
-reps_per_exec = 3
+#bool, if we will split all replicas into separate runs
+split_run = 0
#Where in lambda schedule (0->1) should the electrostatic potentials begin, stop appearing.
elec_edges = 0.5,1.0
diff --git a/TIES_MD/examples/ethane_namd/zero_sum/leg1/TIES.cfg b/TIES_MD/examples/ethane_namd/zero_sum/leg1/TIES.cfg
index a5f45a1..a1ad54f 100644
--- a/TIES_MD/examples/ethane_namd/zero_sum/leg1/TIES.cfg
+++ b/TIES_MD/examples/ethane_namd/zero_sum/leg1/TIES.cfg
@@ -25,8 +25,8 @@ methods = TI
#How many total replicas of each window are run (we recommend at least 5).
total_reps = 2
-#How many replicas should each instance of TIES_MD run
-reps_per_exec = 1
+#bool, if we will split all replicas into separate runs
+split_run = 0
#Where in lambda schedule (0->1) should the electrostatic potentials begin, stop appearing.
elec_edges = 0.45, 1.0
diff --git a/TIES_MD/examples/hydration/l1-l2/leg1/TIES.cfg b/TIES_MD/examples/hydration/l1-l2/leg1/TIES.cfg
index 33a4daa..d10af4b 100644
--- a/TIES_MD/examples/hydration/l1-l2/leg1/TIES.cfg
+++ b/TIES_MD/examples/hydration/l1-l2/leg1/TIES.cfg
@@ -25,8 +25,8 @@ methods = FEP, TI
#How many total replicas of each window are run (we recommend at least 5).
total_reps = 1
-#How many replicas should each instance of TIES_MD run
-reps_per_exec = 1
+#bool, if we will split all replicas into separate runs
+split_run = 0
#Where in lambda schedule (0->1) should the electrostatic potentials begin, stop appearing.
elec_edges = 0.5, 1.0
diff --git a/TIES_MD/examples/ligand_protein/ties-l2-l1/com/TIES.cfg b/TIES_MD/examples/ligand_protein/ties-l2-l1/com/TIES.cfg
index 942b6ee..4c36bb2 100644
--- a/TIES_MD/examples/ligand_protein/ties-l2-l1/com/TIES.cfg
+++ b/TIES_MD/examples/ligand_protein/ties-l2-l1/com/TIES.cfg
@@ -25,8 +25,8 @@ methods = FEP, TI
#How many total replicas of each window are run (we recommend at least 5).
total_reps = 1
-#How many replicas should each instance of TIES_MD run
-reps_per_exec = 1
+#bool, if we will split all replicas into separate runs
+split_run = 0
#Where in lambda schedule (0->1) should the electrostatic potentials begin, stop appearing.
elec_edges = 0.5, 1.0
diff --git a/TIES_MD/examples/result.dat b/TIES_MD/examples/result.dat
index 2d7b64d..a738c64 100644
--- a/TIES_MD/examples/result.dat
+++ b/TIES_MD/examples/result.dat
@@ -1 +1 @@
-{'OpenMM_FEP': {'ethane': {'zero_sum': [0.0066543761920464265, 0.024134991408633517]}}, 'OpenMM_TI': {'ethane': {'zero_sum': [0.027367754486809615, 0.05992671201405186]}}}
+{'OpenMM_FEP': {'ethane': {'zero_sum': [0.00603386354750524, 0.02403031204797301]}}, 'OpenMM_TI': {'ethane': {'zero_sum': [0.02788597784813418, 0.05971823858786783]}}}
diff --git a/TIES_MD/tests/test_TIES.py b/TIES_MD/tests/test_TIES.py
index 8a7832a..ce19494 100644
--- a/TIES_MD/tests/test_TIES.py
+++ b/TIES_MD/tests/test_TIES.py
@@ -23,7 +23,6 @@ def test_initilization(self):
'global_lambdas': '0.0,0.5,1.0',
'constraint_file': 'cons.pdb',
'constraint_column': 'beta_factor',
- 'box_type': 'na',
'input_type': 'AMBER',
'cell_basis_vec1': '46.644591,0.0,0.0',
'cell_basis_vec2': '0.0,46.888166,0.0',
@@ -31,8 +30,8 @@ def test_initilization(self):
test_msg = '{} was not initialized correctly.'
- test_init = TIES(cwd='./', run_type='class', exp_name='sol', devices=[0], node_id='test',
- windows_mask=[0,2], periodic=True, lam=None, **args_dict)
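+    #rep_id stays None here; it only needs a value when split_run is used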
+ test_init = TIES(cwd='./', run_type='class', exp_name='sol', devices=[0], rep_id=None,
+ windows_mask=[0, 2], periodic=True, lam=None, **args_dict)
self.assertEqual(test_init.sampling_per_window, unit.Quantity(2, unit.picoseconds),
test_msg.format('Sampling per window'))
diff --git a/TIES_MD/tests/test_alch.py b/TIES_MD/tests/test_alch.py
index 6895c49..503d82a 100644
--- a/TIES_MD/tests/test_alch.py
+++ b/TIES_MD/tests/test_alch.py
@@ -250,8 +250,8 @@ def test_run_sim(self):
system = AlchSys(cwd, exp_name, temp, pressure, None, args_dict['constraint_column'], args_dict['methods'],
basis_vec, input_type='AMBER', absolute=False, periodic=True, platform=GLOBAL_PALT)
- node_id = 0
- ids = System_ID(device_id='0', node_id=node_id)
+ rep_id = 0
+ ids = System_ID(device_id='0', rep_id=rep_id)
Lam = Lambdas([0.5, 1], [0.0, 0.5], [x/5 for x in range(0, 6)])
mask = [0, 2]
@@ -262,7 +262,7 @@ def test_run_sim(self):
#build some output dirs
for d in ['results', 'simulation', 'equilibration']:
for l in ['0.00', '0.20']:
- path = os.path.join(cwd, 'LAMBDA_{}/rep{}/{}'.format(l, node_id, d))
+ path = os.path.join(cwd, 'LAMBDA_{}/rep{}/{}'.format(l, rep_id, d))
Path(path).mkdir(parents=True, exist_ok=True)
simulate_system(ids, system, Lam, mask, cwd, niter, equili_steps, steps_per_iter)
@@ -273,7 +273,7 @@ def test_run_sim(self):
#look for output
for l in ['0.00', '0.20']:
for ans, method in zip(expected_shapes, ['TI', 'FEP']):
- path = os.path.join(cwd, 'LAMBDA_{}/rep{}/results/{}.npy'.format(l, node_id, method))
+ path = os.path.join(cwd, 'LAMBDA_{}/rep{}/results/{}.npy'.format(l, rep_id, method))
array = np.load(path)
self.assertEqual(array.shape, ans, 'Failed to generate results of expected shape')
diff --git a/TIES_MD/ties_analysis/ties_analysis.py b/TIES_MD/ties_analysis/ties_analysis.py
index 7ea1ace..75d69f2 100644
--- a/TIES_MD/ties_analysis/ties_analysis.py
+++ b/TIES_MD/ties_analysis/ties_analysis.py
@@ -108,6 +108,7 @@ def run(self):
with open('./result.dat', 'w') as f:
print(result, file=f)
+ nice_print('END')
def nice_print(string):
@@ -152,7 +153,6 @@ def make_exp(verbose=True):
with open('exp.dat', 'w') as fp:
json.dump(exp_dat, fp)
- nice_print('END')
def main():
'''
diff --git a/docs/API.html b/docs/API.html
new file mode 100644
index 0000000..6a18d9d
--- /dev/null
+++ b/docs/API.html
@@ -0,0 +1,226 @@
[built HTML for the new TIES MD API documentation page; the rendered markup is not reproduced here, its source is docs/_sources/API.rst.txt below]
diff --git a/docs/HPC_submissions.html b/docs/HPC_submissions.html
index 70abdbf..4789406 100644
--- a/docs/HPC_submissions.html
+++ b/docs/HPC_submissions.html
@@ -42,6 +42,7 @@
[built HTML mirroring the docs/_sources/HPC_submissions.rst.txt change below; rendered markup not reproduced here]
diff --git a/docs/_sources/API.rst.txt b/docs/_sources/API.rst.txt
new file mode 100644
index 0000000..76c1696
--- /dev/null
+++ b/docs/_sources/API.rst.txt
@@ -0,0 +1,114 @@
+TIES MD API
+===========
+
+``TIES MD`` can be used on the command line but for greater automation we also provide an API that exposes some options that
+may be routinely changed during setup.
+
+API
+---
+
+Here we detail all the options in the API and what should be passed. The options that were previously on the command line
+can be passed into the TIES class like so::
+
+ from TIES_MD import TIES
+ md = TIES(cwd='./', windows_mask=[0,1], rep_id=0, exp_name='sys_solv')
+
+Once the TIES class is constructed the options that were previously in TIES.cfg can now be set as attributes of the TIES
+class like so::
+
+ # openmm.unit is needed to set values with units
+ from openmm import unit
+
+ #string for the molecular dynamics engine (openmm/namd2.14/namd3)
+ md.engine = 'openmm'
+
+ #Target temperature for the thermostat
+ md.temperature = 300.0*unit.kelvin
+
+ #Target pressure for barostat
+ md.pressure = 1.0*unit.atmosphere
+
+ #How much production sampling to run per alchemical window (4ns recommended)
+ md.sampling_per_window = 0.04*unit.nanosecond
+
+ #How much equilibration to run per alchemical window (2ns recommended)
+ md.equili_per_window = 0.002*unit.nanosecond
+
+ #List for which estimators to use.
+ md.methods = ['FEP', 'TI']
+
+ #How many total replicas of each window are run (we recommend at least 5).
+ md.total_reps = 3
+
+ #Boolean for if we will split all replicas into separate runs. (True for maximum parallelism)
+ md.split_run = False
+
+ #List for where in lambda schedule (0->1) should the electrostatic potentials begin, stop appearing.
+ md.elec_edges = [0.5, 1.0]
+
+ #List for where in lambda schedule (0->1) should the Lennard_Jones potentials begin, stop appearing.
+ md.ster_edges = [0.0, 0.5]
+
+ #List for the value the global controlling parameter takes in each window.
+ md.global_lambdas = [0.00, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1.00]
+
+ #String for name of the pdb file with constraints in the build directory, i.e. 'cons.pdb'
+ md.constraint_file = None
+
+    #String for which column in the pdb the constraints are provided in, valid options are 'occupancy'/'beta_factor'.
+ md.constraint_column = None
+
+ #String for what input type is provided, only AMBER supported.
+ md.input_type = 'AMBER'
+
+    #List of x, y, z floats for box vectors of this simulation, unit Angstrom.
+ md.cell_basis_vec1 = [34.55, 0.0, 0.0]
+ md.cell_basis_vec2 = [-11.516722937414105, 32.574214501232206, 0.0]
+ md.cell_basis_vec3 = [-11.516722937414105, -16.287105279373797, 28.21009840448772]
+
+
+Finally there are three additional options that don't appear in TIES.cfg which are::
+
+    #A header for the submission script that will be written for this job.
+ md.sub_header = """#Example script for Summit OpenMM
+ #BSUB -P CHM155_001
+ #BSUB -W 120
+ #BSUB -nnodes 13
+ #BSUB -alloc_flags "gpudefault smt1"
+ #BSUB -J LIGPAIR
+ #BSUB -o oLIGPAIR.%J
+ #BSUB -e eLIGPAIR.%J"""
+
+ #The prefix for the run line of this job.
+ md.pre_run_line = 'jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 '
+
+    #The ties_md run line of this job.
+    md.run_line = 'ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --rep_id=$i'
+
+The use of these three settings will produce a submission script which looks like::
+
+ #!/bin/bash
+ #Example script for Summit OpenMM
+ #BSUB -P CHM155_001
+ #BSUB -W 120
+ #BSUB -nnodes 13
+ #BSUB -alloc_flags "gpudefault smt1"
+ #BSUB -J LIGPAIR
+ #BSUB -o oLIGPAIR.%J
+ #BSUB -e eLIGPAIR.%J
+
+ export ties_dir="ties/ties-ligandA-ligandB/lig"
+ cd $ties_dir
+
+ for lambda in 0 1 2 3 4 5 6 7 8 9 10 11 12; do
+ for i in {0..5}; do
+    jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --rep_id=$i&
+ done
+ done
+ wait
+
+If ``sub_header``, ``pre_run_line`` and ``run_line`` are not set ``TIES_MD`` will make a best guess for a submission script.
+Ideally only small modifications should be needed to run using the best guess scripts. Any tweaks that are applied to get the
+scripts working can then be passed into ``sub_header``, ``pre_run_line`` and ``run_line`` for future system setups. For a
+general idea of how to write submission scripts see :ref:`HPC Submission scripts`.
+
diff --git a/docs/_sources/HPC_submissions.rst.txt b/docs/_sources/HPC_submissions.rst.txt
index 5550b9d..9f9e3e2 100644
--- a/docs/_sources/HPC_submissions.rst.txt
+++ b/docs/_sources/HPC_submissions.rst.txt
@@ -4,7 +4,7 @@ HPC Submission scripts
Here we provide some example submission scripts for various HPC systems. ``TIES MD`` will attempt to automatically write sensible submission
scripts for ``NAMD2`` targeting `ARCHER 2 `_ and for ``OpenMM`` targeting `Summit `_.
In general the user can make their own script for whichever HPC or cluster they prefer. To aid with writing general
-scripts ``TIES MD`` exposes 2 options in the :ref:`API` called ``sub_header`` and ``sub_run_line``. The strings passed
+scripts ``TIES MD`` exposes 3 options in the :ref:`API` called ``sub_header``, ``pre_run_line`` and ``run_line``. The strings passed
with these options will be injected into a general template for a ``NAMD2`` or ``OpenMM`` submission. All generated
submission scripts are written to the base ``TIES MD`` directory as sub.sh. An example of this is provided here in :ref:`Running`.
@@ -91,12 +91,12 @@ Here we provide an example of ``TIES MD`` running with ``OpenMM`` on `Summit $ties_dir/0.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --node_id="0" > $ties_dir/1.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --node_id="0" > $ties_dir/2.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --node_id="0" > $ties_dir/3.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --node_id="0" > $ties_dir/4.out&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --node_id="0" > $ties_dir/5.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --rep_id=0 > $ties_dir/0.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --rep_id=0 > $ties_dir/1.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --rep_id=0 > $ties_dir/2.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --rep_id=0 > $ties_dir/3.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --rep_id=0 > $ties_dir/4.out&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --rep_id=0 > $ties_dir/5.out&
wait
NAMD 3
diff --git a/docs/_sources/TIES_MD.eng_scripts.openmm_sub.rst.txt b/docs/_sources/TIES_MD.eng_scripts.openmm_sub.rst.txt
new file mode 100644
index 0000000..ebb9e7e
--- /dev/null
+++ b/docs/_sources/TIES_MD.eng_scripts.openmm_sub.rst.txt
@@ -0,0 +1,10 @@
+TIES\_MD.eng\_scripts.openmm\_sub package
+=========================================
+
+Module contents
+---------------
+
+.. automodule:: TIES_MD.eng_scripts.openmm_sub
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/docs/_sources/binding_free_energies.rst.txt b/docs/_sources/binding_free_energies.rst.txt
index 120453d..7725f25 100644
--- a/docs/_sources/binding_free_energies.rst.txt
+++ b/docs/_sources/binding_free_energies.rst.txt
@@ -35,9 +35,7 @@ hybrid ligands but also setup binding free energy calculations for the TIES prot
simulations as follows::
#ties20 imports
- from ties import Pair, Ligand
- from ties import Config
- from ties.protein import Protein
+ from ties import Pair, Config, Ligand, Protein
#Setting for system building
config = Config()
@@ -68,7 +66,7 @@ simulations as follows::
#now declare protein
config.protein = 'receptor.pdb'
config.protein_ff = 'leaprc.protein.ff14SB'
- protein = Protein(config.protein, config)
+ protein = Protein(config)
#re-prepare simulation input; now the protein is declared and passed as an argument the com simulation is built
hybrid.prepare_inputs(protein=protein)
@@ -86,12 +84,12 @@ At this point we have prepped a simulation of one thermodynamic cycle with two l
set these legs up in the directories ``ties/ties-ligandA-ligandB/(lig/com)`` and these map to the
``system/ligand/thermodynamic_leg/`` directory structure that was discussed in the :ref:`Tutorial` section.
In ``ties/ties-ligandA-ligandB/(lig/com)`` there will be the ``build`` directory and ``TIES.cfg`` files as also seen in
-the :ref:`Tutorial`. The settings in ``TIES.cfg`` will be good for a default simulation but in general we may wish to
+the :ref:`Tutorial`. The automatic settings in ``TIES.cfg`` will be good for a default simulation but in general we may wish to
change these quickly and/or write submission scripts for these simulations. To do this we can use the ``TIES_MD`` API as
follows::
#tiesMD imports
- from TIES_MD import TIES, cli
+ from TIES_MD import TIES
import os
#iterate over both legs of BFE calculation
@@ -100,11 +98,10 @@ follows::
ties_dir = os.path.join(os.getcwd(), 'ties', 'ties-ligandA-ligandB', thermo_leg)
#read the default TIES.cfg to initialize
- args_dict = cli.read_config(os.path.join(ties_dir, 'TIES.cfg'))
- md = TIES.TIES(cwd=ties_dir, exp_name='complex', **args_dict)
+ md = TIES(ties_dir)
#change some settings in TIES.cfg
- md.reps_per_exec = 1
+ md.split_run = 1
md.total_reps = 6
#inspect all the options we can configure and change
@@ -120,41 +117,20 @@ follows::
#BSUB -o oLIGPAIR.%J
#BSUB -e eLIGPAIR.%J"""
- #run line in submission scripts can also be changed
- md.sub_run_line = 'jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --node_id=$i > $ties_dir/$lambda$i.out&'
+ #Setting HPC specific elements of run line (example here is Summit)
+ md.pre_run_line = 'jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 '
+
+ #Setting ties_md part of run line
+    md.run_line = 'ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --rep_id=$i'
#setup the new simulation with changed options (also writes submission script)
md.setup()
- #must make sure the TIES.cfg on disk is updated with new settings.
- md.update_cfg()
-
-This changes the TIES.cfg options ``reps_per_exec`` to 1 and ``total_reps`` to 6. To see all configurable options the user
+This changes the TIES.cfg options ``split_run`` to 1 (True) and ``total_reps`` to 6. To see all configurable options the user
can run ``md.get_options()`` as shown above. To generate a general submission script we are modifying the
-``sub_header`` and ``sub_run_line`` internal options and these set what ``TIES_MD`` writes into the submission script. The
-settings above yield the following script::
-
- #!/bin/bash
- #Example script for Summit OpenMM
- #BSUB -P CHM155_001
- #BSUB -W 120
- #BSUB -nnodes 13
- #BSUB -alloc_flags "gpudefault smt1"
- #BSUB -J LIGPAIR
- #BSUB -o oLIGPAIR.%J
- #BSUB -e eLIGPAIR.%J
-
- export ties_dir="ties/ties-ligandA-ligandB/lig"
- cd $ties_dir
-
- for lambda in 0 1 2 3 4 5 6 7 8 9 10 11 12; do
- for i in {0..5}; do
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --windows_mask=$lambda,$(expr $lambda + 1) --node_id=$i > $ties_dir/$lambda$i.out&
- done
- done
- wait
-
-These scripts can be summited to the HPC scheduler, once they finish the last step to get a :math:`{\Delta\Delta G}` is analysis.
+``sub_header``, ``pre_run_line`` and ``run_line`` internal options; these set what ``TIES_MD`` writes into the
+submission script, for more details see :ref:`API`. These scripts can be submitted to the HPC scheduler; once they
+finish, the last step to obtain a :math:`{\Delta\Delta G}` is analysis.
BFE Analysis
------------
diff --git a/docs/_sources/parallelization.rst.txt b/docs/_sources/parallelization.rst.txt
index 4e650ac..0c98a1c 100644
--- a/docs/_sources/parallelization.rst.txt
+++ b/docs/_sources/parallelization.rst.txt
@@ -27,15 +27,13 @@ If we wanted to parallelize 3 repeats over 3 GPUs on one node we would run::
Each ``CUDA`` device will then run 8 windows of one replica. Equally this could be split into three separate runs of ``TIES MD``
each masked to only see one device::
- ties_md --exp_name=sys_solv --devices=0 --node_id=0
- ties_md --exp_name=sys_solv --devices=1 --node_id=1
- ties_md --exp_name=sys_solv --devices=3 --node_id=1
+ ties_md --exp_name=sys_solv --devices=0 --rep_id=0&
+ ties_md --exp_name=sys_solv --devices=1 --rep_id=1&
+ ties_md --exp_name=sys_solv --devices=2 --rep_id=2&
-To run in this configuration the options ``total_reps=3`` and ``reps_per_exec=1`` are set in TIES.cfg to tell ``TIES MD`` that
-there are a total of 3 replicas being run and that each execution of ``TIES MD`` should run only one. Also note we have set
-``--node_id`` to some different values for otherwise identical run lines and this ensures these parallel runs write output
-to unique locations. ``--node_id`` only needs to be set when identical replicas of a simulation are run in separate executions
-of ``TIES MD``.
+To run in this configuration the options ``total_reps=3`` and ``split_run=1`` are set in TIES.cfg to tell ``TIES MD`` that
+there are a total of 3 replicas being run and that each execution of ``TIES MD`` should run only one. ``--rep_id``
+determines which replica each instance will run. ``--rep_id`` only needs to be set when using ``split_run=1``.
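+
+The corresponding TIES.cfg fragment for this 3-replica example would read::
+
+    #How many total replicas of each window are run
+    total_reps = 3
+
+    #Boolean for if we will split all replicas into separate runs
+    split_run = 1
+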
If we need further parallelization over alchemical windows we can use the command line option ``--windows_mask``;
this option takes a ``Python`` range (start inclusive and end exclusive) of the windows which that instance of
@@ -50,27 +48,27 @@ this option takes a ``Python`` range (start inclusive and end exclusive) of the
ties_md --exp_name=sys_solv --windows_mask=6,7 --devices=6&
ties_md --exp_name=sys_solv --windows_mask=7,8 --devices=7&
-Again using the configuration options ``total_reps=3`` and ``reps_per_exec=1`` the above runs 1 replica of each alchemical
+Now using the configuration options ``total_reps=3`` and ``split_run=0`` the above runs 3 replicas of each alchemical
window on a different GPU.
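+
+Here the matching TIES.cfg fragment would be::
+
+    total_reps = 3
+    split_run = 0
+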
For maximum parallelism we combine parallelizing over replicas and alchemical windows. For clarity we now consider the
same example as above but now with 6 alchemical windows, 2 replica simulations and one simulation per GPU, so in
-TIES.cfg ``global_lambdas=0.0, 0.1, 0.4, 0.6, 0.9, 1.0``, ``total_reps=2`` and ``reps_per_exec=1``. To scale over multiple node
+TIES.cfg ``global_lambdas=0.0, 0.1, 0.4, 0.6, 0.9, 1.0``, ``total_reps=2`` and ``split_run=1``. To scale over multiple nodes
we could use the resource allocator of the HPC for example `jsrun `_
on `Summit `_ which would allow us to run with 2 replicas of 6 windows as follows::
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --node_id=0&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --node_id=1&
- jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --node_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --rep_id=0&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=0,1 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=1,2 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=2,3 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=3,4 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=4,5 --rep_id=1&
+ jsrun --smpiargs="off" -n 1 -a 1 -c 1 -g 1 -b packed:1 ties_md --config_file=$ties_dir/TIES.cfg --exp_name='sys_solv' --windows_mask=5,6 --rep_id=1&
Note here we do not set ``--devices`` as the masking of GPUs is handled by the resource allocator; this is not the general case.
If a resource allocator is not available an alternative method to run multiple simulations across nodes is to use a message passing interface
@@ -82,9 +80,9 @@ TIES-NAMD
---------
The parallelization of TIES in ``NAMD2`` follows the same ideas as ``OpenMM`` above. We want to run independent simulations
-for all alchemical window and replica simulations. To achieve parallelization over replica simulations there are two options.
-If in TIES.cfg ``total_reps==reps_per_exec`` the submission script that ``TIES_MD`` writes will use the ``NAMD`` option
-``+replicas X`` this makes each ``NAMD`` run ``X`` replicas and the run lines in sub.sh will look something like::
+for all alchemical windows and replica simulations. If in TIES.cfg ``split_run=0`` the submission script that
+``TIES_MD`` writes will use the ``NAMD`` option ``+replicas X``; this makes each ``NAMD`` run ``X`` replicas and the
+run lines in sub.sh will look something like::
for stage in {0..3}; do
for lambda in 0.00 0.05 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 0.95 1.0; do
@@ -94,7 +92,7 @@ If in TIES.cfg ``total_reps==reps_per_exec`` the submission script that ``TIES_M
wait
done
-Alternatively if ``total_reps=!reps_per_exec`` with ``reps_per_exec=1`` the run lines will look like::
+Alternatively if ``split_run=1`` the run lines will look like::
for stage in {0..3}; do
for lambda in 0.00 0.05 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 0.95 1.0; do
@@ -112,6 +110,6 @@ we have tested up to ``+replicas 135`` on `ARCHER 2
examples the parallelism over alchemical windows is achieved in the loop over lambda.
Using ``NAMD3`` parallelization can be achieved like so (:ref:`NAMD 3`). ``NAMD`` in general has extensive options to provision
-hardware and achieve parallelism, what have outlined here is not exhaustive and we would suggest consulting the `documentation `_
-for more a more comprehensive information.
+hardware and achieve parallelism; what we have outlined here is not exhaustive and we would suggest consulting
+the `documentation `_ for more comprehensive information.
diff --git a/docs/_sources/tutorial.rst.txt b/docs/_sources/tutorial.rst.txt
index 90f3a0d..067a124 100644
--- a/docs/_sources/tutorial.rst.txt
+++ b/docs/_sources/tutorial.rst.txt
@@ -4,7 +4,7 @@ Tutorial
Getting started
---------------
-``TIES MD`` is a package which is intended to be used on the command line and submitted to a HPC system. In this document
+``TIES MD`` is a package for the preparation, running and analysis of binding free energy calculations. In this document
we will outline what commands should be run to calculate binding free energies. To start any free energy calculation
we must first outline the expected input files for the ``TIES MD`` program.
@@ -13,22 +13,25 @@ In this tutorial we will refer to example systems which can be found in the
git clone https://github.com/UCL-CCS/TIES_MD.git
-and navigating to ``TIES_MD/TIES_MD/examples/``
+and found by navigating to ``TIES_MD/TIES_MD/examples/``
Input
------
``TIES MD`` expects a number of input files; two of these are essential, e.g. ``complex.pdb`` and ``complex.prmtop``.
These files contain information about the positions, topology and parameters of the system. Currently we only support
-the ``AMBER`` based format ``prmtop`` but provide a utility to `build `_ them online. ``complex.pdb`` also
-contains the alchemical indexes denoting which atoms will appear and disappear during the simulation. There is also
-an optional input file, ``constraints.pdb``, and this contains indexes denoting which atoms, if any, are constrained
-during the pre-production simulation. This input should all be placed in a directory named build located
-where the user wishes to run the simulation. Examples of these files can be found `here `_.
+the ``AMBER`` based format ``prmtop``. ``complex.pdb`` also contains the alchemical indexes denoting which atoms will
+appear and disappear during the simulation. There is also an optional input file, ``constraints.pdb``, and this
+contains indexes denoting which atoms, if any, are constrained during the pre-production simulation. This input should
+all be placed in a directory named build located where the user wishes to run the simulation. Examples of these files
+can be found `here `_.
+
Please use a directory structure like ``study/system/ligand/thermodynamic_leg/build``; this will allow the analysis scripts to
understand the structure and perform analysis automatically. ``study``, ``system``, ``ligand`` and ``thermodynamic_leg``
can be renamed to anything but the name of the ``build`` directory is fixed. If input for novel ligand transformations is desired the
-`TIES20 `_ program can be used to generate all required inputs.
+`TIES20 `_ program can be used to generate all required inputs. ``TIES 20`` can be
+used via our online `service `_ or locally, and details of how to use it are provided
+later in these documents.
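+
+As a sketch, the expected layout for a single calculation is then (all names here are free choices except
+``build``, which is fixed)::
+
+    study/
+        system/
+            ligand/
+                thermodynamic_leg/
+                    TIES.cfg (described below)
+                    build/
+                        complex.pdb
+                        complex.prmtop
+                        constraints.pdb (optional)
+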
The only non-standard input to ``TIES MD`` is a configuration file (``TIES.cfg``) which specifies options the user may wish to
change occasionally. This file must be placed alongside the build directory. Here we provide an example of such a file::
@@ -61,8 +64,8 @@ occasionally change. This file must be placed alongside the build directory. Her
#How many total replicas of each window are run (we recommend at least 5).
total_reps = 5
- #How many replicas should this evocation of TIES_MD run, used for parallelisation
- reps_per_exec = 5
+ #Boolean for if we will split all replicas into separate runs. (1 for maximum parallelism)
+ split_run = 0
#Where in lambda schedule (0->1) should the electrostatic potentials begin, stop appearing.
elec_edges = 0.5, 1.0
@@ -79,13 +82,7 @@ occasionally change. This file must be placed alongside the build directory. Her
#Which column in pdb are constraints provided valid options are occupancy/beta_factor. (beta_factor is standard)
constraint_column = beta_factor
- #What type of simulation cell is used valid options are cube, truncatedOctahedron, rhombicDodecahedron or na for manual.
- box_type = na
-
- #The edge length used to compute the cube or truncatedOctahedron or rhombicDodecahedron box vectors
- edge_length = 10*unit.nanometer
-
- #If box_type is na the manually specify box vectors of this simulation, unit Angstrom.
+ #Manually specify box vectors of this simulation, unit Angstrom.
cell_basis_vec1 = 50, 0.0, 0.0
cell_basis_vec2 = 0.0, 50, 0.0
cell_basis_vec3 = 0.0, 0.0, 50
@@ -93,9 +90,9 @@ occasionally change. This file must be placed alongside the build directory. Her
#What input type is provided, only AMBER supported.
input_type = AMBER
-``total_reps`` and ``reps_per_exec`` are options which can be used to achieve simple parallelism of the simulations.
+``total_reps`` and ``split_run`` are options which can be used to achieve simple parallelism of the simulations.
For example if you wished to run a total of 5 simulations on 5 GPUs in parallel one could use the settings
-``total_reps = 5`` and ``reps_per_exec = 1``. See the :ref:`Parallelization` section for more details of how to
+``total_reps = 5`` and ``split_run = 1``. See the :ref:`Parallelization` section for more details of how to
achieve this.
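+
+For example, with ``total_reps = 5`` and ``split_run = 1``, five instances of ``TIES_MD`` could be
+launched as follows (a sketch following the run lines shown in :ref:`Parallelization`)::
+
+    ties_md --exp_name=sys_solv --devices=0 --rep_id=0&
+    ties_md --exp_name=sys_solv --devices=1 --rep_id=1&
+    ties_md --exp_name=sys_solv --devices=2 --rep_id=2&
+    ties_md --exp_name=sys_solv --devices=3 --rep_id=3&
+    ties_md --exp_name=sys_solv --devices=4 --rep_id=4&
+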
The following image shows ``TIES_MD`` applied to one alchemical transformation.
@@ -113,11 +110,10 @@ energy functions of the system and for more information these settings please se
Note the option ``constraint_column`` which determines if the constraint indexes will be read from the temperature factor
or occupancy column of the constraints PDB. The alchemical indexes are always read from the temperature factor column
-in the main PDB ``complex.pdb``. The ``edge_length`` option can be found in the ``leap.log`` file created during system
-preparation preformed by the users or ``TIES20``. ``TIES20`` will populate a TIES.cfg automatically with the correct box size.
+in the main PDB ``complex.pdb``. ``TIES20`` will populate a TIES.cfg automatically with the correct box size.
Typically a constraint file may be used during pre-production of simulations involving proteins but possibly not for a small
-drug like molecule in only solvent. It will be show later in the Binding Free Energy Calculations section when and
+drug-like molecule in solvent alone. It will be shown later in the :ref:`Binding Free Energy Tutorial` section when and
why we use a constraints file.
Command Line
@@ -146,7 +142,7 @@ values are as follows::
A comma separated list of integers which tells TIES OpenMM which GPUs to run on. If multiple GPUs
are specified then TIES OpenMM will parallelize requested replicas over the available GPUs.
- [--node_id=0]
+ [--rep_id=0]
An int which will be used to generate the names of output files. Should be used if many independent replicas of the
same simulation are run on different nodes to ensure output is written to a unique location.
@@ -154,9 +150,6 @@ values are as follows::
Comma separated list of integers. These specify what alchemical windows the current instance of TIES OpenMM should
run. By default all windows will be run.
- [--periodic=1]
- A value of 1 sets the simulation box as periodic a value of 0 sets the simulation box as non-periodic.
-
Simulation Preparation
----------------------
@@ -199,7 +192,7 @@ to ``1``, there is therefore 6x1 = 6 total simulations to perform. If a HPC subm
ties_md --exp_name=sys_solv --windows_mask=4,5 --devices=4&
ties_md --exp_name=sys_solv --windows_mask=5,6 --devices=5&
-There are a lot of options for how these ``OpenMM`` calcualtions can be structured and parallelized with ``TIES_MD`` see our
+There are a lot of options for how these ``OpenMM`` calculations can be structured and parallelized with ``TIES_MD``; see our
:ref:`Parallelization` page for more information on this. For a ``NAMD`` calculation if the submission script requested 6 CPU
nodes each with 128 cores the run lines in the submission script might look like::
diff --git a/docs/_static/pygments.css b/docs/_static/pygments.css
index 582d5c3..08bec68 100644
--- a/docs/_static/pygments.css
+++ b/docs/_static/pygments.css
@@ -5,22 +5,22 @@ td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5
span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
.highlight .hll { background-color: #ffffcc }
.highlight { background: #f8f8f8; }
-.highlight .c { color: #408080; font-style: italic } /* Comment */
+.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */
.highlight .err { border: 1px solid #FF0000 } /* Error */
.highlight .k { color: #008000; font-weight: bold } /* Keyword */
.highlight .o { color: #666666 } /* Operator */
-.highlight .ch { color: #408080; font-style: italic } /* Comment.Hashbang */
-.highlight .cm { color: #408080; font-style: italic } /* Comment.Multiline */
-.highlight .cp { color: #BC7A00 } /* Comment.Preproc */
-.highlight .cpf { color: #408080; font-style: italic } /* Comment.PreprocFile */
-.highlight .c1 { color: #408080; font-style: italic } /* Comment.Single */
-.highlight .cs { color: #408080; font-style: italic } /* Comment.Special */
+.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
+.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
+.highlight .cp { color: #9C6500 } /* Comment.Preproc */
+.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
+.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
+.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
.highlight .gd { color: #A00000 } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
-.highlight .gr { color: #FF0000 } /* Generic.Error */
+.highlight .gr { color: #E40000 } /* Generic.Error */
.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
-.highlight .gi { color: #00A000 } /* Generic.Inserted */
-.highlight .go { color: #888888 } /* Generic.Output */
+.highlight .gi { color: #008400 } /* Generic.Inserted */
+.highlight .go { color: #717171 } /* Generic.Output */
.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
.highlight .gs { font-weight: bold } /* Generic.Strong */
.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
@@ -33,15 +33,15 @@ span.linenos.special { color: #000000; background-color: #ffffc0; padding-left:
.highlight .kt { color: #B00040 } /* Keyword.Type */
.highlight .m { color: #666666 } /* Literal.Number */
.highlight .s { color: #BA2121 } /* Literal.String */
-.highlight .na { color: #7D9029 } /* Name.Attribute */
+.highlight .na { color: #687822 } /* Name.Attribute */
.highlight .nb { color: #008000 } /* Name.Builtin */
.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */
.highlight .no { color: #880000 } /* Name.Constant */
.highlight .nd { color: #AA22FF } /* Name.Decorator */
-.highlight .ni { color: #999999; font-weight: bold } /* Name.Entity */
-.highlight .ne { color: #D2413A; font-weight: bold } /* Name.Exception */
+.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */
+.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
.highlight .nf { color: #0000FF } /* Name.Function */
-.highlight .nl { color: #A0A000 } /* Name.Label */
+.highlight .nl { color: #767600 } /* Name.Label */
.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */
.highlight .nv { color: #19177C } /* Name.Variable */
@@ -58,11 +58,11 @@ span.linenos.special { color: #000000; background-color: #ffffc0; padding-left:
.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */
.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
.highlight .s2 { color: #BA2121 } /* Literal.String.Double */
-.highlight .se { color: #BB6622; font-weight: bold } /* Literal.String.Escape */
+.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */
-.highlight .si { color: #BB6688; font-weight: bold } /* Literal.String.Interpol */
+.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
.highlight .sx { color: #008000 } /* Literal.String.Other */
-.highlight .sr { color: #BB6688 } /* Literal.String.Regex */
+.highlight .sr { color: #A45A77 } /* Literal.String.Regex */
.highlight .s1 { color: #BA2121 } /* Literal.String.Single */
.highlight .ss { color: #19177C } /* Literal.String.Symbol */
.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */
diff --git a/docs/binding_free_energies.html b/docs/binding_free_energies.html
index 12aba98..51b01b2 100644
--- a/docs/binding_free_energies.html
+++ b/docs/binding_free_energies.html
@@ -21,7 +21,7 @@
-
+
@@ -43,6 +43,7 @@
This changes the TIES.cfg options reps_per_exec to 1 and total_reps to 6. To see all configurable options the user
+
This changes the TIES.cfg options split_run to 1 (True) and total_reps to 6. To see all configurable options the user
can run md.get_options() as shown above. To generate a general submission script we are modifying the
-sub_header and sub_run_line internal options and these set what TIES_MD writes into the submission script. The
-settings above yield the following script:
These scripts can be summited to the HPC scheduler, once they finish the last step to get a \({\Delta\Delta G}\) is analysis.
+sub_header, pre_run_line and run_line internal options; these set what TIES_MD writes into the
+submission script, for more details see API. These scripts can be submitted to the HPC scheduler; once they
+finish, the last step to obtain a \({\Delta\Delta G}\) is analysis.
Again using the configuration options total_reps=3 and reps_per_exec=1 the above runs 1 replica of each alchemical
+
Now using the configuration options total_reps=3 and split_run=0 the above runs 3 replicas of each alchemical
window on a different GPU.
For maximum parallelism we combine parallelizing over replicas and alchemical windows. For clarity we now consider the
same example as above but now with 6 alchemical windows, 2 replica simulations and one simulation per GPU, so in
-TIES.cfg global_lambdas=0.0,0.1,0.4,0.6,0.9,1.0, total_reps=2 and reps_per_exec=1. To scale over multiple node
+TIES.cfg global_lambdas=0.0,0.1,0.4,0.6,0.9,1.0, total_reps=2 and split_run=1. To scale over multiple nodes
we could use the resource allocator of the HPC for example jsrun
on Summit. would allow us to run with 2 replicas of 6 windows as follows:
The parallelization of TIES in NAMD2 follows the same ideas as OpenMM above. We want to run independent simulations
-for all alchemical window and replica simulations. To achieve parallelization over replica simulations there are two options.
-If in TIES.cfg total_reps==reps_per_exec the submission script that TIES_MD writes will use the NAMD option
-+replicasX this makes each NAMD run X replicas and the run lines in sub.sh will look something like:
+for all alchemical windows and replica simulations. If in TIES.cfg split_run=0 the submission script that
+TIES_MD writes will use the NAMD option +replicas X; this makes each NAMD run X replicas and the
+run lines in sub.sh will look something like:
for stage in {0..3}; do
for lambda in 0.00 0.05 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 0.95 1.0; do
srun -N $nodes_per_namd -n $cpus_per_namd namd2 +replicas 5 --tclmain run$stage-replicas.conf $lambda&
@@ -170,7 +169,7 @@
TIES-NAMD +replicas 135 on ARCHER 2 with no crashes. In the two above
examples the parallelism over alchemical windows is achieved in the loop over lambda.
Using NAMD3 parallelization can be achieved like so (NAMD 3). NAMD in general has extensive options to provision
-hardware and achieve parallelism, what have outlined here is not exhaustive and we would suggest consulting the documentation
-for more a more comprehensive information.
+hardware and achieve parallelism; what we have outlined here is not exhaustive and we would suggest consulting
+the documentation for more comprehensive information.
diff --git a/docs/py-modindex.html b/docs/py-modindex.html
index dee2596..a70dd1f 100644
--- a/docs/py-modindex.html
+++ b/docs/py-modindex.html
@@ -42,6 +42,7 @@
TIESMD is a package which is intended to be used on the command line and submitted to a HPC system. In this document
+
TIESMD is a package for the preparation, running and analysis of binding free energy calculations. In this document
we will outline what commands should be run to calculate binding free energies. To start any free energy calculation
we must first outline the expected input files for the TIESMD program.
In this tutorial we will refer to example systems which can be found in the
@@ -96,21 +97,23 @@
TIESMD expects a number of input files; two of these are essential, e.g. complex.pdb and complex.prmtop.
These files contain information about the positions, topology and parameters of the system. Currently we only support
-the AMBER based format prmtop but provide a utility to build them online. complex.pdb also
-contains the alchemical indexes denoting which atoms will appear and disappear during the simulation. There is also
-an optional input file, constraints.pdb, and this contains indexes denoting which atoms, if any, are constrained
-during the pre-production simulation. This input should all be placed in a directory named build located
-where the user wishes to run the simulation. Examples of these files can be found here.
-Please use a directory structure like study/system/ligand/thermodynamic_leg/build this will allow the analysis scripts to
+the AMBER based format prmtop. complex.pdb also contains the alchemical indexes denoting which atoms will
+appear and disappear during the simulation. There is also an optional input file, constraints.pdb, and this
+contains indexes denoting which atoms, if any, are constrained during the pre-production simulation. This input should
+all be placed in a directory named build located where the user wishes to run the simulation. Examples of these files
+can be found here.
+
Please use a directory structure like study/system/ligand/thermodynamic_leg/build; this will allow the analysis scripts to
understand the structure and perform analysis automatically. study, system, ligand and thermodynamic_leg
can be renamed to anything but the name of the build directory is fixed. If input for novel ligand transformations is desired the
-TIES20 program can be used to generate all required inputs.
+TIES20 program can be used to generate all required inputs. TIES20 can be
+used via our online service or locally, and details of how to use it are provided
+later in these documents.
The only non-standard input to TIESMD is a configuration file (TIES.cfg) which specifies options the user may wish to
change occasionally. This file must be placed alongside the build directory. Here we provide an example of such a file:
total_reps and reps_per_exec are options which can be used to achieve simple parallelism of the simulations.
+
total_reps and split_run are options which can be used to achieve simple parallelism of the simulations.
For example if you wished to run a total of 5 simulations on 5 GPUs in parallel one could use the settings
-total_reps=5 and reps_per_exec=1. See the Parallelization section for more details of how to
+total_reps=5 and split_run=1. See the Parallelization section for more details of how to
achieve this.
The following image shows TIES_MD applied to one alchemical transformation.
Note the option constraint_column which determines if the constraint indexes will be read from the temperature factor
or occupancy column of the constraints PDB. The alchemical indexes are always read from the temperature factor column
-in the main PDB complex.pdb. The edge_length option can be found in the leap.log file created during system
-preparation preformed by the users or TIES20. TIES20 will populate a TIES.cfg automatically with the correct box size.
+in the main PDB complex.pdb. TIES20 will populate a TIES.cfg automatically with the correct box size.
Typically a constraint file may be used during pre-production of simulations involving proteins but possibly not for a small
-drug like molecule in only solvent. It will be show later in the Binding Free Energy Calculations section when and
+drug-like molecule in solvent alone. It will be shown later in the Binding Free Energy Tutorial section when and
why we use a constraints file.
There are a lot of options for how these OpenMM calcualtions can be structured and parallelized with TIES_MD see our
+
+There are a lot of options for how these OpenMM calculations can be structured and parallelized with TIES_MD; see our
Parallelization page for more information on this. For a NAMD calculation if the submission script requested 6 CPU
nodes each with 128 cores the run lines in the submission script might look like: