Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Region mesh redux #394

Merged
merged 4 commits into from
Oct 25, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
79 changes: 79 additions & 0 deletions examples/parallel/KTH_PDC/Dardel_runSnudda_inside.job
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
#!/bin/bash -l
#
# SLURM job script: starts an ipyparallel controller + engines on Dardel,
# then runs the Snudda pipeline driver (Dardel_runSnudda_inside.sh) inside it.
#SBATCH --partition=main
#SBATCH -o log/runSnudda-%j-output.txt
#SBATCH -e log/runSnudda-%j-error.txt
#SBATCH -t 00:30:00
#SBATCH -J Snudda
#SBATCH -A naiss2023-5-231
#SBATCH --nodes=2
#SBATCH -n 256
#SBATCH --cpus-per-task=2
#SBATCH --mem-per-cpu=930M
#SBATCH --mail-type=ALL
module load snic-env


#.. Remove the stack-size limit (network creation can use deep call chains)
#export OMP_STACKSIZE=128G
ulimit -s unlimited


#.. Number of ipyparallel engines to start
#NWORKERS=$((SLURM_NTASKS - 2))
NWORKERS=100

export IPNWORKERS=$NWORKERS


#.. Keep the IPython profile on cluster scratch, and start from a clean one.
# ":?" aborts the job if IPYTHONDIR is somehow empty (protects against rm -rf of cwd);
# -f tolerates the directory not existing on the first run.
export IPYTHONDIR="/cfs/klemming/scratch/${USER:0:1}/${USER}/.ipython"
rm -rf -- "${IPYTHONDIR:?}"
export IPYTHON_PROFILE=default
source "$HOME/Snudda/snudda_env/bin/activate"


#.. Start the ipcontroller. A fresh FI_CXI_DEFAULT_VNI per srun step gives each
# overlapping step its own network identifier on the Slingshot interconnect.
# (Assignment is split from export so a failing od would not be masked.)
FI_CXI_DEFAULT_VNI=$(od -vAn -N4 -tu < /dev/urandom)
export FI_CXI_DEFAULT_VNI
srun -n 1 -N 1 -c 2 --exact --overlap --mem=0 ./ipcontroller_new.sh &


echo ">>> waiting 60s for controller to start"
sleep 60

#.. Read in CONTROLLERIP (written by ipcontroller_new.sh)
CONTROLLERIP=$(<controller_ip.txt)


##.. Start the engines
echo ">>> starting ${IPNWORKERS} engines "

# Earlier debugging variants (verbose crash reporting / valgrind) kept for reference:
#srun -n ${IPNWORKERS} -c 2 --exact --overlap ipengine --location=${CONTROLLERIP} --profile=${IPYTHON_PROFILE} --mpi \
#--ipython-dir=${IPYTHONDIR} --timeout=30.0 --log-level=DEBUG \
#--BaseParallelApplication.verbose_crash=True --IPEngine.verbose_crash=True \
#--Kernel.stop_on_error_timeout=1.0 --IPythonKernel.stop_on_error_timeout=1.0 \
#Session.buffer_threshold=4096 Session.copy_threshold=250000 \
#Session.digest_history_size=250000 c.EngineFactory.max_heartbeat_misses=10 c.MPI.use='mpi4py' \
#1> ipe_${SLURM_JOBID}.out 2> ipe_${SLURM_JOBID}.err &

#srun -n ${IPNWORKERS} -c 2 --exact --overlap valgrind --leak-check=full --show-leak-kinds=all \
#ipengine --location=${CONTROLLERIP} --profile=${IPYTHON_PROFILE} --mpi \
#--ipython-dir=${IPYTHONDIR} --timeout=30.0 c.EngineFactory.max_heartbeat_misses=10 c.MPI.use='mpi4py' \
#1> ipe_${SLURM_JOBID}.out 2> ipe_${SLURM_JOBID}.err &

FI_CXI_DEFAULT_VNI=$(od -vAn -N4 -tu < /dev/urandom)
export FI_CXI_DEFAULT_VNI
srun -n "${IPNWORKERS}" -c 2 -N "${SLURM_JOB_NUM_NODES}" --exact --overlap --mem=0 ipengine \
--location="${CONTROLLERIP}" --profile="${IPYTHON_PROFILE}" --mpi \
--ipython-dir="${IPYTHONDIR}" --timeout=30.0 c.EngineFactory.max_heartbeat_misses=10 c.MPI.use='mpi4py' \
1> "ipe_${SLURM_JOBID}.out" 2> "ipe_${SLURM_JOBID}.err" &


# NOTE(review): message previously said 60s but the sleep is 30s — aligned to 30s.
echo ">>> waiting 30s for engines to start"
sleep 30

FI_CXI_DEFAULT_VNI=$(od -vAn -N4 -tu < /dev/urandom)
export FI_CXI_DEFAULT_VNI
srun -n 1 -N 1 --exact --overlap --mem=0 ./Dardel_runSnudda_inside.sh


echo " "

echo "JOB END $(date)" start_time_network_connect.txt

#.. Reap the background steps (controller + engines) before the job exits
wait

120 changes: 120 additions & 0 deletions examples/parallel/KTH_PDC/Dardel_runSnudda_inside.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,120 @@
#!/bin/bash
#
# Dardel_runSnudda_inside.sh
#
# Runs the Snudda network-creation pipeline (init, place, detect, prune,
# input generation) against the ipyparallel cluster that the surrounding
# SLURM job script has already started. Must be launched with "srun -n 1"
# so that only one task executes the pipeline; any extra ranks fall
# through the SLURM_PROCID guard below and do nothing.

SNUDDA_DIR="$HOME/Snudda/snudda"
JOBDIR=networks/test_10k

# Number of neurons to place in the network
SIMSIZE=10000

# If the BasalGangliaData directory exists, then use that for our data
#/cfs/klemming/scratch/${USER:0:1}/$USER/BasalGangliaData/data
#BasalGangliaData/Parkinson/PD0
if [[ -d "$HOME/BasalGangliaData/data" ]]; then
    export SNUDDA_DATA="$HOME/BasalGangliaData/data"
    echo "Setting SNUDDA_DATA to $SNUDDA_DATA"
else
    echo "SNUDDA_DATA environment variable not changed (may be empty): $SNUDDA_DATA"
fi

mkdir -p "$JOBDIR"

echo "Dardel_runSnudda_inside.sh should be started with srun -n 1, to only get one process"

echo "SLURM_PROCID = $SLURM_PROCID"

# Only the first task runs the pipeline.
if [ "$SLURM_PROCID" -gt 0 ]; then
    mock_string="Not main process"
else

    # For debug purposes:
    echo "PATH: $PATH"
    echo "IPYTHONDIR: $IPYTHONDIR"
    echo "PYTHONPATH: $PYTHONPATH"
    echo "LD_LIBRARY_PATH: $LD_LIBRARY_PATH"

    echo ">>>>>> Main process starting ipcluster"
    echo

    echo "Start time: " > start_time_network_connect.txt
    date >> start_time_network_connect.txt

    echo ">>> Init: $(date)"
    if ! snudda init "${JOBDIR}" --size "${SIMSIZE}" --overwrite --randomseed 1234 --stayInside; then
        echo "Something went wrong during init, aborting!"
        # ipcluster stop  -- cluster lifetime is managed by the job script now
        exit 1          # exit takes 0-255; "exit -1" is non-portable
    fi

    # NOTE: ipcluster is no longer started here; the job script launches
    # ipcontroller/ipengine directly (see Dardel_runSnudda_inside.job).

    echo ">>> Place: $(date)"
    if ! snudda place "${JOBDIR}" --verbose; then
        echo "Something went wrong during placement, aborting!"
        exit 1
    fi

    echo ">>> Detect: $(date)"
    if ! snudda detect "${JOBDIR}" --hvsize 50 --parallel; then
        echo "Something went wrong during detection, aborting!"
        exit 1
    fi

    echo ">>> Prune: $(date)"
    if ! snudda prune "${JOBDIR}" --parallel; then
        echo "Something went wrong during pruning, aborting!"
        exit 1
    fi

    # Disable input generation at the moment

    #echo ">>> Input: "`date`
    # cp -a $SNUDDA_DIR/data/input_config/input-v10-scaled.json ${JOBDIR}/input.json
    cp -a "$SNUDDA_DIR/data/input_config/external-input-dSTR-scaled-v4.json" "${JOBDIR}/input.json"

    # Input generation failure is non-fatal, but no longer silent.
    if ! snudda input "${JOBDIR}" --parallel --time 5; then
        echo "WARNING: input generation failed" >&2
    fi

    #.. Shutdown ipcontroller (asks the hub to quit via the client API)
    echo "Shutting down ipcontroller"

    python ipcontroller_shutdown.py

    date
    #echo "JOB END "`date` start_time_network_connect.txt

    echo "EXITING Dardel_runSnudda_inside.sh"

fi
4 changes: 2 additions & 2 deletions snudda/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,15 +46,15 @@ def snudda_cli():
action="store_true")
init_parser.add_argument("-connectionFile", "--connectionFile", default=None, dest="connection_file",
help="Use connectivity from user specified JSON file")
init_parser.add_argument("--honorStayInside", "--stayInside", default=False, dest="stay_inside", action="store_true")
init_parser.add_argument("-randomseed", "--randomseed", "--seed", default=None, help="Random seed", type=int)
init_parser.add_argument("--profile", help="Run python cProfile", action="store_true")
init_parser.add_argument("--verbose", action="store_true")

place_parser = sub_parsers.add_parser("place")
place_parser.add_argument("path", help="Location of network")
place_parser.add_argument("-randomseed", "--randomseed", "--seed", default=None, help="Random seed", type=int)
place_parser.add_argument("--raytraceBorders", help="Ray traces for more precise mesh edge detection",
action="store_true", dest="raytrace_borders", default=False)
place_parser.add_argument("--honorStayInside", "--stayInside", dest="stay_inside", default=False, action="store_true")
place_parser.add_argument("--profile", help="Run python cProfile", action="store_true")
place_parser.add_argument("--verbose", action="store_true")
place_parser.add_argument("--h5legacy", help="Use legacy hdf5 support", action="store_true")
Expand Down
30 changes: 19 additions & 11 deletions snudda/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,12 +109,14 @@ def init_config_wrapper(self, args):
neurons_dir=args.neurons_dir,
connection_file=args.connection_file,
overwrite=args.overwrite,
random_seed=args.randomseed)
random_seed=args.randomseed,
honor_stay_inside=args.stay_inside)

def init_config(self, network_size,
snudda_data=None,
neurons_dir=None,
connection_file=None,
honor_stay_inside=True, # currently the cli.py defaults to sending False
overwrite=False,
random_seed=None):

Expand Down Expand Up @@ -144,6 +146,7 @@ def init_config(self, network_size,
neurons_dir=neurons_dir,
snudda_data=snudda_data,
config_file=config_file,
honor_stay_inside=honor_stay_inside,
random_seed=random_seed,
connection_override_file=connection_file)

Expand All @@ -161,7 +164,7 @@ def place_neurons_wrapper(self, args):
args : command line arguments from argparse

Example:
snudda place [--raytraceBorders] [--profile] [--verbose] [--h5legacy] [-parallel] path
snudda place [--profile] [--verbose] [--h5legacy] [-parallel] path

"""

Expand All @@ -173,17 +176,17 @@ def place_neurons_wrapper(self, args):
self.place_neurons(random_seed=args.randomseed,
parallel=args.parallel,
ipython_profile=args.ipython_profile,
raytrace_borders=args.raytrace_borders,
h5libver=h5libver,
verbose=args.verbose)
verbose=args.verbose,
honor_stay_inside=args.stay_inside)

def place_neurons(self,
random_seed=None,
parallel=False,
ipython_profile=None,
raytrace_borders=False,
h5libver="latest",
verbose=False):
verbose=False,
honor_stay_inside=False):

# self.networkPath = args.path
print("Placing neurons")
Expand All @@ -203,8 +206,8 @@ def place_neurons(self,
verbose=verbose,
d_view=self.d_view,
h5libver=h5libver,
raytrace_borders=raytrace_borders,
random_seed=random_seed)
random_seed=random_seed,
morphologies_stay_inside=honor_stay_inside)

sp.place()

Expand Down Expand Up @@ -567,18 +570,23 @@ def simulate_wrapper(self, args):
self.simulate(network_file=args.network_file, input_file=args.input_file,
output_file=args.output_file, snudda_data=args.snudda_data,
time=args.time,
mech_dir=args.mech_dir, neuromodulation=args.neuromodulation,
mech_dir=args.mech_dir,
neuromodulation=args.neuromodulation,
disable_synapses=args.disable_synapses,
disable_gj=args.disable_gj,
record_volt=args.record_volt,
record_all=args.record_all,
export_core_neuron=args.exportCoreNeuron,
verbose=args.verbose)

def simulate(self, network_file=None, input_file=None, output_file=None,
def simulate(self,
network_file=None,
input_file=None,
output_file=None,
snudda_data=None,
time=None,
mech_dir=None, neuromodulation=None,
mech_dir=None,
neuromodulation=None,
disable_synapses=False,
disable_gj=False,
record_volt=False,
Expand Down
3 changes: 2 additions & 1 deletion snudda/detect/detect.py
Original file line number Diff line number Diff line change
Expand Up @@ -2301,6 +2301,7 @@ def load_neuron(self, neuron_info, use_cache=True):
"""

neuron_id = neuron_info["neuronID"]

if use_cache and neuron_id in self.neuron_cache:
return self.neuron_cache[neuron_id]

Expand All @@ -2310,7 +2311,7 @@ def load_neuron(self, neuron_info, use_cache=True):
else:
morphology_path = None # Get morphology automatically from morphology_key

print(f"morphology_path = {morphology_path}")
# print(f"morphology_path = {morphology_path}")

# Clone prototype neuron (it is centred, and not rotated)
neuron = self.prototype_neurons[neuron_info["name"]].clone(parameter_key=neuron_info["parameterKey"],
Expand Down
12 changes: 9 additions & 3 deletions snudda/init/init.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
import json
import os.path
import sys
import inspect

import numexpr
#
Expand All @@ -34,7 +35,8 @@ def __init__(self,
neurons_dir=None,
config_file=None,
random_seed=None,
connection_override_file=None):
connection_override_file=None,
honor_stay_inside=False):

"""Constructor

Expand Down Expand Up @@ -99,8 +101,12 @@ def __init__(self,

if struct_def:
for sn in struct_def:
print(f"Adding {sn} with {struct_def[sn]} neurons")
struct_func[sn](num_neurons=struct_def[sn], neurons_dir=neurons_dir)
if "stay_inside" in inspect.getargspec(struct_func[sn]).args:
print(f"Adding {sn} with {struct_def[sn]} neurons (stay_inside={honor_stay_inside})")
struct_func[sn](num_neurons=struct_def[sn], neurons_dir=neurons_dir, stay_inside=honor_stay_inside)
else:
print(f"Adding {sn} with {struct_def[sn]} neurons")
struct_func[sn](num_neurons=struct_def[sn], neurons_dir=neurons_dir)

if connection_override_file:
self.replace_connectivity(connection_file=connection_override_file)
Expand Down
Loading
Loading