Merge pull request #102 from ContextLab/rev-2
Merging revision 2 changes into the master branch
jeremymanning authored Jun 11, 2021
2 parents bec2c4b + eef3c75 commit a42c76d
Showing 53 changed files with 2,125 additions and 1,036 deletions.
1 change: 1 addition & 0 deletions .dockerignore
@@ -0,0 +1 @@
*
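(The single `*` pattern excludes everything from the Docker build context. Since the Dockerfile below installs all dependencies from package repositories and the repository itself is mounted into the container at runtime via `-v`, no local files need to be sent to the Docker daemon at build time.)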
157 changes: 157 additions & 0 deletions Dockerfile
@@ -0,0 +1,157 @@
FROM ubuntu:bionic

LABEL maintainer="Contextual Dynamics Lab <[email protected]>"

ARG DEBIAN_FRONTEND=noninteractive
ARG WORKDIR="/mnt"
ARG NOTEBOOK_IP=0.0.0.0
ARG PORT=9999

ENV LANG=C.UTF-8 \
PATH="/opt/conda/bin:$PATH" \
NOTEBOOK_DIR=$WORKDIR \
NOTEBOOK_IP=$NOTEBOOK_IP \
NOTEBOOK_PORT=$PORT

SHELL ["/bin/bash", "-o", "pipefail", "-c"]

RUN sed -i 's/^#force_color_prompt=yes/force_color_prompt=yes/' /etc/skel/.bashrc \
&& apt-get update --fix-missing \
&& apt-get install -y --no-install-recommends eatmydata \
&& eatmydata apt-get install -y --no-install-recommends \
bzip2 \
ca-certificates \
curl \
gcc \
git \
libfontconfig1-dev \
libgl1-mesa-glx \
mpich \
pkg-config \
sudo \
swig \
vim \
wget \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* \
&& wget --quiet https://repo.anaconda.com/miniconda/Miniconda3-4.5.4-Linux-x86_64.sh -O ~/miniconda.sh \
&& /bin/bash ~/miniconda.sh -b -p /opt/conda \
&& rm ~/miniconda.sh \
&& conda config --set auto_update_conda false \
&& conda config --set notify_outdated_conda false \
&& conda install -y \
_libgcc_mutex=0.1=main \
blas=1.0=mkl \
ca-certificates=2019.5.15=1 \
certifi=2019.6.16=py36_1 \
ipykernel=5.3.4 \
ipython=7.16.1 \
ipywidgets=7.5.1 \
jinja2=2.11.2 \
libedit=3.1.20181209=hc058e9b_0 \
libffi=3.2.1=hd88cf55_4 \
libgcc-ng=8.2.0=hdf63c60_1 \
libgfortran-ng=7.3.0=hdf63c60_0 \
libstdcxx-ng=8.2.0=hdf63c60_1 \
mkl-service=2.0.2=py36h7b6447c_0 \
mkl_fft=1.0.12=py36ha843d7b_0 \
mkl_random=1.0.2=py36hd81dba3_0 \
nbconvert=5.6.1 \
nbformat=5.0.7 \
ncurses=6.1=he6710b0_1 \
notebook=6.1.4 \
numpy-base=1.16.4=py36hde5b4d6_0 \
openssl=1.1.1c=h7b6447c_1 \
pandoc=2.10 \
pip=19.0.3=py36_0 \
prometheus_client=0.8.0 \
python=3.6.8=h0371630_0 \
pyzmq=19.0.2 \
readline=7.0=h7b6447c_5 \
setuptools=40.8.0=py36_0 \
sqlite=3.27.2=h7b6447c_0 \
terminado=0.9.1 \
tk=8.6.8=hbc83047_0 \
tornado=6.0.4 \
traitlets=4.3.3 \
wheel=0.33.1=py36_0 \
widgetsnbextension=3.5.1 \
xz=5.2.4=h14c3975_4 \
zlib=1.2.11=h7b6447c_3 \
&& conda clean -tipsy \
&& pip install \
alabaster==0.7.12 \
babel==2.6.0 \
biopython==1.74 \
chardet==3.0.4 \
citeproc-py==0.4.0 \
cycler==0.10.0 \
deepdish==0.3.6 \
docutils==0.14 \
duecredit==0.7.0 \
future==0.17.1 \
hypertools==0.5.1 \
idna==2.8 \
imagesize==1.1.0 \
intel-openmp==2019.0 \
jinja2==2.10 \
jupyter_contrib_nbextensions==0.5.1 \
kiwisolver==1.0.1 \
llvmlite==0.28.0 \
lxml==4.3.3 \
markupsafe==1.1.1 \
matplotlib==3.0.3 \
mkl==2019.0 \
mock==2.0.0 \
neurosynth==0.3.8 \
nibabel==2.5.0 \
nose==1.3.7 \
numba==0.43.1 \
numexpr==2.6.9 \
numpy==1.16.2 \
packaging==19.0 \
pandas==0.24.2 \
pbr==5.1.3 \
ply==3.11 \
ppca==0.0.4 \
pygments==2.3.1 \
pyparsing==2.3.1 \
python-dateutil==2.8.0 \
pytz==2018.9 \
requests==2.21.0 \
scikit-learn==0.19.2 \
scipy==1.2.1 \
seaborn==0.9.0 \
six==1.12.0 \
snowballstemmer==1.2.1 \
sphinx==2.0.0 \
sphinxcontrib-applehelp==1.0.1 \
sphinxcontrib-devhelp==1.0.1 \
sphinxcontrib-htmlhelp==1.0.1 \
sphinxcontrib-jsmath==1.0.1 \
sphinxcontrib-qthelp==1.0.2 \
sphinxcontrib-serializinghtml==1.1.3 \
tables==3.5.1 \
umap-learn==0.3.8 \
urllib3==1.24.1 \
git+git://github.com/lucywowen/timecorr-1.git@spot_check \
git+https://github.com/FIU-Neuro/brainconn.git \
&& rm -rf ~/.cache/pip \
&& jupyter nbextension enable --py widgetsnbextension --sys-prefix \
&& jupyter notebook --generate-config \
&& ipython profile create \
&& sed -i \
-e 's/^# c.Completer.use_jedi = True/c.Completer.use_jedi = False/' \
-e 's/^#c.Completer.use_jedi = True/c.Completer.use_jedi = False/' \
-e 's/^# c.IPCompleter.use_jedi = True/c.IPCompleter.use_jedi = False/' \
~/.ipython/profile_default/ipython_config.py \
&& mkdir -p /root/.jupyter \
&& echo "from os import getenv" > /root/.jupyter/jupyter_notebook_config.py \
&& echo "c.NotebookApp.ip = getenv(\"NOTEBOOK_IP\")" >> /root/.jupyter/jupyter_notebook_config.py \
&& echo "c.NotebookApp.port = int(getenv(\"NOTEBOOK_PORT\"))" >> /root/.jupyter/jupyter_notebook_config.py \
&& echo "c.NotebookApp.notebook_dir = getenv(\"NOTEBOOK_DIR\")" >> /root/.jupyter/jupyter_notebook_config.py \
&& echo "c.NotebookApp.open_browser = False" >> /root/.jupyter/jupyter_notebook_config.py \
&& echo "c.NotebookApp.allow_root = True" >> /root/.jupyter/jupyter_notebook_config.py \
&& echo "c.FileContentsManager.delete_to_trash = False" >> /root/.jupyter/jupyter_notebook_config.py

WORKDIR $WORKDIR
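For reference, the chain of `echo` commands at the end of the `RUN` instruction above generates the following `/root/.jupyter/jupyter_notebook_config.py` (reconstructed from those lines); it pulls the server settings from the `NOTEBOOK_*` environment variables defined via `ENV` at the top of the Dockerfile:

```python
# /root/.jupyter/jupyter_notebook_config.py, as generated by the Dockerfile.
# The NOTEBOOK_* variables are baked into the image via ENV, so the server
# listens on the intended interface/port and serves the mounted repo folder.
from os import getenv

c.NotebookApp.ip = getenv("NOTEBOOK_IP")
c.NotebookApp.port = int(getenv("NOTEBOOK_PORT"))
c.NotebookApp.notebook_dir = getenv("NOTEBOOK_DIR")
c.NotebookApp.open_browser = False
c.NotebookApp.allow_root = True
c.FileContentsManager.delete_to_trash = False
```

This is why the README below launches notebooks with a bare `jupyter notebook`: the IP, port, notebook directory, and root-access options are already supplied by this config file.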
20 changes: 11 additions & 9 deletions README.md
@@ -16,24 +16,26 @@ root
```

Content of the data folder is provided [here](https://drive.google.com/file/d/1CZYe8eyAkZFuLqfwwlKoeijgkjdW6vFs/view?usp=sharing).
We also include a Dockerfile to reproduce our computational environment. Instructions for use are below (copied and modified from the [MIND](https://github.com/Summer-MIND/mind-tools) repo):
We also include a Dockerfile to reproduce our computational environment. Instructions for use are below:

## One time setup

## Docker setup
1. Install Docker on your computer using the appropriate guide below:
- [OSX](https://docs.docker.com/docker-for-mac/install/#download-docker-for-mac)
- [Windows](https://docs.docker.com/docker-for-windows/install/)
- [Ubuntu](https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/)
- [Debian](https://docs.docker.com/engine/installation/linux/docker-ce/debian/)
2. Launch Docker and adjust the preferences to allocate sufficient resources (e.g., >4 GB of RAM)
3. Build the Docker image by opening a terminal in this repo folder and entering `docker build -t timecorr_paper .`
4. Use the image to create a new container for the workshop
- The command below will create a new container that will map your computer's `Desktop` to `/mnt` within the container, so that location is shared between your host OS and the container. Feel free to change `Desktop` to whatever folder you prefer to share instead, but make sure to provide the full path. The command will also share port `9999` with your host computer so any jupyter notebooks launched from *within* the container will be accessible at `localhost:9999` in your web browser
- `docker run -it -p 9999:9999 --name Timecorr_paper -v ~/Desktop:/mnt timecorr_paper `
4. Use the image to create a new container
- The command below will create a new container that maps your local copy of the repository to `/mnt` inside the container, so that location is shared between your host OS and the container. The command will also share port `9999` with your host computer, so any jupyter notebooks launched from *within* the container will be accessible at `localhost:9999` in your web browser.
- `docker run -it -p 9999:9999 --name Timecorr_paper -v $PWD:/mnt timecorr_paper`
- You should now see the `root@` prefix in your terminal; if so, you've successfully created a container and are running a shell from *inside* it!
5. To launch any of the notebooks: `jupyter notebook --port=9999 --no-browser --ip=0.0.0.0 --allow-root`
5. To launch any of the notebooks: `jupyter notebook`

## Using the container after setup
1. You can always fire up the container by typing the following into a terminal:
- `docker start Timecorr_paper && docker attach Timecorr_paper`
- When you see the `root@` prefix, letting you know you're inside the container
2. Close a running container with `ctrl + d` from the same terminal you used to launch the container, or `docker stop Timecorr_paper` from any other terminal
- `docker start --attach Timecorr_paper`
- When you see the `root@` prefix, you're inside the container
2. Stop a running jupyter notebook server with `ctrl + c`
3. Close a running container with `ctrl + d` or `exit` from the same terminal window you used to launch the container, or `docker stop Timecorr_paper` from any other terminal window
Binary file modified code/figs/PCA_rel_ave_width_10.pdf
Binary file not shown.
Binary file modified code/figs/PCA_rel_ave_width_20.pdf
Binary file not shown.
Binary file modified code/figs/PCA_rel_ave_width_5.pdf
Binary file not shown.
Binary file modified code/figs/PCA_rel_ave_width_50.pdf
Binary file not shown.
Binary file modified code/figs/block_recovery_averaged.pdf
Binary file not shown.
Binary file modified code/figs/constant_recovery_averaged.pdf
Binary file not shown.
Binary file modified code/figs/high_order_sim_block.pdf
Binary file not shown.
Binary file modified code/figs/high_order_sim_constant.pdf
Binary file not shown.
Binary file modified code/figs/high_order_sim_ramping.pdf
Binary file not shown.
Binary file modified code/figs/high_order_sim_random.pdf
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file modified code/figs/ramping_recovery_averaged.pdf
Binary file not shown.
Binary file modified code/figs/random_recovery_averaged.pdf
Binary file not shown.
Binary file added code/figs/sim_heatmaps_features.pdf
Binary file not shown.
Binary file added code/figs/sim_heatmaps_time.pdf
Binary file not shown.
73 changes: 52 additions & 21 deletions code/notebooks/synthetic_data.ipynb

Large diffs are not rendered by default.

191 changes: 0 additions & 191 deletions code/notebooks/timecorr_higher_order.ipynb

This file was deleted.

382 changes: 382 additions & 0 deletions code/notebooks/timecorr_higher_order_search.ipynb

Large diffs are not rendered by default.

Binary file removed code/scripts/__pycache__/config.cpython-36.pyc
Binary file not shown.
3 changes: 2 additions & 1 deletion code/scripts/config.py
@@ -7,7 +7,8 @@

# ====== MODIFY ONLY THE CODE BETWEEN THESE LINES ======
if (socket.gethostname() == 'Lucys-MacBook-Pro-3.local') or (socket.gethostname() == 'vertex.kiewit.dartmouth.edu') or \
        (socket.gethostname() == 'vertex.local') or (socket.gethostname() == 'vpn-two-factor-general-230-141-43.dartmouth.edu'):
        (socket.gethostname() == 'vertex.local') or (socket.gethostname() == 'vpn-two-factor-general-231-129-248.dartmouth.edu') \
        or (socket.gethostname() == 'vpn-investment-office-231-130-116.dartmouth.edu'):
    config['datadir'] = '/Users/lucyowen/Desktop/timecorr_env/timecorr_paper/pieman/data'
    config['workingdir'] = '/Users/lucyowen/Desktop/timecorr_env/timecorr_paper/pieman'
    config['startdir'] = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))  # directory to start the job in
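The hostname check above compares `socket.gethostname()` against each machine name in turn, which gets unwieldy as names accumulate. A more maintainable equivalent would collect the names in a set and test membership once; this is only an illustrative sketch, not part of the commit:

```python
# Hypothetical refactoring (not in this commit): test set membership instead
# of chaining equality comparisons across continuation lines.
LOCAL_HOSTNAMES = {
    'Lucys-MacBook-Pro-3.local',
    'vertex.kiewit.dartmouth.edu',
    'vertex.local',
    'vpn-two-factor-general-231-129-248.dartmouth.edu',
    'vpn-investment-office-231-130-116.dartmouth.edu',
}

if socket.gethostname() in LOCAL_HOSTNAMES:
    config['datadir'] = '/Users/lucyowen/Desktop/timecorr_env/timecorr_paper/pieman/data'
    config['workingdir'] = '/Users/lucyowen/Desktop/timecorr_env/timecorr_paper/pieman'
```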
68 changes: 43 additions & 25 deletions code/scripts/higher_order_sims.py
@@ -6,19 +6,22 @@
from config import config
import timecorr as tc
from matplotlib import pyplot as plt
import seaborn as sns

cond= sys.argv[1]
r= sys.argv[2]
sim_function = sys.argv[1]  # simulation condition used to generate the data
r = sys.argv[2]  # repetition index

F = 4 #number of features
T = 30 #number of timepoints
F = int(sys.argv[3])  # number of features
T = int(sys.argv[4])  # number of timepoints
K = 2  # order
W = int(sys.argv[5])  # width parameter for the weights function
wp = sys.argv[6]  # name of the weights function ('laplace', 'gaussian', or 'mexican_hat')

fname = cond + '_' + str(F) + '_' + str(T) + '_' + str(K)
fname = sim_function

width = 20

results_dir = os.path.join(config['resultsdir'], 'higher_order_sims', cond)
results_dir = os.path.join(config['resultsdir'], 'higher_order_sims',
sim_function)

try:
    if not os.path.exists(results_dir):
@@ -27,7 +30,6 @@
    print(err)



def expanded_vec2mat(v):
    # Convert each timepoint's vectorized correlations back into a flattened
    # (K ** 2-length) full matrix, so recovered order-k correlations can be
    # fed back through tc.timecorr as input for the next order.
    m = tc.vec2mat(v)
    x = np.zeros([v.shape[0], m.shape[0] ** 2])
@@ -36,7 +38,11 @@ def expanded_vec2mat(v):
    return x


laplace = {'name': 'Laplace', 'weights': tc.laplace_weights, 'params': {'scale': width}}
laplace = {'name': 'Laplace', 'weights': tc.laplace_weights, 'params': {'scale': W}}
gaussian = {'name': 'Gaussian', 'weights': tc.gaussian_weights, 'params': {'var': W}}
mexican_hat = {'name': 'Mexican hat', 'weights': tc.mexican_hat_weights, 'params': {'sigma': W}}

weights_parameter = eval(wp)  # look up the weights spec named on the command line

eye_params = {}

@@ -50,25 +56,28 @@ def generate_templates(order=1, **kwargs):
    T = kwargs['T']
    templates = []
    for n in range(order - 1):
        print(n)
        templates.append(next_template)

        expanded_corrmats = tc.vec2mat(next_template)
        K2 = expanded_corrmats.shape[0] ** 2
        next_template = np.zeros([K2, K2, T])
        for t in range(T):
            x = np.atleast_2d(expanded_corrmats[:, :, t].ravel())
            next_template[:, :, t] = x * x.T
            x = expanded_corrmats[:, :, t]
            next_template[:, :, t] = np.kron(x, x)
        next_template = tc.mat2vec(next_template)
        templates.append(next_template)
    return templates



def generate_data(templates):
    order = len(templates) + 1
    adjusted_templates = [templates[-1]]  # generate adjusted templates in reverse order
    next_corrmats = adjusted_templates[-1]

    for n in range(order - 1, 1, -1):
        print(n)
        corrmats = tc.vec2mat(next_corrmats)
        K = corrmats.shape[0]
        sK = int(np.sqrt(K))
@@ -77,6 +86,7 @@ def generate_data(templates):
        draws = np.zeros([sK, sK, T])
        means = tc.vec2mat(templates[n - 2])


        for t in range(T):
            draws[:, :, t] = np.reshape(np.random.multivariate_normal(means[:, :, t].ravel(), corrmats[:, :, t]),
                                        [sK, sK])
@@ -102,7 +112,7 @@ def generate_data(templates):

recovery_performance_all = pd.DataFrame()

templates = generate_templates(order=K, S=1, T=T, K=F, datagen=cond)
templates = generate_templates(order=K, S=1, T=T, K=F, datagen=sim_function)

data, adjusted_templates = generate_data(templates)

@@ -112,33 +122,41 @@ def generate_data(templates):
recovery_performance = pd.DataFrame(index=np.arange(T), columns=np.arange(1, K+1))
recovery_performance.index.name = 'time'
recovery_performance.columns.name = 'order'

next_data = data
recovered_corrs_raw = []
recovered_corrs_smooth = []

for k in np.arange(1, K+1):
    next_recovered_smooth = tc.timecorr(next_data, weights_function=laplace['weights'], weights_params=laplace['params'])
    next_recovered_smooth = tc.timecorr(next_data, weights_function=weights_parameter['weights'],
                                        weights_params=weights_parameter['params'])

    next_recovered_raw = tc.timecorr(next_data, weights_function=eye_weights, weights_params=eye_params)
    recovered_corrs_smooth.append(next_recovered_smooth)
    F_new = get_f(next_recovered_smooth.shape[1])
    for t in np.arange(T):
        recovery_performance.loc[t, k] = np.corrcoef(templates[k-1][t, F_new:], next_recovered_smooth[t, F_new:])[0, 1]

    next_data = expanded_vec2mat(next_recovered_raw)

# recovery_performance.columns = [str(x + 1) for x in np.arange(K)]
#
# recovery_performance['iteration'] = int(r)

#recovery_performance_all = recovery_performance_all.append(recovery_performance)
    next_data = expanded_vec2mat(next_recovered_raw)

recovery_performance.columns = [str(x + 1) for x in np.arange(K)]
recovery_performance['iteration'] = int(r)
recovery_performance_all = recovery_performance_all.append(recovery_performance)

print(recovery_performance)
plt.clf()
plt.plot(recovery_performance['1'])
plt.plot(recovery_performance['2'])
plt.show()

recovery_performance.to_csv(save_file + '.csv')
plt.savefig(results_dir + '2')

# if not os.path.isfile(save_file + '.csv'):
# recovery_performance.to_csv(save_file + '.csv')
# else:
# append_iter = pd.read_csv(save_file + '.csv', index_col=0)
# append_iter = append_iter.append(recovery_performance)
# append_iter.to_csv(save_file + '.csv')

if not os.path.isfile(save_file + '.csv'):
    recovery_performance.to_csv(save_file + '.csv')
else:
    append_iter = pd.read_csv(save_file + '.csv', index_col=0)
    append_iter = append_iter.append(recovery_performance)
    append_iter.to_csv(save_file + '.csv')
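The change from `x * x.T` (on the raveled matrix) to `np.kron(x, x)` in `generate_templates` is subtle but substantive: both arrays contain the same multiset of pairwise products `X[i, j] * X[k, l]`, but the outer product of a raveled matrix is always rank 1, whereas the Kronecker product has rank `rank(X) ** 2`. That plausibly matters downstream, where the templates are used as covariance matrices for `np.random.multivariate_normal` draws in `generate_data`. A minimal sketch illustrating the difference (not part of the repository's code):

```python
import numpy as np

# A small symmetric, full-rank matrix standing in for one timepoint's
# expanded correlation matrix.
X = np.array([[1.0, 0.5],
              [0.5, 1.0]])

# Old construction: outer product of the raveled matrix with itself.
v = np.atleast_2d(X.ravel())
outer = v * v.T       # shape (4, 4)

# New construction: Kronecker product of the matrix with itself.
kron = np.kron(X, X)  # shape (4, 4)

# Same entries, different arrangement, and very different rank structure.
assert np.allclose(np.sort(outer.ravel()), np.sort(kron.ravel()))
print(np.linalg.matrix_rank(outer))  # 1
print(np.linalg.matrix_rank(kron))   # 4 (= rank(X) ** 2)
```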