Skip to content

Commit

Permalink
Merge pull request #232 from Jhsmit/web_plots
Browse files Browse the repository at this point in the history
Plot updates
  • Loading branch information
Jhsmit authored Oct 18, 2021
2 parents 6bdc32c + 43629d1 commit 5b21c26
Show file tree
Hide file tree
Showing 27 changed files with 2,195 additions and 743 deletions.
3 changes: 1 addition & 2 deletions .github/workflows/pytest.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,7 @@ jobs:
pip install codecov
pip install pytest
pip install pytest-cov
pip install -r requirements.txt
pip install -e .
pip install -e .[web,pdf]
- name: Test with pytest
run: |
pytest --cov=./
Expand Down
91 changes: 49 additions & 42 deletions dev/gui/dev_gui_secB.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,54 +9,58 @@
import pickle
from pyhdx.web.apps import main_app
from pyhdx.web.base import DEFAULT_COLORS, STATIC_DIR
from pyhdx.web.utils import load_state
from pyhdx.web.sources import DataSource
from pyhdx.batch_processing import yaml_to_hdxm
from pyhdx.fileIO import csv_to_protein
import panel as pn
import numpy as np
from pathlib import Path
import pandas as pd
import yaml

ctrl = main_app()
directory = Path(__file__).parent
root_dir = directory.parent.parent
data_dir = root_dir / 'tests' / 'test_data'
data_dir = root_dir / 'tests' / 'test_data' / 'input'
test_dir = directory / 'test_data'

fpath_1 = root_dir / 'tests' / 'test_data' / 'ecSecB_apo.csv'
fpath_2 = root_dir / 'tests' / 'test_data' / 'ecSecB_dimer.csv'

fpaths = [fpath_1, fpath_2]
files = [p.read_bytes() for p in fpaths]
yaml_dict = yaml.safe_load(Path(data_dir / 'data_states.yaml').read_text())

# fpaths = [fpath_1, fpath_2]
# files = [p.read_bytes() for p in fpaths]
#
#
# d1 = {
# 'filenames': ['ecSecB_apo.csv', 'ecSecB_dimer.csv'],
# 'd_percentage': 95,
# 'control': ('Full deuteration control', 0.167),
# 'series_name': 'SecB WT apo',
# 'temperature': 30,
# 'temperature_unit': 'celsius',
# 'pH': 8.,
# 'c_term': 165
# }
#
# d2 = {
# 'filenames': ['ecSecB_apo.csv', 'ecSecB_dimer.csv'],
# 'd_percentage': 95,
# 'control': ('Full deuteration control', 0.167),
# 'series_name': 'SecB his dimer apo',
# 'temperature': 30,
# 'temperature_unit': 'celsius',
# 'pH': 8.,
# 'c_term': 165
# }

d1 = {
'filenames': ['ecSecB_apo.csv', 'ecSecB_dimer.csv'],
'd_percentage': 95,
'control': ('Full deuteration control', 0.167),
'series_name': 'SecB WT apo',
'temperature': 30,
'temperature_unit': 'celsius',
'pH': 8.,
'c_term': 165
}

d2 = {
'filenames': ['ecSecB_apo.csv', 'ecSecB_dimer.csv'],
'd_percentage': 95,
'control': ('Full deuteration control', 0.167),
'series_name': 'SecB his dimer apo',
'temperature': 30,
'temperature_unit': 'celsius',
'pH': 8.,
'c_term': 165
}

yaml_dicts = {'testname_123': d1, 'SecB his dimer apo': d2}
#yaml_dicts = {'testname_123': d1, 'SecB his dimer apo': d2}


def reload_dashboard():
data_objs = {k: yaml_to_hdxm(v, data_dir=data_dir) for k, v in yaml_dicts.items()}
data_objs = {k: yaml_to_hdxm(v, data_dir=data_dir) for k, v in yaml_dict.items()}
for k, v in data_objs.items():
v.metadata['name'] = k
ctrl.data_objects = data_objs
Expand Down Expand Up @@ -96,21 +100,24 @@ def reload_dashboard():


def init_dashboard():
file_input = ctrl.control_panels['PeptideFileInputControl']
file_input.input_files = files
file_input.fd_state = 'Full deuteration control'
file_input.fd_exposure = 0.167*60
file_input.pH = 8
file_input.temperature = 273.15 + 30
file_input.d_percentage = 90.

file_input.exp_state = 'SecB WT apo'
file_input.dataset_name = 'SecB_tetramer'
file_input._action_add_dataset()

file_input.exp_state = 'SecB his dimer apo'
file_input.dataset_name = 'SecB_dimer' # todo catch error duplicate name
file_input._action_add_dataset()
for k, v in yaml_dict.items():
load_state(ctrl, v, data_dir=data_dir, name=k)

# file_input = ctrl.control_panels['PeptideFileInputControl']
# file_input.input_files = files
# file_input.fd_state = 'Full deuteration control'
# file_input.fd_exposure = 0.167*60
# file_input.pH = 8
# file_input.temperature = 273.15 + 30
# file_input.d_percentage = 90.
#
# file_input.exp_state = 'SecB WT apo'
# file_input.dataset_name = 'SecB_tetramer'
# file_input._action_add_dataset()
#
# file_input.exp_state = 'SecB his dimer apo'
# file_input.dataset_name = 'SecB_dimer' # todo catch error duplicate name
# file_input._action_add_dataset()

# initial_guess = ctrl.control_panels['InitialGuessControl']
# initial_guess._action_fit()
Expand Down
71 changes: 39 additions & 32 deletions docs/examples/01_basic_usage.ipynb

Large diffs are not rendered by default.

67 changes: 0 additions & 67 deletions docs/examples/04_exporting_output.ipynb

This file was deleted.

433 changes: 433 additions & 0 deletions docs/examples/04_plot_output.ipynb

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions pyhdx/__init__.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
from .models import PeptideMasterTable, PeptideMeasurements, HDXMeasurement, Coverage, HDXMeasurementSet
from .models import PeptideMasterTable, HDXTimepoint, HDXMeasurement, Coverage, HDXMeasurementSet
from .fileIO import read_dynamx
from .fitting_torch import TorchSingleFitResult, TorchBatchFitResult
from .fitting_torch import TorchFitResult
from ._version import get_versions

try:
from .output import Output, Report
from .output import FitReport
except ModuleNotFoundError:
pass

Expand Down
3 changes: 2 additions & 1 deletion pyhdx/batch_processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@


# Conversion factors from time-unit labels to seconds.
time_factors = {"s": 1, "m": 60., "min": 60., "h": 3600, "d": 86400}
# Offsets added to convert a temperature value in the labelled unit to kelvin.
# NOTE(review): this diff view shows both the pre-change (uppercase 'C'/'K')
# and post-change (lowercase 'c'/'k') line of the same assignment; executed in
# sequence the second, lowercase-key dict is the one that takes effect.
temperature_offsets = {'C': 273.15, 'celsius': 273.15, 'K': 0, 'kelvin': 0}
temperature_offsets = {'c': 273.15, 'celsius': 273.15, 'k': 0, 'kelvin': 0}


def yaml_to_hdxmset(yaml_dict, data_dir=None, **kwargs):
"""reads files according to `yaml_dict` spec from `data_dir into HDXMEasurementSet"""
Expand Down
13 changes: 13 additions & 0 deletions pyhdx/config.ini
Original file line number Diff line number Diff line change
Expand Up @@ -5,3 +5,16 @@ n_workers = 10
[fitting]
dtype = float64
device = cpu

[plotting]
# Sizes are in mm
ncols = 2
page_width = 160
cbar_width = 2.5
peptide_coverage_aspect = 3
peptide_mse_aspect = 3
residue_scatter_aspect = 3
deltaG_aspect = 2.5
linear_bars_aspect = 30
loss_aspect = 2.5
rainbow_aspect = 4
9 changes: 9 additions & 0 deletions pyhdx/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,15 @@ def get(self, *args, **kwargs):
"""configparser get"""
return self._config.get(*args, **kwargs)

def getint(self, *args, **kwargs):
    """Delegate to :meth:`configparser.ConfigParser.getint` on the wrapped parser."""
    parser = self._config
    return parser.getint(*args, **kwargs)

def getfloat(self, *args, **kwargs):
    """Delegate to :meth:`configparser.ConfigParser.getfloat` on the wrapped parser."""
    parser = self._config
    return parser.getfloat(*args, **kwargs)

def getboolean(self, *args, **kwargs):
    """Delegate to :meth:`configparser.ConfigParser.getboolean` on the wrapped parser."""
    parser = self._config
    return parser.getboolean(*args, **kwargs)

def set(self, *args, **kwargs):
"""configparser set"""
self._config.set(*args, **kwargs)
Expand Down
21 changes: 10 additions & 11 deletions pyhdx/fileIO.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,8 @@
# Column dtypes enforced when peptide tables are (re)read from csv; the
# underscore-prefixed variants cover the renumbered start/end columns.
PEPTIDE_DTYPES = dict.fromkeys(('start', 'end', '_start', '_end'), int)


Expand Down Expand Up @@ -196,7 +198,7 @@ def csv_to_hdxm(filepath_or_buffer, comment='#', **kwargs):
if df.columns.nlevels == 2:
hdxm_list = []
for state in df.columns.unique(level=0):
subdf = df[state].dropna(how='all')
subdf = df[state].dropna(how='all').astype(PEPTIDE_DTYPES)
m = metadata.get(state, {})
hdxm = pyhdx.models.HDXMeasurement(subdf, **m)
hdxm_list.append(hdxm)
Expand Down Expand Up @@ -343,10 +345,10 @@ def save_fitresult(output_dir, fit_result, log_lines=None):
dataframe_to_file(output_dir / 'losses.csv', fit_result.losses)
dataframe_to_file(output_dir / 'losses.txt', fit_result.losses, fmt='pprint')

if isinstance(fit_result.data_obj, pyhdx.HDXMeasurement):
fit_result.data_obj.to_file(output_dir / 'HDXMeasurement.csv')
if isinstance(fit_result.data_obj, pyhdx.HDXMeasurementSet):
fit_result.data_obj.to_file(output_dir / 'HDXMeasurements.csv')
if isinstance(fit_result.hdxm_set, pyhdx.HDXMeasurement):
fit_result.hdxm_set.to_file(output_dir / 'HDXMeasurement.csv')
if isinstance(fit_result.hdxm_set, pyhdx.HDXMeasurementSet):
fit_result.hdxm_set.to_file(output_dir / 'HDXMeasurements.csv')

loss = f'Total_loss {fit_result.total_loss:.2f}, mse_loss {fit_result.mse_loss:.2f}, reg_loss {fit_result.reg_loss:.2f}' \
f'({fit_result.regularization_percentage:.2f}%)'
Expand Down Expand Up @@ -379,12 +381,9 @@ def load_fitresult(fit_dir):
if pth.is_dir():
fit_result = csv_to_dataframe(fit_dir / 'fit_result.csv')
losses = csv_to_dataframe(fit_dir / 'losses.csv')
try:
data_obj = csv_to_hdxm(fit_dir / 'HDXMeasurement.csv')
result_klass = pyhdx.fitting_torch.TorchSingleFitResult
except FileNotFoundError:
data_obj = csv_to_hdxm(fit_dir / 'HDXMeasurements.csv')
result_klass = pyhdx.fitting_torch.TorchBatchFitResult

data_obj = csv_to_hdxm(fit_dir / 'HDXMeasurements.csv')
result_klass = pyhdx.fitting_torch.TorchFitResult
elif pth.is_file():
raise DeprecationWarning('`load_fitresult` only loads from fit result directories')
fit_result = csv_to_dataframe(fit_dir)
Expand Down
Loading

0 comments on commit 5b21c26

Please sign in to comment.