From 211ad042e5529a17255745ef3ac47f216d744624 Mon Sep 17 00:00:00 2001
From: "tfalque.ext"
Date: Tue, 25 Jan 2022 18:11:46 +0100
Subject: [PATCH] :ambulance: fixes a problem with dependencies

---
 docs/wallet.md             |  2 +-
 metrics/__init__.py        |  2 +-
 metrics/wallet/analysis.py | 29 ++++++++++++++++-------------
 setup.py                   | 13 +++++++++++--
 4 files changed, 29 insertions(+), 17 deletions(-)

diff --git a/docs/wallet.md b/docs/wallet.md
index 9609bcc..7bcba5d 100644
--- a/docs/wallet.md
+++ b/docs/wallet.md
@@ -752,7 +752,7 @@ To make an optimality analysis, the user needs to parse and get back some needed
 - the usual `input`, `experiment_ware`, `cpu_time`, `timeout` columns
 - the additional columns:
   - `bound_list` is the list of all found bounds during an experiment
-  - `bound_list` is the corresponding timestamp of each bound of bound_list
+  - `timestamp_list` is the corresponding timestamp of each bound of `bound_list`
   - `objective` is equal to `min` for a minimization problem else `max`
   - `status` informs the final status of the experiment (`COMPLETE` or `INCOMPLETE`)
   - `best_bound` is the final found bound before the end of the resolution
diff --git a/metrics/__init__.py b/metrics/__init__.py
index 55b76eb..b03dd8d 100644
--- a/metrics/__init__.py
+++ b/metrics/__init__.py
@@ -35,7 +35,7 @@ __summary__ = 'rEproducible sofTware peRformance analysIs in perfeCt Simplicity'
 __keywords__ = 'reproducible software performance analysis'
 __uri__ = 'https://github.com/crillab/metrics'

-__version__ = '1.1'
+__version__ = '1.1.1'

 __author__ = 'Thibault Falque, Romain Wallon, Hugues Wattez'
 __email__ = 'thibault.falque@exakis-nelite.com, wallon@cril.fr, wattez@cril.fr'
diff --git a/metrics/wallet/analysis.py b/metrics/wallet/analysis.py
index 5e373ff..0846316 100644
--- a/metrics/wallet/analysis.py
+++ b/metrics/wallet/analysis.py
@@ -79,7 +79,7 @@ def export_data_frame(data_frame, output=None, commas_for_number=False, dollars_
             buf=file,
             escape=False,
             index_names=False,
-            #bold_rows=True,
+            # bold_rows=True,
             **kwargs
         )
     elif ext == 'csv':
@@ -224,7 +224,7 @@ def check_success(self, is_success):
         self._data_frame[USER_SUCCESS_COL] = self._data_frame.apply(is_success, axis=1)
         self._check_global_success()

-    def check_missing_experiments(self, inputs: List[str]=None, experiment_wares: List[str]=None):
+    def check_missing_experiments(self, inputs: List[str] = None, experiment_wares: List[str] = None):
         """
         Check missing experiments
         @param inputs: the entire list of inputs (complete the missing ones)
@@ -629,11 +629,12 @@ def _make_list(l):
     return [l]


-def default_explode(df, samp):
+def default_explode(df, samp, objective):
     d = df.iloc[0].to_dict()
     times = _make_list(d.pop('timestamp_list'))
     bounds = _make_list(d.pop('bound_list'))
-    if d.pop('objective') == 'min':
+
+    if objective(d.pop('objective', None)):
         bounds = [-x for x in bounds]

     if len(bounds) != len(times):
@@ -790,7 +791,7 @@ class OptiAnalysis(BasicAnalysis):
     """

     def __init__(self, input_file: str = None, data_frame: DataFrame = None,
-                 basic_analysis: BasicAnalysis = None, func=default_explode, samp=None):
+                 basic_analysis: BasicAnalysis = None, func=default_explode, samp=None, objective=lambda s: s == 'min'):
         """
         Constructs an optimality analysis by giving an 'input_file' to parse the campaign logs
         OR a 'data_frame' of an already built analysis OR a 'basic_analysis' with the necessary data to
@@ -801,24 +802,26 @@ def __init__(self, input_file: str = None, data_frame: DataFrame = None,
         @param func: the function that permits to explode the current experiments (a
         default one is given)
         @param samp: the sampling times to apply on the exploding function
+        @param objective: a lambda that returns True when the value of the 'objective' column denotes a minimization problem
         """
         if input_file is not None or basic_analysis is not None:
             super().__init__(
                 input_file,
                 None if basic_analysis is None else basic_analysis.data_frame
             )
-            self._explode_experiments(func, samp)
+            self._explode_experiments(func, samp, objective)
         elif data_frame is not None:
             self._data_frame = data_frame
         else:
             raise AttributeError('input_file or data_frame or basic_analysis needs to be given.')

-    def _explode_experiments(self, func, samp=None):
+    def _explode_experiments(self, func, samp, objective):
         self.apply_on_groupby(
             by=[EXPERIMENT_INPUT, EXPERIMENT_XP_WARE],
             func=lambda df: func(
                 df,
-                [self.data_frame.timeout.max()] if samp is None else samp
+                [self.data_frame.timeout.max()] if samp is None else samp,
+                objective
             ),
             inplace=True
         )
@@ -875,11 +878,11 @@ def _contribution_agg(sli: pd.DataFrame):
     if first[SUCCESS_COL]:
         return pd.Series([
-            first[EXPERIMENT_XP_WARE],
-            first[EXPERIMENT_CPU_TIME],
-            not second[SUCCESS_COL],
-            second[EXPERIMENT_CPU_TIME] if second[SUCCESS_COL] else 1000000
-        ],
+                first[EXPERIMENT_XP_WARE],
+                first[EXPERIMENT_CPU_TIME],
+                not second[SUCCESS_COL],
+                second[EXPERIMENT_CPU_TIME] if second[SUCCESS_COL] else 1000000
+            ],
             index=index
         )
diff --git a/setup.py b/setup.py
index 7e57553..d044fd9 100644
--- a/setup.py
+++ b/setup.py
@@ -49,8 +49,17 @@ def requirements() -> List[str]:

     :return: The dependencies of Metrics.
     """
-    with open('requirements.txt') as file:
-        return [line.rstrip() for line in file]
+    return [
+        'crillab-autograph',
+        'dash-bootstrap-components',
+        'deprecated',
+        'jsonpickle',
+        'myst-parser',
+        'pandas',
+        'pyparsing',
+        'tenacity',
+        'pyyaml',
+    ]


 setup(
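
Note (not part of the patch itself): a minimal usage sketch of the new `objective` parameter introduced above. The campaign file name and the 'MAXIMIZE' label are illustrative assumptions; the `OptiAnalysis` constructor signature and the default `lambda s: s == 'min'` come from the patched metrics/wallet/analysis.py.

    # Sketch only: shows how the objective lambda selects the optimization direction.
    from metrics.wallet.analysis import OptiAnalysis

    # The lambda receives the value of each experiment's 'objective' column and must
    # return True for minimization problems; default_explode then negates the bounds
    # so that all experiments are compared in the same direction.
    analysis = OptiAnalysis(
        input_file='my_campaign.yml',         # hypothetical campaign description file
        objective=lambda s: s != 'MAXIMIZE',  # illustrative labels; adapt to your data
    )

Omitting `objective` keeps the previous behaviour, since the default `lambda s: s == 'min'` reproduces the old `d.pop('objective') == 'min'` test.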