From 77ebe8772a3501055df66dd3f957bc31c15c4759 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Wed, 9 Oct 2024 17:51:03 +0200 Subject: [PATCH 1/5] Update changelog Signed-off-by: Johannes Mueller --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 774246d7..3e17c0e5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ In this file noteworthy changes of new releases of pyLife are documented since ### Improvements * Sanitize checks for Wöhler analysis (#108) +* Error messages when odbclient gets unsupported element types (#64) ### Bugfixes From 50db0fc81e8f03056cc676275379c731facca7f7 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Thu, 10 Oct 2024 13:43:54 +0200 Subject: [PATCH 2/5] Load collective to histogram WIP Signed-off-by: Johannes Mueller --- CHANGELOG.md | 5 + .../stress/collective/load_collective.py | 191 ++++++++++++++++- .../stress/collective/test_load_collective.py | 195 ++++++++++++++++-- 3 files changed, 368 insertions(+), 23 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e17c0e5..70834f5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,10 +5,15 @@ In this file noteworthy changes of new releases of pyLife are documented since ## unreleased +### New features + +* New method `LoadCollective.histogram()` (#107) + ### Improvements * Sanitize checks for Wöhler analysis (#108) * Error messages when odbclient gets unsupported element types (#64) +* Improved documentation ### Bugfixes diff --git a/src/pylife/stress/collective/load_collective.py b/src/pylife/stress/collective/load_collective.py index ade4c6d5..6c9111f2 100644 --- a/src/pylife/stress/collective/load_collective.py +++ b/src/pylife/stress/collective/load_collective.py @@ -169,34 +169,209 @@ def shift(self, diffs): return obj.load_collective def range_histogram(self, bins, axis=None): - """Calculate the histogram of range values along a given axis. 
+ """Calculate the histogram of cycles for range intervals along a given axis. Parameters ---------- bins : int, sequence of scalars or pd.IntervalIndex The bins of the histogram to be calculated + axis : str, optional + The index axis along which the histogram is calculated. If missing + the histogram is calculated over the whole collective. + + Returns ------- range histogram : :class:`~pylife.pylife.stress.LoadHistogram` - axis : str, optional - The index axis along which the histogram is calculated. If missing - the histogram is calculated over the whole collective. + + Note + ---- + This resulting histogram does not contain any information on the mean + stress. Neither does it perform any kind of mean stress transformation + + See also + -------- + histogram + + Examples + -------- + Calculate a range histogram of a simple load collective + + >>> df = pd.DataFrame( + ... {'range': [1.0, 2.0, 1.0, 2.0, 1.0], 'mean': [0, 0, 0, 0, 0]}, + ... columns=['range', 'mean'], + ... ) + >>> df.load_collective.range_histogram([0, 1, 2, 3]).to_pandas() + range + (0, 1] 0 + (1, 2] 3 + (2, 3] 2 + Name: cycles, dtype: int64 + + Calculate a range histogram of a load collective collection for + multiple nodes. The axis along which to aggregate the histogram is + given as ``cycle_number``. + + >>> element_idx = pd.Index([10, 20, 30], name='element_id') + >>> cycle_idx = pd.Index([0, 1, 2], name='cycle_number') + >>> index = pd.MultiIndex.from_product((element_idx, cycle_idx)) + + >>> df = pd.DataFrame({ + ... 'range': [1., 2., 2., 0., 1., 2., 1., 1., 2.], + ... 'mean': [0, 0, 0, 0, 0, 0, 0, 0, 0] + ... 
}, columns=['range', 'mean'], index=index) + + >>> h = df.load_collective.range_histogram([0, 1, 2, 3], 'cycle_number') + >>> h.to_pandas() + element_id range + 10 (0, 1] 0 + (1, 2] 1 + (2, 3] 2 + 20 (0, 1] 1 + (1, 2] 1 + (2, 3] 1 + 30 (0, 1] 0 + (1, 2] 2 + (2, 3] 1 + Name: cycles, dtype: int64 + """ def make_histogram(group): cycles, intervals = np.histogram(group * 2., bins) idx = pd.IntervalIndex.from_breaks(intervals, name='range') return pd.Series(cycles, index=idx, name='cycles') - if isinstance(bins, pd.IntervalIndex): + if isinstance(bins, pd.IntervalIndex) or isinstance(bins, pd.arrays.IntervalArray): bins = np.append(bins.left[0], bins.right) if axis is None: return LoadHistogram(make_histogram(self.amplitude)) - result = pd.Series(self.amplitude - .groupby(self._obj.index.droplevel(axis).names) - .apply(make_histogram), name='cycles') + result = pd.Series( + self.amplitude.groupby(self._levels_from_axis(axis)).apply( + make_histogram + ), + name='cycles', + ) return LoadHistogram(result) + + def histogram(self, bins, axis=None): + """Calculate the histogram of cycles along a given axis. + + Parameters + ---------- + bins : int, sequence of scalars or pd.IntervalIndex + The bins of the histogram to be calculated + + axis : str, optional + The index axis along which the histogram is calculated. If missing + the histogram is calculated over the whole collective. + + Returns + ------- + range histogram : :class:`~pylife.pylife.stress.LoadHistogram` + + See also + -------- + range_histogram + + Examples + -------- + Calculate a range histogram of a simple load collective + + >>> df = pd.DataFrame( + ... {'range': [1.0, 2.0, 1.0, 2.0, 1.0], 'mean': [0.5, 1.5, 1.0, 1.5, 0.5]}, + ... columns=['range', 'mean'], + ... 
) + >>> df.load_collective.histogram([0, 1, 2, 3]).to_pandas() + range mean + (0, 1] (0, 1] 0.0 + (1, 2] 0.0 + (2, 3] 0.0 + (1, 2] (0, 1] 2.0 + (1, 2] 1.0 + (2, 3] 0.0 + (2, 3] (0, 1] 0.0 + (1, 2] 2.0 + (2, 3] 0.0 + Name: cycles, dtype: float64 + + Calculate a range histogram of a load collective collection for + multiple nodes. The axis along which to aggregate the histogram is + given as ``cycle_number``. + + >>> element_idx = pd.Index([10, 20], name='element_id') + >>> cycle_idx = pd.Index([0, 1, 2], name='cycle_number') + >>> index = pd.MultiIndex.from_product((element_idx, cycle_idx)) + + >>> df = pd.DataFrame({ + ... 'range': [1., 2., 2., 0., 1., 2.], + ... 'mean': [0.5, 1.0, 1.0, 0.0, 1.0, 1.5] + ... }, columns=['range', 'mean'], index=index) + + >>> h = df.load_collective.histogram([0, 1, 2, 3], 'cycle_number') + >>> h.to_pandas() + element_id range mean + 10 (0, 1] (0, 1] 0.0 + (1, 2] 0.0 + (2, 3] 0.0 + (1, 2] (0, 1] 1.0 + (1, 2] 0.0 + (2, 3] 0.0 + (2, 3] (0, 1] 0.0 + (1, 2] 2.0 + (2, 3] 0.0 + 20 (0, 1] (0, 1] 1.0 + (1, 2] 0.0 + (2, 3] 0.0 + (1, 2] (0, 1] 0.0 + (1, 2] 1.0 + (2, 3] 0.0 + (2, 3] (0, 1] 0.0 + (1, 2] 1.0 + (2, 3] 0.0 + Name: cycles, dtype: float64 + + """ + def make_histogram(group): + cycles, range_bins, mean_bins = np.histogram2d( + group["range"], group["meanstress"], bins + ) + + return pd.Series( + cycles.ravel(), + name="cycles", + index=pd.MultiIndex.from_product( + [ + pd.IntervalIndex.from_breaks(range_bins), + pd.IntervalIndex.from_breaks(mean_bins), + ], + names=["range", "mean"], + ), + ) + + range_mean = pd.DataFrame( + {'range': self.amplitude * 2, 'meanstress': self.meanstress}, + index=self._obj.index, + ) + + if isinstance(bins, pd.IntervalIndex) or isinstance(bins, pd.arrays.IntervalArray): + bins = np.append(bins.left[0], bins.right) + + if axis is None: + return LoadHistogram(make_histogram(range_mean)) + + result = pd.Series( + range_mean.groupby(self._levels_from_axis(axis)) + .apply(make_histogram) + .stack(['range', 
'mean'], future_stack=True), + name="cycles", + ) + + return LoadHistogram(result) + + def _levels_from_axis(self, axis): + return [lv for lv in self._obj.index.names if lv not in [axis] and lv is not None] diff --git a/tests/stress/collective/test_load_collective.py b/tests/stress/collective/test_load_collective.py index 917e4ebe..1bc7fcf4 100644 --- a/tests/stress/collective/test_load_collective.py +++ b/tests/stress/collective/test_load_collective.py @@ -382,13 +382,15 @@ def test_load_collective_mean_range_shift_scalar(df, expected_amplitude, expecte ]) def test_load_collective_range_histogram_alter_bins(bins, expected_index_tuples, expected_data): df = pd.DataFrame({ - 'range': [1., 2., 1.], - 'mean': [0, 0, 0] + 'range': [1.0, 2.0, 1.0], + 'mean': [0.0, 0.0, 0.0] }, columns=['range', 'mean']) - expected = pd.Series(expected_data, - name='cycles', - index=pd.IntervalIndex.from_tuples(expected_index_tuples, name='range')) + expected = pd.Series( + expected_data, + name='cycles', + index=pd.IntervalIndex.from_tuples(expected_index_tuples, name='range'), + ) result = df.load_collective.range_histogram(bins) @@ -396,10 +398,10 @@ def test_load_collective_range_histogram_alter_bins(bins, expected_index_tuples, def test_load_collective_range_histogram_alter_ranges(): - df = pd.DataFrame({ - 'range': [1., 2., 1., 2., 1], - 'mean': [0, 0, 0, 0, 0] - }, columns=['range', 'mean']) + df = pd.DataFrame( + {'range': [1.0, 2.0, 1.0, 2.0, 1], 'mean': [0.0, 0.0, 0.0, 0.0, 0.0]}, + columns=['range', 'mean'], + ) expected_index = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)], name='range') expected = pd.Series([0, 3, 2], name='cycles', index=expected_index) @@ -411,8 +413,8 @@ def test_load_collective_range_histogram_alter_ranges(): def test_load_collective_range_histogram_interval_index(): df = pd.DataFrame({ - 'range': [1., 2., 1., 2., 1], - 'mean': [0, 0, 0, 0, 0] + 'range': [1.0, 2.0, 1.0, 2.0, 1.0], + 'mean': [0.0, 0.0, 0.0, 0.0, 0.0] }, columns=['range', 'mean']) 
expected_index = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)], name='range') @@ -423,19 +425,34 @@ def test_load_collective_range_histogram_interval_index(): pd.testing.assert_series_equal(result.to_pandas(), expected) +def test_load_collective_range_histogram_interval_arrays(): + df = pd.DataFrame({ + 'range': [1.0, 2.0, 1.0, 2.0, 1.0], + 'mean': [0.0, 0.0, 0.0, 0.0, 0.0] + }, columns=['range', 'mean']) + + expected_index = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)], name='range') + expected = pd.Series([0, 3, 2], name='cycles', index=expected_index) + + intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), (2, 3)]) + result = df.load_collective.range_histogram(intervals) + + pd.testing.assert_series_equal(result.to_pandas(), expected) + + def test_load_collective_range_histogram_unnested_grouped(): element_idx = pd.Index([10, 20, 30], name='element_id') cycle_idx = pd.Index([0, 1, 2], name='cycle_number') idx = pd.MultiIndex.from_product((element_idx, cycle_idx)) df = pd.DataFrame({ - 'range': [1., 2., 1., 2., 1., 2., 1., 1., 1], + 'range': [0., 1., 2., 0., 1., 2., 0., 1., 2.], 'mean': [0, 0, 0, 0, 0, 0, 0, 0, 0] }, columns=['range', 'mean'], index=idx) expected_intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)], name='range') expected_index = pd.MultiIndex.from_product([element_idx, expected_intervals]) - expected = pd.Series([0, 2, 1, 0, 1, 2, 0, 3, 0], name='cycles', index=expected_index) + expected = pd.Series(1, name='cycles', index=expected_index) result = df.load_collective.range_histogram([0, 1, 2, 3], 'cycle_number') @@ -449,8 +466,8 @@ def test_load_collective_range_histogram_nested_grouped(): idx = pd.MultiIndex.from_product((element_idx, node_idx, cycle_idx)) df = pd.DataFrame({ - 'range': [1., 2., 1., 2., 1., 2., 1., 1.], - 'mean': [0, 0, 0, 0, 0, 0, 0, 0] + 'range': [1.0, 2.0, 1.0, 2.0, 1.0, 2.0, 1.0, 1.0], + 'mean': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] }, columns=['range', 'mean'], index=idx) 
expected_intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)], name='range') @@ -483,3 +500,151 @@ def test_load_collective_strange_shift(): }, index=expected_index) pd.testing.assert_frame_equal(result, expected) + + +# GH-107 +@pytest.mark.parametrize('bins, expected_index_tuples, expected_data', [ + ([0, 1, 2, 3], [(0, 1), (1, 2), (2, 3)], [0, 0, 0, 2, 0, 0, 0, 1, 0]), + ([0, 2, 4], [(0, 2), (2, 4)], [2, 0, 1, 0]) +]) +def test_load_collective_histogram_alter_bins(bins, expected_index_tuples, expected_data): + df = pd.DataFrame( + {'range': [1.5, 2.5, 1.5], 'mean': [0.75, 1.25, 0.75]}, columns=['range', 'mean'] + ) + + expected_intervals = pd.IntervalIndex.from_tuples(expected_index_tuples) + expected = pd.Series( + expected_data, + name='cycles', + index=pd.MultiIndex.from_product( + [expected_intervals, expected_intervals], names=['range', 'mean'] + ), + dtype=np.float64 + ) + + result = df.load_collective.histogram(bins) + + pd.testing.assert_series_equal(result.to_pandas(), expected) + + +# GH-107 +def test_load_collective_histogram_alter_ranges(): + df = pd.DataFrame({ + 'range': [1., 2., 1., 2., 1], + 'mean': [0.5, 1.0, 0.5, 1.0, 0.5] + }, columns=['range', 'mean']) + + expected_intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)]) + expected = pd.Series( + [0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 2.0, 0.0], + name='cycles', + index=pd.MultiIndex.from_product( + [expected_intervals, expected_intervals], names=['range', 'mean'] + ), + ) + + result = df.load_collective.histogram([0, 1, 2, 3]) + + pd.testing.assert_series_equal(result.to_pandas(), expected) + + +# GH-107 +def test_load_collective_histogram_interval_index(): + df = pd.DataFrame({ + 'range': [1., 2., 1., 2., 1], + 'mean': [0.5, 1.0, 0.5, 1.0, 0.5] + }, columns=['range', 'mean']) + + expected_intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)]) + expected = pd.Series( + [0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 2.0, 0.0], + name='cycles', + 
index=pd.MultiIndex.from_product( + [expected_intervals, expected_intervals], names=['range', 'mean'] + ), + ) + + result = df.load_collective.histogram(expected_intervals) + + pd.testing.assert_series_equal(result.to_pandas(), expected) + + +# GH-107 +def test_load_collective_histogram_interval_array(): + df = pd.DataFrame({ + 'range': [1., 2., 1., 2., 1], + 'mean': [0.5, 1.0, 0.5, 1.0, 0.5] + }, columns=['range', 'mean']) + + expected_intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)]) + expected = pd.Series( + [0.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 2.0, 0.0], + name='cycles', + index=pd.MultiIndex.from_product( + [expected_intervals, expected_intervals], names=['range', 'mean'] + ), + ) + + intervals = pd.arrays.IntervalArray.from_tuples([(0, 1), (1, 2), (2, 3)]) + result = df.load_collective.histogram(intervals) + + pd.testing.assert_series_equal(result.to_pandas(), expected) + + +# GH-107 +def test_load_collective_histogram_unnested_grouped(): + element_idx = pd.Index([10, 20, 30], name='element_id') + cycle_idx = pd.Index([0, 1, 2], name='cycle_number') + idx = pd.MultiIndex.from_product((element_idx, cycle_idx)) + + df = pd.DataFrame({ + 'range': [1., 2., 1., 2., 1., 2., 1., 1., 1], + 'mean': [0, 0, 0, 0, 0, 0, 0, 0, 0] + }, columns=['range', 'mean'], index=idx) + + expected_intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)]) + expected_ranges = expected_intervals.set_names(['range']) + expected_means = expected_intervals.set_names(['mean']) + + expected_index = pd.MultiIndex.from_product( + [element_idx, expected_ranges, expected_means] + ) + expected = pd.Series( + [0, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0], + name='cycles', + index=expected_index, + dtype=np.float64, + ) + + result = df.load_collective.histogram([0, 1, 2, 3], 'cycle_number') + pd.testing.assert_series_equal(result.to_pandas(), expected) + + +# GH-107 +def test_load_collective_histogram_nested_grouped(): + element_idx = 
pd.Index([10, 20], name='element_id') + node_idx = pd.Index([100, 101], name='node_id') + cycle_idx = pd.Index([0, 1], name='cycle_number') + idx = pd.MultiIndex.from_product((element_idx, node_idx, cycle_idx)) + + df = pd.DataFrame({ + 'range': [1., 2., 1., 2., 1., 2., 1., 2.], + 'mean': [0, 0, 0, 0, 0, 0, 0, 0] + }, columns=['range', 'mean'], index=idx) + + expected_intervals = pd.IntervalIndex.from_tuples([(0, 1), (1, 2), (2, 3)]) + expected_ranges = expected_intervals.set_names(['range']) + expected_means = expected_intervals.set_names(['mean']) + expected_index = pd.MultiIndex.from_product( + [element_idx, node_idx, expected_ranges, expected_means] + ) + expected = pd.Series( + [0, 0, 0, 1, 0, 0, 1, 0, 0] * 4, + name='cycles', + index=expected_index, + dtype=np.float64, + ) + + result = df.load_collective.histogram([0, 1, 2, 3], 'cycle_number') + + pd.testing.assert_series_equal(result.to_pandas(), expected) From de1d2ee9d2439d2c0b72927603ae0acd65866a49 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Thu, 10 Oct 2024 15:49:38 +0200 Subject: [PATCH 3/5] Filter FutureWarning to accommodate python-3.8 Signed-off-by: Johannes Mueller --- .../stress/collective/load_collective.py | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/pylife/stress/collective/load_collective.py b/src/pylife/stress/collective/load_collective.py index 6c9111f2..30afcb9d 100644 --- a/src/pylife/stress/collective/load_collective.py +++ b/src/pylife/stress/collective/load_collective.py @@ -17,14 +17,17 @@ __author__ = "Johannes Mueller" __maintainer__ = __author__ -from .abstract_load_collective import AbstractLoadCollective -from .load_histogram import LoadHistogram +import warnings import pandas as pd import numpy as np from pylife import PylifeSignal +from .abstract_load_collective import AbstractLoadCollective +from .load_histogram import LoadHistogram + + @pd.api.extensions.register_dataframe_accessor('load_collective') class 
LoadCollective(PylifeSignal, AbstractLoadCollective): """A Load collective. @@ -364,12 +367,15 @@ def make_histogram(group): if axis is None: return LoadHistogram(make_histogram(range_mean)) - result = pd.Series( - range_mean.groupby(self._levels_from_axis(axis)) - .apply(make_histogram) - .stack(['range', 'mean'], future_stack=True), - name="cycles", - ) + # TODO: Warning filter can be dropped as soon as python-3.8 support is dropped + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=FutureWarning) + result = pd.Series( + range_mean.groupby(self._levels_from_axis(axis)) + .apply(make_histogram) + .stack(['range', 'mean']), + name="cycles", + ) return LoadHistogram(result) From 0dad6d7d3db367722bd9027a2c9b84cb31edb9e9 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Thu, 10 Oct 2024 15:41:18 +0200 Subject: [PATCH 4/5] Use doctests Signed-off-by: Johannes Mueller --- setup.cfg | 5 +- src/pylife/core/pylifesignal.py | 11 +-- src/pylife/materialdata/woehler/bayesian.py | 1 + src/pylife/mesh/meshsignal.py | 46 +++++++---- src/pylife/strength/meanstress.py | 10 +-- src/pylife/utils/histogram.py | 21 +++-- src/pylife/vmap/vmap_import.py | 86 +++++++++++---------- 7 files changed, 101 insertions(+), 79 deletions(-) diff --git a/setup.cfg b/setup.cfg index 8f3c5fb1..93cf728d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -115,15 +115,18 @@ extras = # Comment those flags to avoid this py.test issue. 
addopts = --cov src/pylife --cov-append -m "not slow_acceptance and not demos" + --doctest-modules + --ignore=src/pylife/materialdata/woehler/bayesian.py norecursedirs = dist build .tox -testpaths = tests +testpaths = tests src/pylife markers = slow_acceptance: long running acceptance test (not run by default) demos: demo notebooks by testbook +doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL [bdist_wheel] # Use this option if your package is pure-python diff --git a/src/pylife/core/pylifesignal.py b/src/pylife/core/pylifesignal.py index 352f30fc..eaf8f024 100644 --- a/src/pylife/core/pylifesignal.py +++ b/src/pylife/core/pylifesignal.py @@ -273,10 +273,11 @@ def __init__(self, obj): def bar(df): return pd.DataFrame({'baz': df['foo'] + df['bar']}) - >>> df = pd.DataFrame({'foo': [1.0, 2.0], 'bar': [-1.0, -2.0]}) - >>> df.foo.bar() - baz - 0 0.0 - 1 0.0 + df = pd.DataFrame({'foo': [1.0, 2.0], 'bar': [-1.0, -2.0]}) + df.foo.bar() + + baz + 0 0.0 + 1 0.0 """ return cls._register_method(method_name) diff --git a/src/pylife/materialdata/woehler/bayesian.py b/src/pylife/materialdata/woehler/bayesian.py index 2ba2044c..92444c31 100644 --- a/src/pylife/materialdata/woehler/bayesian.py +++ b/src/pylife/materialdata/woehler/bayesian.py @@ -34,6 +34,7 @@ __author__ = "Mustapha Kassem" __maintainer__ = "Johannes Mueller" + raise NotImplementedError( "pyLife's Bayesian Wöhler analyzer has been shutdown. " "See documentation for details." diff --git a/src/pylife/mesh/meshsignal.py b/src/pylife/mesh/meshsignal.py index ebfb160e..97e648ea 100644 --- a/src/pylife/mesh/meshsignal.py +++ b/src/pylife/mesh/meshsignal.py @@ -30,20 +30,25 @@ -------- Read in a mesh from a vmap file: - ->>> df = (vm = pylife.vmap.VMAPImport('demos/plate_with_hole.vmap') - .make_mesh('1', 'STATE-2') - .join_variable('STRESS_CAUCHY') - .join_variable('DISPLACEMENT') - .to_frame()) +>>> from pylife.vmap import VMAPImport +>>> df = ( +... VMAPImport('demos/plate_with_hole.vmap') +... 
.make_mesh('1', 'STATE-2') +... .join_coordinates() +... .join_variable('STRESS_CAUCHY') +... .join_variable('DISPLACEMENT') +... .to_frame() +... ) >>> df.head() - x y z S11 S22 S33 S12 S13 S23 dx dy dz -element_id node_id -1 1734 14.897208 5.269875 0.0 27.080811 6.927080 0.0 -13.687358 0.0 0.0 0.005345 0.000015 0.0 - 1582 14.555333 5.355806 0.0 28.319006 1.178649 0.0 -10.732705 0.0 0.0 0.005285 0.000003 0.0 - 1596 14.630658 4.908741 0.0 47.701195 5.512213 0.0 -17.866833 0.0 0.0 0.005376 0.000019 0.0 - 4923 14.726271 5.312840 0.0 27.699907 4.052865 0.0 -12.210032 0.0 0.0 0.005315 0.000009 0.0 - 4924 14.592996 5.132274 0.0 38.010101 3.345431 0.0 -14.299768 0.0 0.0 0.005326 0.000013 0.0 + x y z ... dx dy dz +element_id node_id ... +1 1734 14.897208 5.269875 0.0 ... 0.005345 0.000015 0.0 + 1582 14.555333 5.355806 0.0 ... 0.005285 0.000003 0.0 + 1596 14.630658 4.908741 0.0 ... 0.005376 0.000019 0.0 + 4923 14.726271 5.312840 0.0 ... 0.005315 0.000009 0.0 + 4924 14.592996 5.132274 0.0 ... 0.005326 0.000013 0.0 + +[5 rows x 12 columns] Get the coordinates of the mesh. @@ -210,10 +215,19 @@ def vtk_data(self): Example ------- >>> import pyvista as pv - >>> grid = pv.UnstructuredGrid(*our_mesh.mesh.vtk_data()) + >>> from pylife.vmap import VMAPImport + >>> df = ( + ... VMAPImport('demos/plate_with_hole.vmap') + ... .make_mesh('1', 'STATE-2') + ... .join_coordinates() + ... .join_variable('STRESS_CAUCHY') + ... .to_frame() + ... ) + + >>> grid = pv.UnstructuredGrid(*df.mesh.vtk_data()) >>> plotter = pv.Plotter(window_size=[1920, 1080]) - >>> plotter.add_mesh(grid, scalars=our_mesh.groupby('element_id')['val'].mean().to_numpy()) - >>> plotter.show() + >>> plotter.add_mesh(grid, scalars=df.groupby('element_id')['S11'].mean().to_numpy()) # doctest: +SKIP + >>> plotter.show() # doctest: +SKIP Note the `*` that needs to be added when calling ``pv.UnstructuredGrid()``. 
""" diff --git a/src/pylife/strength/meanstress.py b/src/pylife/strength/meanstress.py index 6ad6f3c2..1bb77bc6 100644 --- a/src/pylife/strength/meanstress.py +++ b/src/pylife/strength/meanstress.py @@ -59,11 +59,11 @@ def from_dict(cls, segments_dict): Example ------- - >>> hd = MST.HaighDiagram.from_dict({ - >>> (1.0, np.inf): 0.0, - >>> (-np.inf, 0.0): 0.5, - >>> (0.0, 1.0): 0.167 - >>> }) + >>> hd = HaighDiagram.from_dict({ + ... (1.0, np.inf): 0.0, + ... (-np.inf, 0.0): 0.5, + ... (0.0, 1.0): 0.167 + ... }) sets up a FKM Goodman like Haigh diagram. """ diff --git a/src/pylife/utils/histogram.py b/src/pylife/utils/histogram.py index d2f648f5..9584e6e7 100644 --- a/src/pylife/utils/histogram.py +++ b/src/pylife/utils/histogram.py @@ -165,12 +165,6 @@ def rebin_histogram(histogram, binning, nan_default=False): Examples -------- - >>> h - (0.0, 1.0] 1.0 - (1.0, 2.0] 2.0 - (2.0, 3.0] 3.0 - (3.0, 4.0] 4.0 - dtype: float64 >>> h = pd.Series([10.0, 20.0, 30.0, 40.0], index=pd.interval_range(0.0, 4.0, 4)) >>> h (0.0, 1.0] 10.0 @@ -203,12 +197,15 @@ def rebin_histogram(histogram, binning, nan_default=False): Define the target bin just by an int: - >>> rebin_histogram(h, 5) - (0.0, 0.8] 8.0 - (0.8, 1.6] 14.0 - (1.6, 2.4] 20.0 - (2.4, 3.2] 26.0 - (3.2, 4.0] 32.0 + >>> rebin_histogram(h, 8) + (0.0, 0.5] 5.0 + (0.5, 1.0] 5.0 + (1.0, 1.5] 10.0 + (1.5, 2.0] 10.0 + (2.0, 2.5] 15.0 + (2.5, 3.0] 15.0 + (3.0, 3.5] 20.0 + (3.5, 4.0] 20.0 dtype: float64 Limitations diff --git a/src/pylife/vmap/vmap_import.py b/src/pylife/vmap/vmap_import.py index be8019f2..c4c675a8 100644 --- a/src/pylife/vmap/vmap_import.py +++ b/src/pylife/vmap/vmap_import.py @@ -131,24 +131,29 @@ def make_mesh(self, geometry, state=None): -------- Get the mesh data with the coordinates of geometry '1' and the stress tensor of 'STATE-2' - >>> (pylife.vmap.VMAPImport('demos/plate_with_hole.vmap') - .make_mesh('1', 'STATE-2') - .join_coordinates() - .join_variable('STRESS_CAUCHY') - .to_frame() - x y z S11 S22 S33 
S12 S13 S23 - element_id node_id - 1 1734 14.897208 5.269875 0.0 27.080811 6.927080 0.0 -13.687358 0.0 0.0 - 1582 14.555333 5.355806 0.0 28.319006 1.178649 0.0 -10.732705 0.0 0.0 - 1596 14.630658 4.908741 0.0 47.701195 5.512213 0.0 -17.866833 0.0 0.0 - 4923 14.726271 5.312840 0.0 27.699907 4.052865 0.0 -12.210032 0.0 0.0 - 4924 14.592996 5.132274 0.0 38.010101 3.345431 0.0 -14.299768 0.0 0.0 - ... ... ... ... ... ... ... ... ... ... - 4770 3812 -13.189782 -5.691876 0.0 36.527439 2.470588 0.0 -14.706686 0.0 0.0 - 12418 -13.560289 -5.278386 0.0 32.868889 3.320898 0.0 -14.260107 0.0 0.0 - 14446 -13.673285 -5.569107 0.0 34.291058 3.642457 0.0 -13.836027 0.0 0.0 - 14614 -13.389065 -5.709927 0.0 36.063541 2.828889 0.0 -13.774759 0.0 0.0 - 14534 -13.276068 -5.419206 0.0 33.804211 2.829817 0.0 -14.580153 0.0 0.0 + >>> ( + ... VMAPImport('demos/plate_with_hole.vmap') + ... .make_mesh('1', 'STATE-2') + ... .join_coordinates() + ... .join_variable('STRESS_CAUCHY') + ... .to_frame() + ... ) + x y z ... S12 S13 S23 + element_id node_id ... + 1 1734 14.897208 5.269875 0.0 ... -13.687358 0.0 0.0 + 1582 14.555333 5.355806 0.0 ... -10.732705 0.0 0.0 + 1596 14.630658 4.908741 0.0 ... -17.866833 0.0 0.0 + 4923 14.726271 5.312840 0.0 ... -12.210032 0.0 0.0 + 4924 14.592996 5.132274 0.0 ... -14.299768 0.0 0.0 + ... ... ... ... ... ... ... ... + 4770 3812 -13.189782 -5.691876 0.0 ... -14.706686 0.0 0.0 + 12418 -13.560289 -5.278386 0.0 ... -14.260107 0.0 0.0 + 14446 -13.673285 -5.569107 0.0 ... -13.836027 0.0 0.0 + 14614 -13.389065 -5.709927 0.0 ... -13.774759 0.0 0.0 + 14534 -13.276068 -5.419206 0.0 ... 
-14.580153 0.0 0.0 + + [37884 rows x 9 columns] + """ self._mesh = pd.DataFrame(index=self._mesh_index(geometry)) self._geometry = geometry @@ -219,7 +224,7 @@ def join_coordinates(self): -------- Receive the mesh with the node coordinates - >>> pylife.vmap.VMAPImport('demos/plate_with_hole.vmap').make_mesh('1').join_coordinates().to_frame() + >>> VMAPImport('demos/plate_with_hole.vmap').make_mesh('1').join_coordinates().to_frame() x y z element_id node_id 1 1734 14.897208 5.269875 0.0 @@ -233,9 +238,8 @@ def join_coordinates(self): 14446 -13.673285 -5.569107 0.0 14614 -13.389065 -5.709927 0.0 14534 -13.276068 -5.419206 0.0 - + [37884 rows x 3 columns] - """ if self._mesh is None: raise APIUseError("Need to make_mesh() before joining the coordinates.") @@ -340,25 +344,27 @@ def join_variable(self, var_name, state=None, column_names=None): -------- Receiving the 'DISPLACEMENT' of 'STATE-1' , the stress and strain tensors of 'STATE-2' - >>> (pylife.vmap.VMAPImport('demos/plate_with_hole.vmap') - .make_mesh('1') - .join_variable('DISPLACEMENT', 'STATE-1') - .join_variable('STRESS_CAUCHY', 'STATE-2') - .join_variable('E').to_frame()) - dx dy dz S11 S22 S33 S12 S13 S23 E11 E22 E33 E12 E13 E23 - element_id node_id - 1 1734 0.0 0.0 0.0 27.080811 6.927080 0.0 -13.687358 0.0 0.0 0.000119 -0.000006 0.0 -0.000169 0.0 0.0 - 1582 0.0 0.0 0.0 28.319006 1.178649 0.0 -10.732705 0.0 0.0 0.000133 -0.000035 0.0 -0.000133 0.0 0.0 - 1596 0.0 0.0 0.0 47.701195 5.512213 0.0 -17.866833 0.0 0.0 0.000219 -0.000042 0.0 -0.000221 0.0 0.0 - 4923 0.0 0.0 0.0 27.699907 4.052865 0.0 -12.210032 0.0 0.0 0.000126 -0.000020 0.0 -0.000151 0.0 0.0 - 4924 0.0 0.0 0.0 38.010101 3.345431 0.0 -14.299768 0.0 0.0 0.000176 -0.000038 0.0 -0.000177 0.0 0.0 - ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... 
- 4770 3812 0.0 0.0 0.0 36.527439 2.470588 0.0 -14.706686 0.0 0.0 0.000170 -0.000040 0.0 -0.000182 0.0 0.0 - 12418 0.0 0.0 0.0 32.868889 3.320898 0.0 -14.260107 0.0 0.0 0.000152 -0.000031 0.0 -0.000177 0.0 0.0 - 14446 0.0 0.0 0.0 34.291058 3.642457 0.0 -13.836027 0.0 0.0 0.000158 -0.000032 0.0 -0.000171 0.0 0.0 - 14614 0.0 0.0 0.0 36.063541 2.828889 0.0 -13.774759 0.0 0.0 0.000168 -0.000038 0.0 -0.000171 0.0 0.0 - 14534 0.0 0.0 0.0 33.804211 2.829817 0.0 -14.580153 0.0 0.0 0.000157 -0.000035 0.0 -0.000181 0.0 0.0 - + >>> ( + ... VMAPImport('demos/plate_with_hole.vmap') + ... .make_mesh('1') + ... .join_variable('DISPLACEMENT', 'STATE-1') + ... .join_variable('STRESS_CAUCHY', 'STATE-2') + ... .join_variable('E').to_frame() + ... ) + dx dy dz S11 ... E33 E12 E13 E23 + element_id node_id ... + 1 1734 0.0 0.0 0.0 27.080811 ... 0.0 -0.000169 0.0 0.0 + 1582 0.0 0.0 0.0 28.319006 ... 0.0 -0.000133 0.0 0.0 + 1596 0.0 0.0 0.0 47.701195 ... 0.0 -0.000221 0.0 0.0 + 4923 0.0 0.0 0.0 27.699907 ... 0.0 -0.000151 0.0 0.0 + 4924 0.0 0.0 0.0 38.010101 ... 0.0 -0.000177 0.0 0.0 + ... ... ... ... ... ... ... ... ... ... + 4770 3812 0.0 0.0 0.0 36.527439 ... 0.0 -0.000182 0.0 0.0 + 12418 0.0 0.0 0.0 32.868889 ... 0.0 -0.000177 0.0 0.0 + 14446 0.0 0.0 0.0 34.291058 ... 0.0 -0.000171 0.0 0.0 + 14614 0.0 0.0 0.0 36.063541 ... 0.0 -0.000171 0.0 0.0 + 14534 0.0 0.0 0.0 33.804211 ... 0.0 -0.000181 0.0 0.0 + [37884 rows x 15 columns] TODO From 31c59b6c32f7dec06361d33450e0b0bf91ef8980 Mon Sep 17 00:00:00 2001 From: Johannes Mueller Date: Thu, 10 Oct 2024 19:38:50 +0200 Subject: [PATCH 5/5] Update README.md and CHANGELOG.md Signed-off-by: Johannes Mueller --- CHANGELOG.md | 2 +- README.md | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 70834f5d..632c71a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,7 +3,7 @@ In this file noteworthy changes of new releases of pyLife are documented since 2.0.0. 
-## unreleased +## pylife-2.1.2 ### New features diff --git a/README.md b/README.md index fe8830d1..0e60da9d 100644 --- a/README.md +++ b/README.md @@ -31,12 +31,15 @@ based on pyLife code. ## Status -pyLife-2.1.1 has been released. That means that for the time being we hope -that we will not introduce *breaking* changes. That does not mean that the -release is stable finished and perfect. We will do small improvements, -especially with respect to documentation in the upcoming months and release -them as 2.1.x releases. Once we have noticeable feature additions we will come -up with a 2.x.0 release. No ETA about that. +pyLife-2.1.x is the current release that you get by default. We are doing small +improvements in the pyLife-2.1.x branch (`master`) while developing the more +vast features in the 2.2.x branch (`develop`). + +The main new feature of the 2.2.x branch is about FKM functionality. As that +is quite a comprehensive addition we would need some time to get it right +before we can release it as the default release. + +Once 2.2.x is released we will probably stick to a one-branch development. ## Contents