diff --git a/.github/workflows/check_formatting.yml b/.github/workflows/check_formatting.yml new file mode 100644 index 00000000..be0760ad --- /dev/null +++ b/.github/workflows/check_formatting.yml @@ -0,0 +1,28 @@ +name: Check formatting and linting + +on: + pull_request: + push: { branches: [main] } + +jobs: + ruff-check: + name: Run ruff lint and format checks + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - uses: actions/setup-python@v5 + with: + python-version: '3.11' + cache: 'pip' + + - name: Install dependencies + run: pip install ruff + + - name: Run ruff lint + run: ruff check . + + - name: Run ruff format + run: ruff format . --check + diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..d3db8417 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,16 @@ +repos: +- repo: local + hooks: + - id: lint + name: Ruff Lint + description: Linting using ruff + entry: bash -c 'ruff check .' + language: system + stages: ["pre-commit", "pre-push"] + + - id: format + name: Ruff Format + description: Formatting using ruff + entry: bash -c 'ruff format . --check' + language: system + stages: ["pre-commit", "pre-push"] diff --git a/docs/conf.py b/docs/conf.py index d05f5c14..23d3ba6c 100755 --- a/docs/conf.py +++ b/docs/conf.py @@ -39,9 +39,9 @@ "pandas", ] -os.environ[ - "SPHINX_APIDOC_OPTIONS" -] = "members,show-inheritance,inherited-members" +os.environ["SPHINX_APIDOC_OPTIONS"] = ( + "members,show-inheritance,inherited-members" +) apidoc_module_dir = "../src/fmu" apidoc_output_dir = "apiref" @@ -83,4 +83,4 @@ # Output file base name for HTML help builder. htmlhelp_basename = "fmu-sumo" -html_logo = "_static/equinor-logo2.jpg" \ No newline at end of file +html_logo = "_static/equinor-logo2.jpg" diff --git a/docs/explorer.rst b/docs/explorer.rst index da53c0e3..67e8cdfe 100644 --- a/docs/explorer.rst +++ b/docs/explorer.rst @@ -255,16 +255,16 @@ the ``has`` filter to find cases that have ``4d-seismic`` data: .. code-block:: python - from fmu.sumo.explorer import Explorer, Filters + from fmu.sumo.explorer import Explorer, filters exp = Explorer(env="prod") - cases = exp.cases.filter(asset="Heidrun", has=Filters.seismic4d) + cases = exp.cases.filter(asset="Heidrun", has=filters.seismic4d) In this case, we have a predefined filter for ``4d-seismic``, exposed -thorugh ``fmu.sumo.explorer.Filters``. There is no magic involved; any +through ``fmu.sumo.explorer.filters``. There is no magic involved; any user can create their own filters, and either use them directly or ask -for them to be added to ``fmu.sumo.explorer.Filters``. +for them to be added to ``fmu.sumo.explorer.filters``. It is also possible to chain filters. 
The previous example could also be handled by diff --git a/examples/explorer.ipynb b/examples/explorer.ipynb index ed7887c4..0679449d 100644 --- a/examples/explorer.ipynb +++ b/examples/explorer.ipynb @@ -43,11 +43,11 @@ "outputs": [], "source": [ "# Get Drogon cases\n", - "myassetname = \"Drogon\" # Must be a valid asset on Sumo\n", + "myassetname = \"Drogon\" # Must be a valid asset on Sumo\n", "cases = sumo.cases.filter(asset=myassetname)\n", "\n", "# Get available status filters\n", - "print(\"Statuses:\",cases.statuses)\n", + "print(\"Statuses:\", cases.statuses)\n", "\n", "# Filter on status\n", "cases = cases.filter(status=\"keep\")\n", @@ -67,11 +67,11 @@ " print(\"\\n\")\n", "\n", "# Get case by name (name is not guaranteed to be unique)\n", - "mycasename = cases[0].name # for sake of example\n", + "mycasename = cases[0].name # for sake of example\n", "case = sumo.cases.filter(name=mycasename)[0]\n", "\n", "# Get case by id\n", - "mycaseuuid = cases[0].uuid # for sake of example\n", + "mycaseuuid = cases[0].uuid # for sake of example\n", "case = sumo.cases.filter(uuid=mycaseuuid)[0]\n", "\n", "# Select case\n", @@ -210,7 +210,7 @@ "\n", "layout = openvds.getLayout(openvds_handle)\n", "channel_count = layout.getChannelCount()\n", - "print(\"Channel count: \", channel_count)\n", + "print(\"Channel count: \", channel_count)\n", "print(\"Channel names: \")\n", "for i in range(channel_count):\n", " print(\" \", layout.getChannelName(i))" @@ -232,7 +232,7 @@ "source": [ "# Perform aggregation on SurfaceCollection\n", "\n", - "regsurf = surfs.min() # .max(), .mean(), .std(), .p10(), .p90(), .p50()\n", + "regsurf = surfs.min() # .max(), .mean(), .std(), .p10(), .p90(), .p50()\n", "regsurf.to_regular_surface().quickplot()" ] }, @@ -286,15 +286,24 @@ "\n", "\n", "# get surfaces with timestamp in range\n", - "time = TimeFilter(time_type=TimeType.TIMESTAMP, start=\"2018-01-01\", end=\"2022-01-01\")\n", + "time = TimeFilter(\n", + " time_type=TimeType.TIMESTAMP, start=\"2018-01-01\", end=\"2022-01-01\"\n", + ")\n", "surfs = case.surfaces.filter(time=time)\n", "\n", "# get surfaces with time intervals in range\n", - "time = TimeFilter(time_type=TimeType.INTERVAL, start=\"2018-01-01\", end=\"2022-01-01\")\n", + "time = TimeFilter(\n", + " time_type=TimeType.INTERVAL, start=\"2018-01-01\", end=\"2022-01-01\"\n", + ")\n", "surfs = case.surfaces.filter(time=time)\n", "\n", "# get surfaces where intervals overlap with range\n", - "time = TimeFilter(time_type=TimeType.INTERVAL, start=\"2018-01-01\", end=\"2022-01-01\", overlap=True)\n", + "time = TimeFilter(\n", + " time_type=TimeType.INTERVAL,\n", + " start=\"2018-01-01\",\n", + " end=\"2022-01-01\",\n", + " overlap=True,\n", + ")\n", "surfs = case.surfaces.filter(time=time)\n", "\n", "# get surfaces with exact timestamp matching (t0 == start)\n", @@ -302,7 +311,12 @@ "surfs = case.surfaces.filter(time=time)\n", "\n", "# get surfaces with exact interval matching (t0 == start AND t1 == end)\n", - "time = TimeFilter(time_type=TimeType.INTERVAL, start=\"2018-01-01\", end=\"2022-01-01\", exact=True)\n", + "time = TimeFilter(\n", + " time_type=TimeType.INTERVAL,\n", + " start=\"2018-01-01\",\n", + " end=\"2022-01-01\",\n", + " exact=True,\n", + ")\n", "surfs = case.surfaces.filter(time=time)" ] }, diff --git a/examples/explorer2.ipynb b/examples/explorer2.ipynb index 65b88d9a..aa530ef4 100644 --- a/examples/explorer2.ipynb +++ b/examples/explorer2.ipynb @@ -9,6 +9,7 @@ "source": [ "import json\n", "import time\n", + "\n", "from fmu.sumo.explorer import 
Explorer" ] }, @@ -19,11 +20,11 @@ "metadata": {}, "outputs": [], "source": [ - "env=\"dev\"\n", - "caseuuid=\"d872b3ce-0322-4357-b192-32bde70d7dac\"\n", - "name=\"DROGON\"\n", - "tagname=\"summary\"\n", - "iteration=\"iter-0\"" + "env = \"dev\"\n", + "caseuuid = \"d872b3ce-0322-4357-b192-32bde70d7dac\"\n", + "name = \"DROGON\"\n", + "tagname = \"summary\"\n", + "iteration = \"iter-0\"" ] }, { @@ -54,7 +55,7 @@ "metadata": {}, "outputs": [], "source": [ - "[(case.name, case.uuid) for case in exp.cases]\n" + "[(case.name, case.uuid) for case in exp.cases]" ] }, { @@ -66,7 +67,7 @@ "source": [ "case = exp.get_object(caseuuid)\n", "print(f\"Case name: {case.name}\")\n", - "print(f\"Case UUID: {case.uuid}\")\n" + "print(f\"Case UUID: {case.uuid}\")" ] }, { @@ -76,13 +77,18 @@ "metadata": {}, "outputs": [], "source": [ - "realizations = case.filter(cls=\"table\", name=name, tagname=tagname, \\\n", - " iteration=iteration, realization=True)\n", + "realizations = case.filter(\n", + " cls=\"table\",\n", + " name=name,\n", + " tagname=tagname,\n", + " iteration=iteration,\n", + " realization=True,\n", + ")\n", "print(f\"Number of realizations: {len(realizations)}\")\n", - "columns=realizations[0].columns\n", + "columns = realizations[0].columns\n", "print(f\"Number of columns, first realization: {len(columns)}\")\n", "print(f\"Number of columns, all realizations: {len(realizations.columns)}\")\n", - "print(f\"Tagnames: {realizations.tagnames}\")\n" + "print(f\"Tagnames: {realizations.tagnames}\")" ] }, { @@ -92,9 +98,9 @@ "metadata": {}, "outputs": [], "source": [ - "t0=time.perf_counter()\n", - "agg=realizations.aggregate(operation=\"collect\", columns=columns[:50])\n", - "t1=time.perf_counter()\n" + "t0 = time.perf_counter()\n", + "agg = realizations.aggregate(operation=\"collect\", columns=columns[:50])\n", + "t1 = time.perf_counter()" ] }, { @@ -104,7 +110,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"Table aggregation: elapsed {t1-t0:0.3} seconds.\")\n" + "print(f\"Table aggregation: elapsed {t1-t0:0.3} seconds.\")" ] }, { @@ -114,7 +120,9 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"Number of realizations, aggregation: {len(agg.metadata['fmu']['aggregation']['realization_ids'])}\")" + "print(\n", + " f\"Number of realizations, aggregation: {len(agg.metadata['fmu']['aggregation']['realization_ids'])}\"\n", + ")" ] }, { @@ -124,7 +132,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(agg.metadata['fmu']['aggregation']['operation'])" + "print(agg.metadata[\"fmu\"][\"aggregation\"][\"operation\"])" ] }, { @@ -135,7 +143,7 @@ "outputs": [], "source": [ "table = agg.to_pandas()\n", - "print(table)\n" + "print(table)" ] }, { @@ -145,12 +153,13 @@ "metadata": {}, "outputs": [], "source": [ - "realizations = case.filter(cls=\"surface\", \\\n", - " iteration=iteration, realization=True)\n", + "realizations = case.filter(\n", + " cls=\"surface\", iteration=iteration, realization=True\n", + ")\n", "print(f\"Number of realizations: {len(realizations)}\")\n", "print(realizations.names)\n", "print(realizations.tagnames)\n", - "print(realizations.contents)\n" + "print(realizations.contents)" ] }, { @@ -160,7 +169,9 @@ "metadata": {}, "outputs": [], "source": [ - "surfaces=realizations.filter(name=\"Valysar Fm.\", content=\"depth\", tagname=\"apstrend_aps_Channel_Average\")\n", + "surfaces = realizations.filter(\n", + " name=\"Valysar Fm.\", content=\"depth\", tagname=\"apstrend_aps_Channel_Average\"\n", + ")\n", "print(len(surfaces))\n", "print(surfaces.tagnames)\n", 
"print(surfaces.contents)" @@ -173,9 +184,9 @@ "metadata": {}, "outputs": [], "source": [ - "t0=time.perf_counter()\n", - "agg=surfaces.aggregate(operation=\"mean\")\n", - "t1=time.perf_counter()" + "t0 = time.perf_counter()\n", + "agg = surfaces.aggregate(operation=\"mean\")\n", + "t1 = time.perf_counter()" ] }, { @@ -185,7 +196,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"Surface aggregation: elapsed {t1-t0:0.3} seconds.\")\n" + "print(f\"Surface aggregation: elapsed {t1-t0:0.3} seconds.\")" ] }, { @@ -195,8 +206,10 @@ "metadata": {}, "outputs": [], "source": [ - "print(f\"Number of realizations, aggregation: {len(agg.metadata['fmu']['aggregation']['realization_ids'])}\")\n", - "print(agg.metadata['fmu']['aggregation']['operation'])" + "print(\n", + " f\"Number of realizations, aggregation: {len(agg.metadata['fmu']['aggregation']['realization_ids'])}\"\n", + ")\n", + "print(agg.metadata[\"fmu\"][\"aggregation\"][\"operation\"])" ] }, { @@ -206,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "regsurf=agg.to_regular_surface()\n" + "regsurf = agg.to_regular_surface()" ] }, { @@ -216,7 +229,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(regsurf)\n" + "print(regsurf)" ] }, { @@ -256,7 +269,7 @@ "metadata": {}, "outputs": [], "source": [ - "iteration=exp.get_iteration_by_uuid('23c63921-b54c-449d-49a3-c08332faf7cc')" + "iteration = exp.get_iteration_by_uuid(\"23c63921-b54c-449d-49a3-c08332faf7cc\")" ] }, { @@ -316,7 +329,7 @@ "metadata": {}, "outputs": [], "source": [ - "len(iteration.filter(realization=159))\n" + "len(iteration.filter(realization=159))" ] }, { @@ -326,7 +339,7 @@ "metadata": {}, "outputs": [], "source": [ - "len(iteration.filter(realization=160))\n" + "len(iteration.filter(realization=160))" ] }, { @@ -386,8 +399,9 @@ "metadata": {}, "outputs": [], "source": [ - "from fmu.sumo.explorer import Filters\n", - "len(exp.cases.filter(has=Filters.seismic4d))" + "from fmu.sumo.explorer import filters\n", + "\n", + "len(exp.cases.filter(has=filters.seismic4d))" ] }, { @@ -397,7 +411,7 @@ "metadata": {}, "outputs": [], "source": [ - "len(exp.filter(has=Filters.seismic4d))" + "len(exp.filter(has=filters.seismic4d))" ] }, { @@ -407,7 +421,7 @@ "metadata": {}, "outputs": [], "source": [ - "[case.name for case in list(exp.filter(has=Filters.seismic4d).cases)[:10]]" + "[case.name for case in list(exp.filter(has=filters.seismic4d).cases)[:10]]" ] }, { @@ -417,8 +431,10 @@ "metadata": {}, "outputs": [], "source": [ - "myrealization=iteration.realizations.filter(complex={\"term\":{\"fmu.realization.id\":0}})[0]\n", - "myrealization\n" + "myrealization = iteration.realizations.filter(\n", + " complex={\"term\": {\"fmu.realization.id\": 0}}\n", + ")[0]\n", + "myrealization" ] }, { @@ -438,7 +454,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(json.dumps(iteration.metadata,indent=2))" + "print(json.dumps(iteration.metadata, indent=2))" ] }, { diff --git a/examples/metrics.ipynb b/examples/metrics.ipynb index ea7f64b6..18e98520 100644 --- a/examples/metrics.ipynb +++ b/examples/metrics.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "exp=Explorer(env=\"preview\")" + "exp = Explorer(env=\"preview\")" ] }, { @@ -27,7 +27,7 @@ "metadata": {}, "outputs": [], "source": [ - "case=exp.get_case_by_uuid(\"359e7c72-a4ca-43ee-9203-f09cd0f149a9\")" + "case = exp.get_case_by_uuid(\"359e7c72-a4ca-43ee-9203-f09cd0f149a9\")" ] }, { @@ -37,7 +37,7 @@ "metadata": {}, "outputs": [], "source": [ - "tables=case.tables" + "tables = case.tables" ] }, { @@ 
-47,7 +47,7 @@ "metadata": {}, "outputs": [], "source": [ - "summaries=tables.filter(tagname=\"summary\", realization=True)" + "summaries = tables.filter(tagname=\"summary\", realization=True)" ] }, { @@ -135,7 +135,7 @@ "metadata": {}, "outputs": [], "source": [ - "summaries.metrics.sum(field=\"_sumo.blob_size\")[\"value\"]/(1024*1024*1024)" + "summaries.metrics.sum(field=\"_sumo.blob_size\")[\"value\"] / (1024 * 1024 * 1024)" ] }, { diff --git a/examples/table-aggregation.ipynb b/examples/table-aggregation.ipynb index d9180eb5..ec42d112 100644 --- a/examples/table-aggregation.ipynb +++ b/examples/table-aggregation.ipynb @@ -7,19 +7,23 @@ "metadata": {}, "outputs": [], "source": [ - "import json\n", "import time\n", + "\n", + "\n", "class Timer:\n", " def __init__(self):\n", " return\n", + "\n", " def __enter__(self):\n", " self._t0 = time.perf_counter()\n", " return\n", + "\n", " def __exit__(self, type, value, traceback):\n", " t1 = time.perf_counter()\n", " print(f\"Elapsed: {t1-self._t0:0.3f} seconds.\")\n", " return\n", - " pass\n" + "\n", + " pass" ] }, { @@ -39,7 +43,8 @@ ], "source": [ "from fmu.sumo.explorer import Explorer\n", - "exp=Explorer(env=\"preview\")" + "\n", + "exp = Explorer(env=\"preview\")" ] }, { @@ -55,20 +60,26 @@ " tbs = sc.metrics.sum(\"_sumo.blob_size\")\n", " return tbs\n", "\n", + "\n", "def do_aggregate(tagname, rels, columns):\n", " print(f\"{tagname}: {len(rels)} objects, {len(rels.columns)} columns.\")\n", " tot_size_bytes = total_blob_size(rels)\n", " print(f\"Total size of input: {tot_size_bytes / (1024*1024*1024):.3f} GiB\")\n", " with Timer():\n", - " agg=rels.filter(column=columns)._aggregate(columns=columns)\n", + " agg = rels.filter(column=columns)._aggregate(columns=columns)\n", " print(agg.to_pandas().sort_values(by=[\"REAL\", \"DATE\"]))\n", "\n", + "\n", "def run_exp(caseuuid, itername, tagname, columns):\n", " case = exp.get_case_by_uuid(caseuuid)\n", " print(f\"{case.asset}: {case.name}: {caseuuid}\")\n", - " rels=case.tables.visible.filter(iteration=itername, realization=True, tagname=tagname, column=columns)\n", + " rels = case.tables.visible.filter(\n", + " iteration=itername, realization=True, tagname=tagname, column=columns\n", + " )\n", " do_aggregate(tagname, rels, columns)\n", - " rels=case.tables.hidden.filter(iteration=itername, realization=True, tagname=tagname, column=columns)\n", + " rels = case.tables.hidden.filter(\n", + " iteration=itername, realization=True, tagname=tagname, column=columns\n", + " )\n", " do_aggregate(tagname, rels, columns)" ] }, diff --git a/examples/tables.ipynb b/examples/tables.ipynb index 04f7f045..289520a4 100644 --- a/examples/tables.ipynb +++ b/examples/tables.ipynb @@ -6,11 +6,11 @@ "metadata": {}, "outputs": [], "source": [ - "import time\n", - "import pandas as pd\n", - "import pyarrow as pa\n", - "from fmu.sumo.explorer import Explorer, AggregatedTable\n", "import matplotlib.pyplot as plt\n", + "import pandas as pd\n", + "\n", + "from fmu.sumo.explorer import AggregatedTable, Explorer\n", + "\n", "%matplotlib inline\n", "\n", "# These examples use Seaborn for plotting. 
Seaborn does not automatically install with\n", @@ -143,7 +143,9 @@ "outputs": [], "source": [ "# Filter using the key\n", - "one_table = tables.filter(realization=0, iteration=\"iter-0\", name=\"DROGON\", tagname=\"compdat\")[0]\n", + "one_table = tables.filter(\n", + " realization=0, iteration=\"iter-0\", name=\"DROGON\", tagname=\"compdat\"\n", + ")[0]\n", "\n", "# Give back the name and tag\n", "print(f\"Found table {one_table.name}-{one_table.tagname}\")\n", @@ -204,7 +206,9 @@ "metadata": {}, "outputs": [], "source": [ - "sim_tables = tables.filter(name=\"DROGON\", iteration=\"iter-0\", aggregation=\"collection\")\n", + "sim_tables = tables.filter(\n", + " name=\"DROGON\", iteration=\"iter-0\", aggregation=\"collection\"\n", + ")\n", "sim_tables.tagnames" ] }, @@ -256,10 +260,11 @@ "metadata": {}, "outputs": [], "source": [ - "\n", "pressure = rft_tables.filter(column=\"PRESSURE\")[0]\n", "frame = pressure.to_pandas()\n", - "print(f\"The following columns are in the pressure object {frame.columns.to_list()}\")" + "print(\n", + " f\"The following columns are in the pressure object {frame.columns.to_list()}\"\n", + ")" ] }, { @@ -275,28 +280,28 @@ "metadata": {}, "outputs": [], "source": [ - "\n", "names = frame.WELL.unique()\n", "dates = frame.DATE.unique()\n", "fig, plots = plt.subplots(len(dates), len(names))\n", "\n", "for i, date in enumerate(dates):\n", " for j, well in enumerate(names):\n", - " data = frame.loc[(frame.DATE == date) & (frame.WELL == well)].sort_values(by=\"DEPTH\")\n", + " data = frame.loc[\n", + " (date == frame.DATE) & (well == frame.WELL)\n", + " ].sort_values(by=\"DEPTH\")\n", " ax = plots[i, j]\n", " if data.empty:\n", - " ax = plots[i, j] # get current axes\n", - " ax.get_xaxis().set_visible(False) # hide x-axis\n", - " ax.get_yaxis().set_visible(False) # hide y-axis \n", + " ax = plots[i, j] # get current axes\n", + " ax.get_xaxis().set_visible(False) # hide x-axis\n", + " ax.get_yaxis().set_visible(False) # hide y-axis\n", " ax.axis(\"off\")\n", " else:\n", " data[[\"DEPTH\", \"PRESSURE\"]].plot(ax=ax, x=\"PRESSURE\", y=\"DEPTH\")\n", " ax.get_legend().remove()\n", " if i == 0:\n", " ax.set_title(well)\n", - " \n", - " ax.invert_yaxis()\n", "\n", + " ax.invert_yaxis()\n", "\n", "\n", "plt.show()" @@ -357,7 +362,14 @@ "source": [ "EQUIL = AggregatedTable(case, \"DROGON\", \"equil\", \"iter-0\")\n", "CONTACT_TYPE = \"OWC\"\n", - "sns.boxplot(pd.pivot_table(EQUIL[CONTACT_TYPE].to_pandas(), index=\"REAL\", columns=\"EQLNUM\", values=CONTACT_TYPE).values)\n", + "sns.boxplot(\n", + " pd.pivot_table(\n", + " EQUIL[CONTACT_TYPE].to_pandas(),\n", + " index=\"REAL\",\n", + " columns=\"EQLNUM\",\n", + " values=CONTACT_TYPE,\n", + " ).values\n", + ")\n", "plt.show()" ] }, @@ -376,7 +388,11 @@ "source": [ "RELPERM = AggregatedTable(case, \"DROGON\", \"satfunc\", \"iter-0\")\n", "\n", - "KRW = pd.concat((RELPERM[\"KRW\"].to_pandas(), RELPERM[\"SW\"].to_pandas()), axis=1).T.drop_duplicates().T\n", + "KRW = (\n", + " pd.concat((RELPERM[\"KRW\"].to_pandas(), RELPERM[\"SW\"].to_pandas()), axis=1)\n", + " .T.drop_duplicates()\n", + " .T\n", + ")\n", "print(KRW.head())" ] }, @@ -393,7 +409,13 @@ "metadata": {}, "outputs": [], "source": [ - "ax = sns.lineplot(KRW.loc[(KRW.KEYWORD == \"SWOF\")], x=\"SW\", y=\"KRW\", hue=\"SATNUM\", style=\"REAL\")\n", + "ax = sns.lineplot(\n", + " KRW.loc[(KRW.KEYWORD == \"SWOF\")],\n", + " x=\"SW\",\n", + " y=\"KRW\",\n", + " hue=\"SATNUM\",\n", + " style=\"REAL\",\n", + ")\n", "ax.legend(loc=\"right\", ncols=6, bbox_to_anchor=(2.1, 0.5))\n", 
"plt.show()" ] @@ -411,10 +433,18 @@ "metadata": {}, "outputs": [], "source": [ - "\n", "summary = AggregatedTable(case, \"DROGON\", \"summary\", \"iter-0\")\n", "VECTOR_NAME = \"FOIP\"\n", - "ax = pd.pivot_table(summary[VECTOR_NAME].to_pandas(), index=\"DATE\", columns=\"REAL\", values=VECTOR_NAME).dropna(axis=0).plot()\n", + "ax = (\n", + " pd.pivot_table(\n", + " summary[VECTOR_NAME].to_pandas(),\n", + " index=\"DATE\",\n", + " columns=\"REAL\",\n", + " values=VECTOR_NAME,\n", + " )\n", + " .dropna(axis=0)\n", + " .plot()\n", + ")\n", "ax.get_legend().remove()\n", "ax.set_label(VECTOR_NAME)\n", "plt.show()" @@ -461,10 +491,21 @@ "metadata": {}, "outputs": [], "source": [ - "KH[\"ZONE_NR\"] = KH[\"ZONE\"].replace({value: key for key, value in dict(enumerate(KH[\"ZONE\"].unique().tolist())).items()})\n", - "MEAN_STD = pd.pivot_table(KH, index=[\"ZONE_NR\", \"ZONE\"], columns=\"WELL\", values=\"KH\", aggfunc=[\"mean\", \"std\"])\n", + "KH[\"ZONE_NR\"] = KH[\"ZONE\"].replace(\n", + " {\n", + " value: key\n", + " for key, value in dict(enumerate(KH[\"ZONE\"].unique().tolist())).items()\n", + " }\n", + ")\n", + "MEAN_STD = pd.pivot_table(\n", + " KH,\n", + " index=[\"ZONE_NR\", \"ZONE\"],\n", + " columns=\"WELL\",\n", + " values=\"KH\",\n", + " aggfunc=[\"mean\", \"std\"],\n", + ")\n", "# KH.head()\n", - "MEAN_STD[(\"mean\", )][\"A1\"]" + "MEAN_STD[(\"mean\",)][\"A1\"]" ] }, { @@ -519,7 +560,9 @@ "source": [ "FAULTS = AggregatedTable(case, \"DROGON\", \"faults\", \"iter-0\")\n", "print(FAULTS.columns)\n", - "COMPLETE = pd.concat((FAULTS[\"I\"].to_pandas(), FAULTS[\"J\"].to_pandas(), FAULTS[\"K\"].to_pandas()))\n", + "COMPLETE = pd.concat(\n", + " (FAULTS[\"I\"].to_pandas(), FAULTS[\"J\"].to_pandas(), FAULTS[\"K\"].to_pandas())\n", + ")\n", "COMPLETE.head()" ] }, diff --git a/pyproject.toml b/pyproject.toml index 30ac652e..1f3c518d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,12 +5,6 @@ build-backend = "setuptools.build_meta" [tool.setuptools_scm] version_file = "src/fmu/sumo/explorer/_version.py" -[tool.isort] -profile = "black" - -[tool.black] -line-length = 79 - [project] name = "fmu-sumo" description = "Python package for interacting with Sumo in an FMU setting" @@ -35,7 +29,7 @@ dependencies = [ Repository = "https://github.com/equinor/fmu-sumo" [project.optional-dependencies] -dev = ["black", "flake8", "pytest"] +dev = ["ruff", "pytest"] test = ["pytest", "pytest-timeout"] docs = [ "sphinx==6.2.1", @@ -52,3 +46,36 @@ platforms = ["any"] [tool.setuptools.packages.find] where = ["src"] + +[tool.ruff] +exclude = [ + ".env", + ".git", + ".github", + ".venv", + "venv", +] + +line-length = 79 + +[tool.ruff.lint] +ignore = [ + "E501", + "PD901", +] + +extend-select = [ + "C4", # Flake8-comprehensions + "I", # isort + "SIM", # Flake8-simplify + "TC", # Flake8-type-checking + "TID", # Flake8-tidy-imports + "N", # pep8-naming + "PD", # Pandas +] + +[tool.ruff.lint.per-file-ignores] +"__init__.py" = ["F401"] +"tests/context.py" = ["F401"] +"examples/explorer.ipynb" = ["F821"] + diff --git a/src/fmu/sumo/explorer/Filters.py b/src/fmu/sumo/explorer/Filters.py deleted file mode 100644 index 52130588..00000000 --- a/src/fmu/sumo/explorer/Filters.py +++ /dev/null @@ -1,26 +0,0 @@ -# Filter that matches 4d-seismic objects. 
-seismic4d = { - "bool": { - "must": [ - {"term": {"data.content.keyword": "seismic"}}, - {"term": {"data.time.t0.label.keyword": "base"}}, - {"term": {"data.time.t1.label.keyword": "monitor"}}, - ] - } -} - -# Filter that matches aggregations -aggregations = {"exists": {"field": "fmu.aggregation.operation"}} - -# Filter that matches observations -observations = { - "bool": { - "must_not": [ - {"exists": {"field": "fmu.iteration.name.keyword"}}, - {"exists": {"field": "fmu.realization.id"}}, - ] - } -} - -# Filter that matches realizations -realizations = {"exists": {"field": "fmu.realization.id"}} diff --git a/src/fmu/sumo/explorer/__init__.py b/src/fmu/sumo/explorer/__init__.py index 1c46ab00..f72e3f88 100644 --- a/src/fmu/sumo/explorer/__init__.py +++ b/src/fmu/sumo/explorer/__init__.py @@ -9,4 +9,5 @@ from fmu.sumo.explorer.explorer import Explorer -from fmu.sumo.explorer.timefilter import TimeType, TimeFilter +from fmu.sumo.explorer.filters import Filters +from fmu.sumo.explorer.timefilter import TimeFilter, TimeType diff --git a/src/fmu/sumo/explorer/cache.py b/src/fmu/sumo/explorer/cache.py index f76eacd8..fce9369c 100644 --- a/src/fmu/sumo/explorer/cache.py +++ b/src/fmu/sumo/explorer/cache.py @@ -5,9 +5,8 @@ class LRUCache: - def __init__(self, capacity): - self.cache = dict() + self.cache = {} self.capacity = capacity self.access = deque() self.lock = Lock() diff --git a/src/fmu/sumo/explorer/explorer.py b/src/fmu/sumo/explorer/explorer.py index 87da966a..01a776ae 100644 --- a/src/fmu/sumo/explorer/explorer.py +++ b/src/fmu/sumo/explorer/explorer.py @@ -1,9 +1,10 @@ """Module containing class for exploring results from sumo""" import warnings -import httpx +import httpx from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._search_context import SearchContext @@ -36,7 +37,12 @@ def __init__( interactive (bool): authenticate using interactive flow (browser) keep_alive (str): point in time lifespan (deprecated and ignored) """ - sumo = SumoClient(env, token=token, interactive=interactive, timeout=httpx.Timeout(180.0)) + sumo = SumoClient( + env, + token=token, + interactive=interactive, + timeout=httpx.Timeout(180.0), + ) SearchContext.__init__(self, sumo) if keep_alive: warnings.warn( @@ -59,9 +65,8 @@ def get_permissions(self, asset: str = None): """ res = self._sumo.get("/userpermissions").json() - if asset is not None: - if asset not in res: - raise PermissionError(f"No permissions for asset: {asset}") + if asset is not None and asset not in res: + raise PermissionError(f"No permissions for asset: {asset}") return res @@ -77,9 +82,7 @@ async def get_permissions_async(self, asset: str = None): res = await self._sumo.get_async("/userpermissions") res = res.json() - if asset is not None: - if asset not in res: - raise PermissionError(f"No permissions for asset: {asset}") + if asset is not None and asset not in res: + raise PermissionError(f"No permissions for asset: {asset}") return res - diff --git a/src/fmu/sumo/explorer/filters.py b/src/fmu/sumo/explorer/filters.py new file mode 100644 index 00000000..8e6f33b9 --- /dev/null +++ b/src/fmu/sumo/explorer/filters.py @@ -0,0 +1,27 @@ +class Filters: + # Filter that matches 4d-seismic objects. 
+ seismic4d = { + "bool": { + "must": [ + {"term": {"data.content.keyword": "seismic"}}, + {"term": {"data.time.t0.label.keyword": "base"}}, + {"term": {"data.time.t1.label.keyword": "monitor"}}, + ] + } + } + + # Filter that matches aggregations + aggregations = {"exists": {"field": "fmu.aggregation.operation"}} + + # Filter that matches observations + observations = { + "bool": { + "must_not": [ + {"exists": {"field": "fmu.iteration.name.keyword"}}, + {"exists": {"field": "fmu.realization.id"}}, + ] + } + } + + # Filter that matches realizations + realizations = {"exists": {"field": "fmu.realization.id"}} diff --git a/src/fmu/sumo/explorer/objects/__init__.py b/src/fmu/sumo/explorer/objects/__init__.py index 33cd6861..5362c0ea 100644 --- a/src/fmu/sumo/explorer/objects/__init__.py +++ b/src/fmu/sumo/explorer/objects/__init__.py @@ -1,18 +1,18 @@ """Sumo cases and child objects""" -from fmu.sumo.explorer.objects._search_context import SearchContext from fmu.sumo.explorer.objects._child import Child from fmu.sumo.explorer.objects._metrics import Metrics +from fmu.sumo.explorer.objects._search_context import SearchContext from fmu.sumo.explorer.objects.case import Case from fmu.sumo.explorer.objects.cases import Cases -from fmu.sumo.explorer.objects.cube import Cube -from fmu.sumo.explorer.objects.dictionary import Dictionary -from fmu.sumo.explorer.objects.surface import Surface -from fmu.sumo.explorer.objects.polygons import Polygons -from fmu.sumo.explorer.objects.table import Table from fmu.sumo.explorer.objects.cpgrid import CPGrid from fmu.sumo.explorer.objects.cpgrid_property import CPGridProperty +from fmu.sumo.explorer.objects.cube import Cube +from fmu.sumo.explorer.objects.dictionary import Dictionary from fmu.sumo.explorer.objects.iteration import Iteration from fmu.sumo.explorer.objects.iterations import Iterations +from fmu.sumo.explorer.objects.polygons import Polygons from fmu.sumo.explorer.objects.realization import Realization from fmu.sumo.explorer.objects.realizations import Realizations +from fmu.sumo.explorer.objects.surface import Surface +from fmu.sumo.explorer.objects.table import Table diff --git a/src/fmu/sumo/explorer/objects/_child.py b/src/fmu/sumo/explorer/objects/_child.py index 8327ae73..8b1be030 100644 --- a/src/fmu/sumo/explorer/objects/_child.py +++ b/src/fmu/sumo/explorer/objects/_child.py @@ -1,8 +1,10 @@ """module containing class for child object""" -from typing import Dict from io import BytesIO +from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._document import Document _prop_desc = [ @@ -87,8 +89,10 @@ def interval(self) -> str: @property def template_path(self): - return "/".join(["{realization}", "{iteration}"] + - self.relative_path.split("/")[2:]) + return "/".join( + ["{realization}", "{iteration}"] + + self.relative_path.split("/")[2:] + ) Child.map_properties(Child, _prop_desc) diff --git a/src/fmu/sumo/explorer/objects/_document.py b/src/fmu/sumo/explorer/objects/_document.py index 7c53a8df..f7ecc3f4 100644 --- a/src/fmu/sumo/explorer/objects/_document.py +++ b/src/fmu/sumo/explorer/objects/_document.py @@ -1,7 +1,7 @@ """Contains class for one document""" import re -from typing import List, Dict +from typing import Dict, List _path_split_rx = re.compile(r"\]\.|\.|\[") diff --git a/src/fmu/sumo/explorer/objects/_metrics.py b/src/fmu/sumo/explorer/objects/_metrics.py index 8d0b9433..ef1dc732 100644 --- a/src/fmu/sumo/explorer/objects/_metrics.py +++ b/src/fmu/sumo/explorer/objects/_metrics.py @@ 
-4,10 +4,10 @@ def __init__(self, search_context): return def _aggregate(self, op, **kwargs): - aggs = {"agg": {op: {k: v for k, v in kwargs.items() if v is not None}}} - qdoc = {"query": self._search_context._query, - "aggs": aggs, - "size": 0} + aggs = { + "agg": {op: {k: v for k, v in kwargs.items() if v is not None}} + } + qdoc = {"query": self._search_context._query, "aggs": aggs, "size": 0} res = self._search_context._sumo.post("/search", json=qdoc).json() return res["aggregations"]["agg"] @@ -130,5 +130,6 @@ def percentiles(self, field, percents=None): A dictionary of percentiles. """ - return self._aggregate("percentiles", field=field, - percents=percents)["values"] + return self._aggregate("percentiles", field=field, percents=percents)[ + "values" + ] diff --git a/src/fmu/sumo/explorer/objects/_search_context.py b/src/fmu/sumo/explorer/objects/_search_context.py index 7f96e282..269d6c34 100644 --- a/src/fmu/sumo/explorer/objects/_search_context.py +++ b/src/fmu/sumo/explorer/objects/_search_context.py @@ -1,11 +1,13 @@ import uuid -import httpx -import deprecation import warnings -from typing import List, Dict, Tuple from datetime import datetime from io import BytesIO +from typing import Dict, List, Tuple + +import deprecation +import httpx from sumo.wrapper import SumoClient + import fmu.sumo.explorer.objects as objects from fmu.sumo.explorer.cache import LRUCache @@ -182,7 +184,7 @@ def _gen_filters(spec): "contents": ["data.content.keyword", "List of unique contents."], "columns": ["data.spec.columns.keyword", "List of unique column names."], "statuses": ["_sumo.status.keyword", "List of unique case statuses."], - "users": ["fmu.case.user.id.keyword", "List of unique user names."] + "users": ["fmu.case.user.id.keyword", "List of unique user names."], } @@ -269,8 +271,8 @@ def __init__( sumo: SumoClient, must: List = [], must_not: List = [], - hidden = False, - visible = True + hidden=False, + visible=True, ): self._sumo = sumo self._must = must[:] @@ -304,9 +306,7 @@ def _query(self): if len(must) == 0: return {"bool": {"must_not": must_not}} else: - return { - "bool": {"must": must, "must_not": must_not} - } + return {"bool": {"must": must, "must_not": must_not}} def _to_sumo(self, obj, blob=None): cls = obj["_source"]["class"] @@ -320,7 +320,7 @@ def _to_sumo(self, obj, blob=None): "surface": objects.Surface, "table": objects.Table, "cpgrid": objects.CPGrid, - "cpgrid_property": objects.CPGridProperty + "cpgrid_property": objects.CPGridProperty, }.get(cls) if constructor is None: warnings.warn(f"No constructor for class {cls}") @@ -501,11 +501,13 @@ def select(self, sel): None """ - required = set(["class"]) + required = {"class"} + def extreq(lst): if isinstance(lst, str): lst = [lst] return list(set(lst) | required) + if isinstance(sel, str): self._select = extreq([sel]) elif isinstance(sel, list): @@ -539,7 +541,7 @@ def get_object(self, uuid: str) -> Dict: query = { "query": {"ids": {"values": [uuid]}}, "size": 1, - "_source": self._select + "_source": self._select, } res = self._sumo.post("/search", json=query) @@ -568,7 +570,7 @@ async def get_object_async(self, uuid: str) -> Dict: query = { "query": {"ids": {"values": [uuid]}}, "size": 1, - "_source": self._select + "_source": self._select, } res = await self._sumo.post_async("/search", json=query) @@ -585,7 +587,7 @@ def _maybe_prefetch(self, index): uuid = self._hits[index] if self._cache.has(uuid): return - uuids = self._hits[index:min(index + 100, len(self._hits))] + uuids = self._hits[index : min(index + 100, 
len(self._hits))] uuids = [uuid for uuid in uuids if not self._cache.has(uuid)] hits = self.__search_all( {"ids": {"values": uuids}}, @@ -601,7 +603,7 @@ async def _maybe_prefetch_async(self, index): uuid = self._hits[index] if self._cache.has(uuid): return - uuids = self._hits[index:min(index + 100, len(self._hits))] + uuids = self._hits[index : min(index + 100, len(self._hits))] uuids = [uuid for uuid in uuids if not self._cache.has(uuid)] hits = await self.__search_all_async( {"ids": {"values": uuids}}, @@ -621,7 +623,9 @@ def get_objects( size = ( 1000 if select is False - else 100 if isinstance(select, list) else 10 + else 100 + if isinstance(select, list) + else 10 ) return self.__search_all( {"ids": {"values": uuids}}, size=size, select=select @@ -635,7 +639,9 @@ async def get_objects_async( size = ( 1000 if select is False - else 100 if isinstance(select, list) else 10 + else 100 + if isinstance(select, list) + else 10 ) return await self.__search_all_async( {"ids": {"values": uuids}}, size=size, select=select @@ -773,9 +779,7 @@ def _get_field_values(self, field: str) -> List: """ if field not in self._field_values: buckets = self._get_buckets(field) - self._field_values[field] = list( - map(lambda bucket: bucket["key"], buckets) - ) + self._field_values[field] = [bucket["key"] for bucket in buckets] return self._field_values[field] @@ -790,9 +794,7 @@ async def _get_field_values_async(self, field: str) -> List: """ if field not in self._field_values: buckets = await self._get_buckets_async(field) - self._field_values[field] = list( - map(lambda bucket: bucket["key"], buckets) - ) + self._field_values[field] = [bucket["key"] for bucket in buckets] return self._field_values[field] @@ -808,27 +810,33 @@ def _context_for_class(self, cls): @property def hidden(self): - return SearchContext(sumo=self._sumo, - must=self._must, - must_not = self._must_not, - hidden = True, - visible = False) + return SearchContext( + sumo=self._sumo, + must=self._must, + must_not=self._must_not, + hidden=True, + visible=False, + ) @property def visible(self): - return SearchContext(sumo=self._sumo, - must=self._must, - must_not = self._must_not, - hidden = False, - visible = True) + return SearchContext( + sumo=self._sumo, + must=self._must, + must_not=self._must_not, + hidden=False, + visible=True, + ) @property def all(self): - return SearchContext(sumo=self._sumo, - must=self._must, - must_not = self._must_not, - hidden = True, - visible = True) + return SearchContext( + sumo=self._sumo, + must=self._must, + must_not=self._must_not, + hidden=True, + visible=True, + ) @property def cases(self): @@ -851,7 +859,9 @@ def iterations(self): @property async def iterations_async(self): """Iterations from current selection.""" - uuids = await self._get_field_values_async("fmu.iteration.uuid.keyword") + uuids = await self._get_field_values_async( + "fmu.iteration.uuid.keyword" + ) return objects.Iterations(self, uuids) @property @@ -863,12 +873,14 @@ def realizations(self): @property async def realizations_async(self): """Realizations from current selection.""" - uuids = await self._get_field_values_async("fmu.realization.uuid.keyword") + uuids = await self._get_field_values_async( + "fmu.realization.uuid.keyword" + ) return objects.Realizations(self, uuids) @property - def template_paths(sc): - return set([obj.template_path for obj in sc]) + def template_paths(search_context): # noqa: N805 + return {obj.template_path for obj in search_context} @property def metrics(self): @@ -961,7 +973,13 @@ def filter(self, 
**kwargs) -> "SearchContext": if _must_not is not None: must_not.append(_must_not) - sc = SearchContext(self._sumo, must=must, must_not=must_not, hidden=self._hidden, visible = self._visible) + sc = SearchContext( + self._sumo, + must=must, + must_not=must_not, + hidden=self._hidden, + visible=self._visible, + ) if "has" in kwargs: # Get list of cases matched by current filter set @@ -1248,7 +1266,7 @@ def _verify_aggregation_operation(self): hits = self._search_all(select=["fmu.realization.id"]) if any( - [hit["_source"]["fmu"].get("realization") is None for hit in hits] + hit["_source"]["fmu"].get("realization") is None for hit in hits ): raise Exception("Selection contains non-realization data.") @@ -1278,7 +1296,11 @@ def _aggregate(self, columns=None, operation=None): spec["columns"] = columns cols = columns[:] table_index = prototype["_source"]["data"].get("table_index") - if table_index is not None and len(table_index) != 0 and table_index[0] not in cols: + if ( + table_index is not None + and len(table_index) != 0 + and table_index[0] not in cols + ): cols.insert(0, table_index[0]) pass prototype["_source"]["data"]["spec"]["columns"] = cols @@ -1298,33 +1320,49 @@ def aggregate(self, columns=None, operation=None): if len(self.hidden) > 0: return self.hidden._aggregate(columns=columns, operation=operation) else: - return self.visible._aggregate(columns=columns, operation=operation) + return self.visible._aggregate( + columns=columns, operation=operation + ) - @deprecation.deprecated(details="Use the method 'aggregate' instead, with parameter 'operation'.") + @deprecation.deprecated( + details="Use the method 'aggregate' instead, with parameter 'operation'." + ) def min(self): return self.aggregate(operation="min") - @deprecation.deprecated(details="Use the method 'aggregate' instead, with parameter 'operation'.") + @deprecation.deprecated( + details="Use the method 'aggregate' instead, with parameter 'operation'." + ) def max(self): return self.aggregate(operation="max") - @deprecation.deprecated(details="Use the method 'aggregate' instead, with parameter 'operation'.") + @deprecation.deprecated( + details="Use the method 'aggregate' instead, with parameter 'operation'." + ) def mean(self): return self.aggregate(operation="mean") - @deprecation.deprecated(details="Use the method 'aggregate' instead, with parameter 'operation'.") + @deprecation.deprecated( + details="Use the method 'aggregate' instead, with parameter 'operation'." + ) def std(self): return self.aggregate(operation="std") - @deprecation.deprecated(details="Use the method 'aggregate' instead, with parameter 'operation'.") + @deprecation.deprecated( + details="Use the method 'aggregate' instead, with parameter 'operation'." + ) def p10(self): return self.aggregate(operation="p10") - @deprecation.deprecated(details="Use the method 'aggregate' instead, with parameter 'operation'.") + @deprecation.deprecated( + details="Use the method 'aggregate' instead, with parameter 'operation'." + ) def p50(self): return self.aggregate(operation="p50") - @deprecation.deprecated(details="Use the method 'aggregate' instead, with parameter 'operation'.") + @deprecation.deprecated( + details="Use the method 'aggregate' instead, with parameter 'operation'." 
+ ) def p90(self): return self.aggregate(operation="p90") diff --git a/src/fmu/sumo/explorer/objects/case.py b/src/fmu/sumo/explorer/objects/case.py index e89723bb..60b945fe 100644 --- a/src/fmu/sumo/explorer/objects/case.py +++ b/src/fmu/sumo/explorer/objects/case.py @@ -1,7 +1,9 @@ """Module containing case class""" -from typing import Dict, List +from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._document import Document from fmu.sumo.explorer.objects._search_context import SearchContext @@ -16,77 +18,54 @@ def _make_overview_query(id): return { - "query": { - "term": { - "fmu.case.uuid.keyword": id - } - }, + "query": {"term": {"fmu.case.uuid.keyword": id}}, "aggs": { "iteration_uuids": { - "terms": { - "field": "fmu.iteration.uuid.keyword", - "size": 100 - } + "terms": {"field": "fmu.iteration.uuid.keyword", "size": 100} }, "iteration_names": { - "terms": { - "field": "fmu.iteration.name.keyword", - "size": 100 - } - }, - "data_types": { - "terms": { - "field": "class.keyword", - "size": 100 - } + "terms": {"field": "fmu.iteration.name.keyword", "size": 100} }, + "data_types": {"terms": {"field": "class.keyword", "size": 100}}, "iterations": { - "terms": { - "field": "fmu.iteration.uuid.keyword", - "size": 100 - }, + "terms": {"field": "fmu.iteration.uuid.keyword", "size": 100}, "aggs": { "iteration_name": { "terms": { "field": "fmu.iteration.name.keyword", - "size": 100 + "size": 100, } }, "numreal": { - "cardinality": { - "field": "fmu.realization.id" - } + "cardinality": {"field": "fmu.realization.id"} }, - "maxreal": { - "max": { - "field": "fmu.realization.id" - } - }, - "minreal": { - "min": { - "field": "fmu.realization.id" - } - } - } - } + "maxreal": {"max": {"field": "fmu.realization.id"}}, + "minreal": {"min": {"field": "fmu.realization.id"}}, + }, + }, }, - "size": 0 + "size": 0, } + class Case(Document, SearchContext): """Class for representing a case in Sumo""" def __init__(self, sumo: SumoClient, metadata: Dict): Document.__init__(self, metadata) - SearchContext.__init__(self, sumo, must=[{"term": {"fmu.case.uuid.keyword": self.uuid}}]) + SearchContext.__init__( + self, sumo, must=[{"term": {"fmu.case.uuid.keyword": self.uuid}}] + ) self._overview = None self._iterations = None @property def overview(self): """Overview of case contents.""" + def extract_bucket_keys(bucket, name): return [b["key"] for b in bucket[name]["buckets"]] + if self._overview is None: query = _make_overview_query(self._uuid) res = self._sumo.post("/search", json=query) @@ -106,15 +85,15 @@ def extract_bucket_keys(bucket, name): "name": itername, "minreal": minreal, "maxreal": maxreal, - "numreal": numreal + "numreal": numreal, } self._overview = { "iteration_names": iteration_names, "iteration_uuids": iteration_uuids, "data_types": data_types, - "iterations": iterations + "iterations": iterations, } - + return self._overview diff --git a/src/fmu/sumo/explorer/objects/cases.py b/src/fmu/sumo/explorer/objects/cases.py index fa50b2e5..0918beea 100644 --- a/src/fmu/sumo/explorer/objects/cases.py +++ b/src/fmu/sumo/explorer/objects/cases.py @@ -1,10 +1,13 @@ -""" Module for searchcontext for collection of cases. 
""" +"""Module for searchcontext for collection of cases.""" from fmu.sumo.explorer.objects._search_context import SearchContext + class Cases(SearchContext): def __init__(self, sc, uuids): - super().__init__(sc._sumo, must=[{"terms": {"fmu.case.uuid.keyword": uuids}}]) + super().__init__( + sc._sumo, must=[{"terms": {"fmu.case.uuid.keyword": uuids}}] + ) self._hits = uuids return @@ -18,4 +21,3 @@ def filter(self, **kwargs): sc = super().filter(**kwargs) uuids = sc.uuids return Cases(self, uuids) - diff --git a/src/fmu/sumo/explorer/objects/cpgrid.py b/src/fmu/sumo/explorer/objects/cpgrid.py index fceb291f..652f752f 100644 --- a/src/fmu/sumo/explorer/objects/cpgrid.py +++ b/src/fmu/sumo/explorer/objects/cpgrid.py @@ -1,9 +1,12 @@ """Module containing class for cpgrid""" from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._child import Child + class CPGrid(Child): """Class representing a cpgrid object in Sumo.""" @@ -24,7 +27,9 @@ def to_cpgrid(self): try: from xtgeo import grid_from_file except ModuleNotFoundError: - raise RuntimeError("Unable to import xtgeo; probably not installed.") + raise RuntimeError( + "Unable to import xtgeo; probably not installed." + ) try: return grid_from_file(self.blob) except TypeError as type_err: @@ -38,7 +43,9 @@ async def to_cpgrid_async(self): try: from xtgeo import grid_from_file except ModuleNotFoundError: - raise RuntimeError("Unable to import xtgeo; probably not installed.") + raise RuntimeError( + "Unable to import xtgeo; probably not installed." + ) try: return grid_from_file(await self.blob_async) diff --git a/src/fmu/sumo/explorer/objects/cpgrid_property.py b/src/fmu/sumo/explorer/objects/cpgrid_property.py index b146aa94..127eacac 100644 --- a/src/fmu/sumo/explorer/objects/cpgrid_property.py +++ b/src/fmu/sumo/explorer/objects/cpgrid_property.py @@ -1,9 +1,12 @@ """Module containing class for cpgrid_property""" from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._child import Child + class CPGridProperty(Child): """Class representing a cpgrid_property object in Sumo.""" @@ -24,7 +27,9 @@ def to_cpgrid_property(self): try: from xtgeo import gridproperty_from_file except ModuleNotFoundError: - raise RuntimeError("Unable to import xtgeo; probably not installed.") + raise RuntimeError( + "Unable to import xtgeo; probably not installed." + ) try: return gridproperty_from_file(self.blob) except TypeError as type_err: @@ -38,7 +43,9 @@ async def to_cpgrid_property_async(self): try: from xtgeo import gridproperty_from_file except ModuleNotFoundError: - raise RuntimeError("Unable to import xtgeo; probably not installed.") + raise RuntimeError( + "Unable to import xtgeo; probably not installed." + ) try: return gridproperty_from_file(await self.blob_async) diff --git a/src/fmu/sumo/explorer/objects/cube.py b/src/fmu/sumo/explorer/objects/cube.py index 6eaa73ae..2c41c488 100644 --- a/src/fmu/sumo/explorer/objects/cube.py +++ b/src/fmu/sumo/explorer/objects/cube.py @@ -1,11 +1,10 @@ """Module containing class for cube object""" -import json from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._child import Child -import sys -import warnings class Cube(Child): @@ -82,7 +81,9 @@ def openvds_handle(self): try: import openvds except ModuleNotFoundError: - raise RuntimeError("Unable to import openvds; probably not installed.") + raise RuntimeError( + "Unable to import openvds; probably not installed." 
+ ) if self._url is None: self._populate_url() @@ -99,7 +100,9 @@ async def openvds_handle_async(self): try: import openvds except ModuleNotFoundError: - raise RuntimeError("Unable to import openvds; probably not installed.") + raise RuntimeError( + "Unable to import openvds; probably not installed." + ) if self._url is None: await self._populate_url_async() diff --git a/src/fmu/sumo/explorer/objects/dictionary.py b/src/fmu/sumo/explorer/objects/dictionary.py index 182b101f..8c1169fe 100644 --- a/src/fmu/sumo/explorer/objects/dictionary.py +++ b/src/fmu/sumo/explorer/objects/dictionary.py @@ -2,7 +2,9 @@ import json from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._child import Child diff --git a/src/fmu/sumo/explorer/objects/iteration.py b/src/fmu/sumo/explorer/objects/iteration.py index 9b7b0248..3da02f67 100644 --- a/src/fmu/sumo/explorer/objects/iteration.py +++ b/src/fmu/sumo/explorer/objects/iteration.py @@ -1,7 +1,9 @@ -""" Module for (pseudo) iteration class. """ +"""Module for (pseudo) iteration class.""" from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._document import Document from fmu.sumo.explorer.objects._search_context import SearchContext @@ -20,7 +22,11 @@ class Iteration(Document, SearchContext): def __init__(self, sumo: SumoClient, metadata: Dict): Document.__init__(self, metadata) - SearchContext.__init__(self, sumo, must=[{"term": {"fmu.iteration.uuid.keyword": self.uuid}}]) + SearchContext.__init__( + self, + sumo, + must=[{"term": {"fmu.iteration.uuid.keyword": self.uuid}}], + ) Iteration.map_properties(Iteration, _prop_desc) diff --git a/src/fmu/sumo/explorer/objects/iterations.py b/src/fmu/sumo/explorer/objects/iterations.py index bdee9306..9275c9aa 100644 --- a/src/fmu/sumo/explorer/objects/iterations.py +++ b/src/fmu/sumo/explorer/objects/iterations.py @@ -1,11 +1,15 @@ -""" Module for searchcontext for collection of iterations. 
""" +"""Module for searchcontext for collection of iterations.""" from typing import Dict, List + from fmu.sumo.explorer.objects._search_context import SearchContext + class Iterations(SearchContext): def __init__(self, sc, uuids): - super().__init__(sc._sumo, must=[{"terms": {"fmu.iteration.uuid.keyword": uuids}}]) + super().__init__( + sc._sumo, must=[{"terms": {"fmu.iteration.uuid.keyword": uuids}}] + ) self._hits = uuids return @@ -14,7 +18,7 @@ def _maybe_prefetch(self, index): async def _maybe_prefetch_async(self, index): return - + def get_object(self, uuid: str, select: List[str] = None) -> Dict: """Get metadata object by uuid @@ -34,7 +38,7 @@ def get_object(self, uuid: str, select: List[str] = None) -> Dict: return obj async def get_object_async( - self, uuid: str, select: List[str] = None + self, uuid: str, select: List[str] = None ) -> Dict: """Get metadata object by uuid diff --git a/src/fmu/sumo/explorer/objects/polygons.py b/src/fmu/sumo/explorer/objects/polygons.py index 68599a18..f8a124ad 100644 --- a/src/fmu/sumo/explorer/objects/polygons.py +++ b/src/fmu/sumo/explorer/objects/polygons.py @@ -1,7 +1,9 @@ """Module containing class for polygons object""" from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._child import Child @@ -24,6 +26,7 @@ def to_pandas(self): """ import pandas as pd + try: return pd.read_csv(self.blob) except TypeError as type_err: @@ -37,6 +40,7 @@ async def to_pandas_async(self): """ import pandas as pd + try: return pd.read_csv(await self.blob_async) except TypeError as type_err: diff --git a/src/fmu/sumo/explorer/objects/realization.py b/src/fmu/sumo/explorer/objects/realization.py index 3f1227c2..30c01f92 100644 --- a/src/fmu/sumo/explorer/objects/realization.py +++ b/src/fmu/sumo/explorer/objects/realization.py @@ -1,7 +1,9 @@ -""" Module for (pseudo) realization class. """ +"""Module for (pseudo) realization class.""" from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._document import Document from fmu.sumo.explorer.objects._search_context import SearchContext @@ -21,7 +23,11 @@ class Realization(Document, SearchContext): def __init__(self, sumo: SumoClient, metadata: Dict): Document.__init__(self, metadata) - SearchContext.__init__(self, sumo, must=[{"term": {"fmu.realization.uuid.keyword": self.uuid}}]) + SearchContext.__init__( + self, + sumo, + must=[{"term": {"fmu.realization.uuid.keyword": self.uuid}}], + ) Realization.map_properties(Realization, _prop_desc) diff --git a/src/fmu/sumo/explorer/objects/realizations.py b/src/fmu/sumo/explorer/objects/realizations.py index 405ab644..5574d578 100644 --- a/src/fmu/sumo/explorer/objects/realizations.py +++ b/src/fmu/sumo/explorer/objects/realizations.py @@ -1,11 +1,15 @@ -""" Module for searchcontext for collection of realizations. 
""" +"""Module for searchcontext for collection of realizations.""" from typing import Dict, List + from fmu.sumo.explorer.objects._search_context import SearchContext + class Realizations(SearchContext): def __init__(self, sc, uuids): - super().__init__(sc._sumo, must=[{"terms": {"fmu.realization.uuid.keyword": uuids}}]) + super().__init__( + sc._sumo, must=[{"terms": {"fmu.realization.uuid.keyword": uuids}}] + ) self._hits = uuids return @@ -14,7 +18,7 @@ def _maybe_prefetch(self, index): async def _maybe_prefetch_async(self, index): return - + def get_object(self, uuid: str, select: List[str] = None) -> Dict: """Get metadata object by uuid diff --git a/src/fmu/sumo/explorer/objects/surface.py b/src/fmu/sumo/explorer/objects/surface.py index f8956ba2..cf47d5b9 100644 --- a/src/fmu/sumo/explorer/objects/surface.py +++ b/src/fmu/sumo/explorer/objects/surface.py @@ -1,7 +1,9 @@ """Module containg class for surface""" from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._child import Child @@ -26,7 +28,9 @@ def to_regular_surface(self): try: from xtgeo import surface_from_file except ModuleNotFoundError: - raise RuntimeError("Unable to import xtgeo; probably not installed.") + raise RuntimeError( + "Unable to import xtgeo; probably not installed." + ) try: return surface_from_file(self.blob) @@ -42,7 +46,9 @@ async def to_regular_surface_async(self): try: from xtgeo import surface_from_file except ModuleNotFoundError: - raise RuntimeError("Unable to import xtgeo; probably not installed.") + raise RuntimeError( + "Unable to import xtgeo; probably not installed." + ) try: return surface_from_file(await self.blob_async) diff --git a/src/fmu/sumo/explorer/objects/table.py b/src/fmu/sumo/explorer/objects/table.py index 56636dd1..bbc12e60 100644 --- a/src/fmu/sumo/explorer/objects/table.py +++ b/src/fmu/sumo/explorer/objects/table.py @@ -1,9 +1,11 @@ """module containing class for table""" import logging +from typing import Dict + from sumo.wrapper import SumoClient + from fmu.sumo.explorer.objects._child import Child -from typing import Dict class Table(Child): @@ -51,16 +53,16 @@ def _construct_table_from_blob(self, blob): raise TypeError( f"Don't know how to convert a blob of format {self.dataformat} to a pandas table." ) - except Exception as ex0: + except Exception: try: dataframe = pd.read_csv(blob) - except Exception as ex: + except Exception: try: dataframe = pd.read_parquet(blob) - except Exception as ex: + except Exception: try: dataframe = pf.read_feather(blob) - except Exception as ex: + except Exception: raise TypeError( f"Unable to convert a blob of format {self.dataformat} to pandas table; tried csv, parquet and feather." ) @@ -98,8 +100,8 @@ async def _read_arrow_async(self): def _construct_arrow_from_blob(self, blob): import pandas as pd import pyarrow as pa - import pyarrow.parquet as pq import pyarrow.feather as pf + import pyarrow.parquet as pq try: if self.dataformat == "csv": @@ -112,16 +114,16 @@ def _construct_arrow_from_blob(self, blob): raise TypeError( f"Don't know how to convert a blob of format {self.dataformat} to a pandas table." 
) - except Exception as ex0: + except Exception: try: arrowtable = pa.Table.from_pandas(pd.read_csv(blob)) - except Exception as ex: + except Exception: try: arrowtable = pq.read_table(blob) - except Exception as ex: + except Exception: try: arrowtable = pf.read_table(blob) - except Exception as ex: + except Exception: raise TypeError( f"Unable to convert a blob of format {self.dataformat} to arrow; tried csv, parquet and feather." ) diff --git a/src/fmu/sumo/explorer/objects/table_aggregated.py b/src/fmu/sumo/explorer/objects/table_aggregated.py index dafd3162..75c6e821 100644 --- a/src/fmu/sumo/explorer/objects/table_aggregated.py +++ b/src/fmu/sumo/explorer/objects/table_aggregated.py @@ -1,7 +1,7 @@ """module containing class for table""" -from fmu.sumo.explorer.objects.case import Case from fmu.sumo.explorer._utils import Utils +from fmu.sumo.explorer.objects.case import Case from fmu.sumo.explorer.objects.table import Table diff --git a/tests/context.py b/tests/context.py index f2efa6c5..6abe7ab6 100644 --- a/tests/context.py +++ b/tests/context.py @@ -1,7 +1,16 @@ """context pytest""" + import sys from pathlib import Path +from fmu.sumo.explorer import Explorer +from fmu.sumo.explorer.objects._document import Document +from fmu.sumo.explorer.objects._search_context import SearchContext +from fmu.sumo.explorer.objects.case import Case +from fmu.sumo.explorer.objects.polygons import Polygons +from fmu.sumo.explorer.objects.surface import Surface +from fmu.sumo.explorer.objects.table import Table + def add_path(): """Way to add package path to sys.path for testing""" @@ -14,11 +23,3 @@ def add_path(): add_path() - -from fmu.sumo.explorer import Explorer -from fmu.sumo.explorer.objects._search_context import SearchContext -from fmu.sumo.explorer.objects._document import Document -from fmu.sumo.explorer.objects.case import Case -from fmu.sumo.explorer.objects.surface import Surface -from fmu.sumo.explorer.objects.polygons import Polygons -from fmu.sumo.explorer.objects.table import Table diff --git a/tests/test_access/tst_access_drogon_affiliate_login.py b/tests/test_access/tst_access_drogon_affiliate_login.py index 859b96be..64f17ff4 100644 --- a/tests/test_access/tst_access_drogon_affiliate_login.py +++ b/tests/test_access/tst_access_drogon_affiliate_login.py @@ -1,11 +1,12 @@ """Test access to SUMO using a DROGON-AFFILIATE login. - Shall only run in Github Actions as a specific user with - specific access rights. Running this test with your personal login - will fail.""" -import os -import sys -import json +Shall only run in Github Actions as a specific user with +specific access rights. 
Running this test with your personal login +will fail.""" + import inspect +import json +import os + import pytest from context import ( Explorer, @@ -35,7 +36,7 @@ def test_admin_access(explorer: Explorer): with pytest.raises(Exception, match="403*"): print("About to call an admin endpoint which should raise exception") explorer._sumo.get( - f"/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" + "/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" ) print("Execution should never reach this line") @@ -43,13 +44,13 @@ def test_admin_access(explorer: Explorer): def test_get_userpermissions(explorer: Explorer): """Test the userpermissions""" print("Running test:", inspect.currentframe().f_code.co_name) - response = explorer._sumo.get(f"/userpermissions") + response = explorer._sumo.get("/userpermissions") print("/Userpermissions response: ", response.text) userperms = json.loads(response.text) assert "Drogon" in userperms assert "affiliate" in userperms.get("Drogon") - assert 1 == len(userperms.get("Drogon")) - assert 1 == len(userperms) + assert len(userperms.get("Drogon")) == 1 + assert len(userperms) == 1 def test_get_cases(explorer: Explorer): @@ -61,8 +62,8 @@ def test_get_cases(explorer: Explorer): assert case.field.lower() == "drogon" assert len(cases) >= 1 - # We have set up 1 case in KEEP in Drogon DEV - # with affiliate-access and it has 2 children + # We have set up 1 case in KEEP in Drogon DEV + # with affiliate-access and it has 2 children # objects with affiliate access filtered_cases = cases.filter(uuid="2c2f47cf-c7ab-4112-87f9-b4797ec51cb6") assert len(filtered_cases) == 1 @@ -76,7 +77,7 @@ def test_get_cases(explorer: Explorer): assert case.polygons[0].uuid == "a5f38286-5cf6-d85c-9b3c-03c72b5947d5" assert case.surfaces[0].uuid == "5f73b0c1-3bdc-2d0e-1a1d-271331615999" - # Many Drogon cases might have been shared now, who knows. + # Many Drogon cases might have been shared now, who knows. 
# Ensure that all returned cases have correct metadata: for case in cases: affiliate_roles = case._metadata.get("access").get("affiliate_roles") @@ -89,40 +90,44 @@ def test_get_object(explorer: Explorer): cases = explorer.cases print("Number of cases: ", len(cases)) - # We have set up a KEEP case in Drogon DEV with + # We have set up a KEEP case in Drogon DEV with # objects with affiliate-access # Read one child object child_object_uuid = "a5f38286-5cf6-d85c-9b3c-03c72b5947d5" response = explorer._sumo.get(f"/objects('{child_object_uuid}')") - print ("child retval:", response) + print("child retval:", response) print("child retval.content:", response.content) assert response.status_code == 200 response_json = json.loads(response.text) child_uuid = response_json.get("_id") print("child_uuid returned:", child_uuid) assert child_uuid == child_object_uuid - classification = response_json.get("_source").get("access").get("classification") + classification = ( + response_json.get("_source").get("access").get("classification") + ) assert classification == "internal" - # Read the other child object (which also have + # Read the other child object (which also have # access.classification:restricted) child_object_uuid = "5f73b0c1-3bdc-2d0e-1a1d-271331615999" response = explorer._sumo.get(f"/objects('{child_object_uuid}')") - print ("child retval:", response) + print("child retval:", response) print("child retval.content:", response.content) assert response.status_code == 200 response_json = json.loads(response.text) child_uuid = response_json.get("_id") print("child_uuid returned:", child_uuid) assert child_uuid == child_object_uuid - classification = response_json.get("_source").get("access").get("classification") + classification = ( + response_json.get("_source").get("access").get("classification") + ) assert classification == "restricted" # Read the case object case_object_uuid = "2c2f47cf-c7ab-4112-87f9-b4797ec51cb6" response = explorer._sumo.get(f"/objects('{case_object_uuid}')") - print ("case retval:", response) + print("case retval:", response) print("case retval.content:", response.content) assert response.status_code == 200 response_json = json.loads(response.text) @@ -142,6 +147,7 @@ def test_get_object(explorer: Explorer): # The exact number of shared Drogon cases cannot be known assert len(cases) >= 1 + def test_delete(explorer: Explorer): """Test a delete method""" print("Running test:", inspect.currentframe().f_code.co_name) @@ -188,12 +194,12 @@ def test_write(explorer: Explorer): print("Unexpected response: ", response.text) -def test_read_restricted_classification_data(explorer: Explorer): +def test_read_restricted_classification_data(explorer: Explorer): """Test if can read restricted data aka 'access:classification: restricted'""" print("Running test:", inspect.currentframe().f_code.co_name) # access.classification:restricted is available, - # EVEN for this DROGON-AFFILIATE user + # EVEN for this DROGON-AFFILIATE user # (This differs from DROGON-READ which cannot read restricted) response = explorer._sumo.get( "/search?%24query=access.classification%3Arestricted" @@ -204,6 +210,7 @@ def test_read_restricted_classification_data(explorer: Explorer): print("Hits on restricted:", hits) assert hits >= 1 + # Remove or update this test when bulk aggregation is finalized # @pytest.mark.skipif(not (sys.platform == "linux" and # sys.version_info[:2] == (3, 11)), @@ -235,27 +242,27 @@ def test_aggregations_fast(explorer: Explorer): # Fixed test case ("Drogon_AHM_2023-02-22") in Sumo/DEV # 
This user has AFFILIATE and can READ, but this case # is not set up with AFFILIATE access, so should fail - TESTCASE_UUID = "10f41041-2c17-4374-a735-bb0de62e29dc" - print("About to trigger fast-aggregation on case", TESTCASE_UUID) - SURFACE_UUID_1 = "ae6cf480-12ba-77ca-848e-92e707556b63" - SURFACE_UUID_2 = "7189835b-cc8a-2a8e-4a34-dde2ceb2a69c" + testcase_uuid = "10f41041-2c17-4374-a735-bb0de62e29dc" + print("About to trigger fast-aggregation on case", testcase_uuid) + surface_uuid_1 = "ae6cf480-12ba-77ca-848e-92e707556b63" + surface_uuid_2 = "7189835b-cc8a-2a8e-4a34-dde2ceb2a69c" body = { "operations": ["min"], - "object_ids": [SURFACE_UUID_1, SURFACE_UUID_2], + "object_ids": [surface_uuid_1, surface_uuid_2], "class": "surface", "iteration_name": "iter-0", } - print("About to trigger fast-aggregation on hardcoded case", TESTCASE_UUID) + print("About to trigger fast-aggregation on hardcoded case", testcase_uuid) print("using body", body) with pytest.raises(Exception, match="40*"): - response = explorer._sumo.post(f"/aggregations", json=body) + response = explorer._sumo.post("/aggregations", json=body) print("Execution should never reach this line") print("Unexpected status: ", response.status_code) print("Unexpected response: ", response.text) # TODO: TBC: Consider setting up a case with affiliate access on all -# surfaces, so we can test successful fast aggregation. Need first +# surfaces, so we can test successful fast aggregation. Need first # a clarification if affiliates are allowed fast aggregation or not diff --git a/tests/test_access/tst_access_drogon_manage_login.py b/tests/test_access/tst_access_drogon_manage_login.py index 81d179e2..e68af9f3 100644 --- a/tests/test_access/tst_access_drogon_manage_login.py +++ b/tests/test_access/tst_access_drogon_manage_login.py @@ -1,12 +1,12 @@ """Test access to SUMO using a DROGON-MANAGE login. - Shall only run in Github Actions as a specific user with - specific access rights. Running this test with your personal login - will fail.""" +Shall only run in Github Actions as a specific user with +specific access rights. 
Running this test with your personal login +will fail.""" -import os -import sys -import json import inspect +import json +import os + import pytest from context import ( Explorer, @@ -36,7 +36,7 @@ def test_admin_access(explorer: Explorer): with pytest.raises(Exception, match="403*"): print("About to call an admin endpoint which should raise exception") explorer._sumo.get( - f"/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" + "/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" ) print("Execution should never reach this line") @@ -44,13 +44,13 @@ def test_admin_access(explorer: Explorer): def test_get_userpermissions(explorer: Explorer): """Test the userpermissions""" print("Running test:", inspect.currentframe().f_code.co_name) - response = explorer._sumo.get(f"/userpermissions") + response = explorer._sumo.get("/userpermissions") print("/Userpermissions response: ", response.text) userperms = json.loads(response.text) assert "Drogon" in userperms assert "manage" in userperms.get("Drogon") - assert 1 == len(userperms.get("Drogon")) - assert 1 == len(userperms) + assert len(userperms.get("Drogon")) == 1 + assert len(userperms) == 1 def test_get_cases(explorer: Explorer): @@ -72,7 +72,7 @@ def test_write(explorer: Explorer): case = cases[0] print("case uuid:", case.metadata.get("fmu").get("case").get("uuid")) print("About to write to a case") - response = explorer._sumo.post(f"/objects", json=case.metadata) + response = explorer._sumo.post("/objects", json=case.metadata) print(response.status_code) print(response.text) assert response.status_code == 200 @@ -96,6 +96,7 @@ def test_read_restricted_classification_data(explorer: Explorer): print("Hits on restricted:", hits) assert hits > 0 + # Remove or update this test when bulk aggregation is finalized # @pytest.mark.skipif(not (sys.platform == "linux" and # sys.version_info[:2] == (3, 11)), @@ -166,7 +167,7 @@ def test_aggregations_fast(explorer: Explorer): "class": "surface", "iteration_name": case.iterations[0].name, } - response = explorer._sumo.post(f"/aggregations", json=body) + response = explorer._sumo.post("/aggregations", json=body) print("Response status code:", response.status_code) assert response.status_code == 200 print("Length of returned aggregate object:", len(response.text)) diff --git a/tests/test_access/tst_access_drogon_read_login.py b/tests/test_access/tst_access_drogon_read_login.py index c2bfa9e2..dcb37b3c 100644 --- a/tests/test_access/tst_access_drogon_read_login.py +++ b/tests/test_access/tst_access_drogon_read_login.py @@ -1,11 +1,12 @@ """Test access to SUMO using a DROGON-READ login. - Shall only run in Github Actions as a specific user with - specific access rights. Running this test with your personal login - will fail.""" -import os -import sys -import json +Shall only run in Github Actions as a specific user with +specific access rights. 
Running this test with your personal login +will fail.""" + import inspect +import json +import os + import pytest from context import ( Explorer, @@ -35,7 +36,7 @@ def test_admin_access(explorer: Explorer): with pytest.raises(Exception, match="403*"): print("About to call an admin endpoint which should raise exception") explorer._sumo.get( - f"/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" + "/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" ) print("Execution should never reach this line") @@ -43,13 +44,13 @@ def test_admin_access(explorer: Explorer): def test_get_userpermissions(explorer: Explorer): """Test the userpermissions""" print("Running test:", inspect.currentframe().f_code.co_name) - response = explorer._sumo.get(f"/userpermissions") + response = explorer._sumo.get("/userpermissions") print("/Userpermissions response: ", response.text) userperms = json.loads(response.text) assert "Drogon" in userperms assert "read" in userperms.get("Drogon") - assert 1 == len(userperms.get("Drogon")) - assert 1 == len(userperms) + assert len(userperms.get("Drogon")) == 1 + assert len(userperms) == 1 def test_get_cases(explorer: Explorer): @@ -72,7 +73,7 @@ def test_write(explorer: Explorer): print("case uuid:", case.metadata.get("fmu").get("case").get("uuid")) with pytest.raises(Exception, match="403*"): print("About to write a case which should raise exception") - explorer._sumo.post(f"/objects", json=case.metadata) + explorer._sumo.post("/objects", json=case.metadata) print("Execution should never reach this line") @@ -81,13 +82,17 @@ def test_delete(explorer: Explorer): print("Running test:", inspect.currentframe().f_code.co_name) with pytest.raises(Exception, match="403*"): - res = explorer._sumo.delete(f"/objects('dcff880f-b35b-3598-08bc-2a408c85d204')") + res = explorer._sumo.delete( + "/objects('dcff880f-b35b-3598-08bc-2a408c85d204')" + ) print("Execution should never reach this line") print("Unexpected status: ", res.status_code) print("Unexpected response: ", res.text) with pytest.raises(Exception, match="403*"): - res = explorer._sumo.delete(f"/objects('392c3c70-dd1a-41b5-ac49-0e369a0ac4eb')") + res = explorer._sumo.delete( + "/objects('392c3c70-dd1a-41b5-ac49-0e369a0ac4eb')" + ) print("Execution should never reach this line") print("Unexpected status: ", res.status_code) print("Unexpected response: ", res.text) @@ -121,18 +126,29 @@ def test_aggregations_fast(explorer: Explorer): assert len(cases) > 0 case = None for c in cases: - if (len(c.realizations) > 1 and - len(c.surfaces) > 40 and - len(c.iterations) == 1 and - len(c.surfaces.filter(name="Therys Fm.", tagname="FACIES_Fraction_Calcite")) > 2): + if ( + len(c.realizations) > 1 + and len(c.surfaces) > 40 + and len(c.iterations) == 1 + and len( + c.surfaces.filter( + name="Therys Fm.", tagname="FACIES_Fraction_Calcite" + ) + ) + > 2 + ): case = c break assert case case_uuid = case.metadata.get("fmu").get("case").get("uuid") - surface1 = case.surfaces.filter(name="Therys Fm.", realization=0, tagname="FACIES_Fraction_Calcite") - surface2 = case.surfaces.filter(name="Therys Fm.", realization=1, tagname="FACIES_Fraction_Calcite") - print ("Len filtered: ", len(surface1)) - print ("Len filtered: ", len(surface2)) + surface1 = case.surfaces.filter( + name="Therys Fm.", realization=0, tagname="FACIES_Fraction_Calcite" + ) + surface2 = case.surfaces.filter( + name="Therys Fm.", realization=1, tagname="FACIES_Fraction_Calcite" + ) + print("Len filtered: ", 
len(surface1)) + print("Len filtered: ", len(surface2)) assert len(surface1) == 1 assert len(surface2) == 1 surface_uuids = [surface1[0].uuid, surface2[0].uuid] @@ -145,11 +161,12 @@ def test_aggregations_fast(explorer: Explorer): print("About to trigger fast-aggregation on case", case_uuid) print("using body", body) # A READ role user shall be allowed to use FAST aggregation (but not bulk aggr) - response = explorer._sumo.post(f"/aggregations", json=body) + response = explorer._sumo.post("/aggregations", json=body) print("Response status code:", response.status_code) assert response.status_code in [200, 201, 202] print("Length of returned aggregate object:", len(response.text)) + # Remove or update this test when bulk aggregation is finalized # @pytest.mark.skipif(not (sys.platform == "linux" and # sys.version_info[:2] == (3, 11)), @@ -225,4 +242,3 @@ def test_get_message_log_truncate(explorer: Explorer): print("Execution should never reach this line") print("Unexpected status: ", response.status_code) print("Unexpected response: ", response.text) - diff --git a/tests/test_access/tst_access_drogon_write_login.py b/tests/test_access/tst_access_drogon_write_login.py index 1378935e..7523f8c3 100644 --- a/tests/test_access/tst_access_drogon_write_login.py +++ b/tests/test_access/tst_access_drogon_write_login.py @@ -1,12 +1,12 @@ """Test access to SUMO using a DROGON-WRITE login. - Shall only run in Github Actions as a specific user with - specific access rights. Running this test with your personal login - will fail.""" +Shall only run in Github Actions as a specific user with +specific access rights. Running this test with your personal login +will fail.""" -import os -import sys -import json import inspect +import json +import os + import pytest from context import ( Explorer, @@ -36,7 +36,7 @@ def test_admin_access(explorer: Explorer): with pytest.raises(Exception, match="403*"): print("About to call an admin endpoint which should raise exception") explorer._sumo.get( - f"/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" + "/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" ) print("Execution should never reach this line") @@ -44,13 +44,13 @@ def test_admin_access(explorer: Explorer): def test_get_userpermissions(explorer: Explorer): """Test the userpermissions""" print("Running test:", inspect.currentframe().f_code.co_name) - response = explorer._sumo.get(f"/userpermissions") + response = explorer._sumo.get("/userpermissions") print("/Userpermissions response: ", response.text) userperms = json.loads(response.text) assert "Drogon" in userperms assert "write" in userperms.get("Drogon") - assert 1 == len(userperms.get("Drogon")) - assert 1 == len(userperms) + assert len(userperms.get("Drogon")) == 1 + assert len(userperms) == 1 def test_get_cases(explorer: Explorer): @@ -72,7 +72,7 @@ def test_write(explorer: Explorer): case = cases[0] print("case uuid:", case.metadata.get("fmu").get("case").get("uuid")) print("About to write to a case") - response = explorer._sumo.post(f"/objects", json=case.metadata) + response = explorer._sumo.post("/objects", json=case.metadata) print(response.status_code) print(response.text) assert response.status_code == 200 @@ -96,6 +96,7 @@ def test_read_restricted_classification_data(explorer: Explorer): print("Hits on restricted:", hits) assert hits > 0 + # Remove or update this test when bulk aggregation is finalized # @pytest.mark.skipif(not (sys.platform == "linux" and # 
sys.version_info[:2] == (3, 11)), @@ -166,7 +167,7 @@ def test_aggregations_fast(explorer: Explorer): "class": "surface", "iteration_name": case.iterations[0].name, } - response = explorer._sumo.post(f"/aggregations", json=body) + response = explorer._sumo.post("/aggregations", json=body) print("Response status code:", response.status_code) assert response.status_code == 200 print("Length of returned aggregate object:", len(response.text)) diff --git a/tests/test_access/tst_access_no_access_login.py b/tests/test_access/tst_access_no_access_login.py index e4ad75bc..eff07c46 100644 --- a/tests/test_access/tst_access_no_access_login.py +++ b/tests/test_access/tst_access_no_access_login.py @@ -1,12 +1,12 @@ """Test access to SUMO using a no-access login. - Shall only run in Github Actions as a specific user with - specific access rights. Running this test with your personal login - will fail.""" +Shall only run in Github Actions as a specific user with +specific access rights. Running this test with your personal login +will fail.""" -import os -import sys -import json import inspect +import json +import os + import pytest from context import ( Explorer, @@ -36,7 +36,7 @@ def test_admin_access(explorer: Explorer): with pytest.raises(Exception, match="403*"): print("About to call an admin endpoint which should raise exception") explorer._sumo.get( - f"/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" + "/admin/make-shared-access-key?user=noreply%40equinor.com&roles=DROGON-READ&duration=111" ) print("Execution should never reach this line") @@ -44,11 +44,11 @@ def test_admin_access(explorer: Explorer): def test_get_userpermissions(explorer: Explorer): """Test the userpermissions""" print("Running test:", inspect.currentframe().f_code.co_name) - response = explorer._sumo.get(f"/userpermissions") + response = explorer._sumo.get("/userpermissions") print("/Userpermissions response: ", response.text) userperms = json.loads(response.text) assert "Drogon" not in userperms - assert 0 == len(userperms) + assert len(userperms) == 0 def test_get_cases(explorer: Explorer): @@ -78,22 +78,28 @@ def test_write(explorer: Explorer): print("Unexpected status: ", response.status_code) print("Unexpected response: ", response.text) + def test_delete(explorer: Explorer): """Test a delete method""" print("Running test:", inspect.currentframe().f_code.co_name) with pytest.raises(Exception, match="403*"): - res = explorer._sumo.delete(f"/objects('dcff880f-b35b-3598-08bc-2a408c85d204')") + res = explorer._sumo.delete( + "/objects('dcff880f-b35b-3598-08bc-2a408c85d204')" + ) print("Execution should never reach this line") print("Unexpected status: ", res.status_code) print("Unexpected response: ", res.text) with pytest.raises(Exception, match="403*"): - res = explorer._sumo.delete(f"/objects('392c3c70-dd1a-41b5-ac49-0e369a0ac4eb')") + res = explorer._sumo.delete( + "/objects('392c3c70-dd1a-41b5-ac49-0e369a0ac4eb')" + ) print("Execution should never reach this line") print("Unexpected status: ", res.status_code) print("Unexpected response: ", res.text) + def test_read_restricted_classification_data(explorer: Explorer): """Test if can read restriced data aka 'access:classification: restricted'""" print("Running test:", inspect.currentframe().f_code.co_name) @@ -157,6 +163,7 @@ def test_get_message_log_truncate(explorer: Explorer): print("Unexpected status: ", response.status_code) print("Unexpected response: ", response.text) + # Remove or update this test when bulk aggregation is 
finalized # @pytest.mark.skipif(not (sys.platform == "linux" and # sys.version_info[:2] == (3, 11)), @@ -184,20 +191,20 @@ def test_aggregations_fast(explorer: Explorer): """Test a fast aggregation method""" print("Running test:", inspect.currentframe().f_code.co_name) # Fixed test case ("Drogon_AHM_2023-02-22") in Sumo/DEV - TESTCASE_UUID = "10f41041-2c17-4374-a735-bb0de62e29dc" - print("About to trigger fast-aggregation on case", TESTCASE_UUID) - SURFACE_UUID_1 = "ae6cf480-12ba-77ca-848e-92e707556b63" - SURFACE_UUID_2 = "7189835b-cc8a-2a8e-4a34-dde2ceb2a69c" + testcase_uuid = "10f41041-2c17-4374-a735-bb0de62e29dc" + print("About to trigger fast-aggregation on case", testcase_uuid) + surface_uuid_1 = "ae6cf480-12ba-77ca-848e-92e707556b63" + surface_uuid_2 = "7189835b-cc8a-2a8e-4a34-dde2ceb2a69c" body = { "operations": ["min"], - "object_ids": [SURFACE_UUID_1, SURFACE_UUID_2], + "object_ids": [surface_uuid_1, surface_uuid_2], "class": "surface", "iteration_name": "iter-0", } - print("About to trigger fast-aggregation on hardcoded case", TESTCASE_UUID) + print("About to trigger fast-aggregation on hardcoded case", testcase_uuid) print("using body", body) with pytest.raises(Exception, match="40*"): - response = explorer._sumo.post(f"/aggregations", json=body) + response = explorer._sumo.post("/aggregations", json=body) print("Execution should never reach this line") print("Unexpected status: ", response.status_code) print("Unexpected response: ", response.text) diff --git a/tests/test_explorer.py b/tests/test_explorer.py index 293d6f3d..9057947c 100644 --- a/tests/test_explorer.py +++ b/tests/test_explorer.py @@ -1,24 +1,22 @@ """Tests explorer""" -from platform import python_version import sys if not sys.platform.startswith("darwin") and sys.version_info < (3, 12): import openvds -import logging import json +import logging from pathlib import Path from uuid import UUID + import pytest -from xtgeo import RegularSurface from context import ( + Case, Explorer, SearchContext, - Case, ) - from sumo.wrapper import SumoClient - +from xtgeo import RegularSurface TEST_DATA = Path("data") logging.basicConfig(level="DEBUG") @@ -219,7 +217,6 @@ def test_case_surfaces_filter(test_case: Case): real_surfs = real_surfs.filter(iteration="iter-0") assert len(real_surfs) == 212 - # for surf in real_surfs: # assert surf.iteration == "iter-0" its = real_surfs._get_field_values("fmu.iteration.name.keyword") @@ -239,7 +236,7 @@ def test_case_surfaces_filter(test_case: Case): assert len(its) == 1 and its[0] == "iter-0" names = real_surfs._get_field_values("data.name.keyword") assert len(names) == 1 and names[0] == "Valysar Fm." - + # filter on content non_valid_content_surfs = real_surfs.filter(content="___not_valid") assert len(non_valid_content_surfs) == 0 @@ -264,7 +261,7 @@ def test_case_surfaces_filter(test_case: Case): assert len(names) == 1 and names[0] == "Valysar Fm." 
tagnames = real_surfs._get_field_values("data.tagname.keyword") assert len(tagnames) == 1 and tagnames[0] == "FACIES_Fraction_Channel" - + # filter on data format non_valid_format_surfs = real_surfs.filter(dataformat="___not_valid") assert len(non_valid_format_surfs) == 0 @@ -317,7 +314,7 @@ def test_seismic_case_by_uuid(explorer: Explorer, seismic_case_uuid: str): cube = case.cubes[0] openvds_handle = cube.openvds_handle - layout = openvds.getLayout(openvds_handle) + layout = openvds.getLayout(openvds_handle) # type: ignore channel_count = layout.getChannelCount() assert channel_count == 3 channel_list = [] @@ -327,26 +324,28 @@ def test_seismic_case_by_uuid(explorer: Explorer, seismic_case_uuid: str): assert "Trace" in channel_list assert "SEGYTraceHeader" in channel_list + def test_grids_and_properties(explorer: Explorer): cases_with_grids = explorer.grids.cases.filter(status="keep") cases_with_gridprops = explorer.grid_properties.cases.filter(status="keep") - cgs=set([case.uuid for case in cases_with_grids]) - cgps=set([case.uuid for case in cases_with_gridprops]) - assert cgs==cgps - case=cases_with_grids[0] - grids=case.grids - gridprops=case.grid_properties - xtgrid=grids[0].to_cpgrid() - gridspec=grids[0].metadata["data"]["spec"] + cgs = {case.uuid for case in cases_with_grids} + cgps = {case.uuid for case in cases_with_gridprops} + assert cgs == cgps + case = cases_with_grids[0] + grids = case.grids + gridprops = case.grid_properties + xtgrid = grids[0].to_cpgrid() + gridspec = grids[0].metadata["data"]["spec"] assert xtgrid.nlay == gridspec["nlay"] assert xtgrid.nrow == gridspec["nrow"] assert xtgrid.ncol == gridspec["ncol"] - xtgridprop=gridprops[0].to_cpgrid_property() + xtgridprop = gridprops[0].to_cpgrid_property() gridpropspec = gridprops[0].metadata["data"]["spec"] assert xtgridprop.nlay == gridpropspec["nlay"] assert xtgridprop.nrow == gridpropspec["nrow"] assert xtgridprop.ncol == gridpropspec["ncol"] + def test_search_context_select(test_case: Case): surfs = test_case.surfaces.filter(realization=True) assert "_sumo" in surfs[0].metadata diff --git a/tests/test_objects_table.py b/tests/test_objects_table.py index a7539481..1638eb9d 100644 --- a/tests/test_objects_table.py +++ b/tests/test_objects_table.py @@ -1,42 +1,49 @@ """Test table objects. - * Table - * AggregatedTable - * TableCollection +* Table +* AggregatedTable +* TableCollection """ + import pandas as pd import pyarrow as pa -from fmu.sumo.explorer import Explorer import pytest +from fmu.sumo.explorer import Explorer + # Fixed test case ("Drogon_AHM_2023-02-22") in Sumo/DEV TESTCASE_UUID = "10f41041-2c17-4374-a735-bb0de62e29dc" + @pytest.fixture(name="explorer") def fixture_explorer(token: str) -> Explorer: """Returns explorer""" return Explorer("dev", token=token) + @pytest.fixture(name="case") def fixture_case(explorer: Explorer): """Return fixed testcase.""" return explorer.get_case_by_uuid(TESTCASE_UUID) + @pytest.fixture(name="table") def fixture_table(case): """Get one table for further testing.""" return case.tables[0] - + + ### Table + def test_table_to_pandas(table): """Test the to_pandas method.""" df = table.to_pandas() assert isinstance(df, pd.DataFrame) + def test_table_to_arrow(table): """Test the to_arrow() method""" arrow = table.to_arrow() assert isinstance(arrow, pa.Table) -