From a5ae6c81a275b692ef5d4c85cfeb37696bf69242 Mon Sep 17 00:00:00 2001 From: Shantanu Singh Date: Fri, 7 Apr 2023 16:29:25 -0400 Subject: [PATCH] New cyto tool: create cell locations file (#257) * Init * cmdline tool * Cleanup * use fire * output is optional * fix sqlite fixture * refactor * files are optional * Typo * cleanup * drop comments * create test fixtures * checks * fix paths * Use fixtures * yield * download file if needed * typo * Update req * use boto3 session * Test s3 locations * skip s3 test * dtypes * Add mike's snippet * use mike's format * tests pass * use method * add alternatives * refactor mike's code * cleanup * better tests * cleanup * overwrite is an option, other cleanup * Update pycytominer/cyto_utils/cell_locations.py Co-authored-by: Gregory Way * add docs * Add deps * Move to setup * add fire to deps * use pathlib * Add pip install .[cell_locations] (+formatting) * Update docs + fix typo in actions * Use as module * Formatting * add cell_locations * Merge in SQL * Update README.md Co-authored-by: Dave Bunten * Update .github/workflows/codecov.yml Co-authored-by: Dave Bunten * Update .github/workflows/python-app.yml Co-authored-by: Dave Bunten * Update pycytominer/tests/test_data/cell_locations_example_data/test_cell_locations.sh Co-authored-by: Dave Bunten * Update pycytominer/tests/test_data/cell_locations_example_data/test_cell_locations.sh Co-authored-by: Dave Bunten * Update pycytominer/tests/test_cyto_utils/test_cell_locations.py Co-authored-by: Dave Bunten * Update pycytominer/cyto_utils/cell_locations.py Co-authored-by: Dave Bunten * Update pycytominer/cyto_utils/cell_locations.py Co-authored-by: Dave Bunten * Update pycytominer/tests/test_cyto_utils/test_cell_locations.py Co-authored-by: Dave Bunten * Update pycytominer/tests/test_cyto_utils/test_cell_locations.py Co-authored-by: Dave Bunten * Address various comments * To address this warning below: pycytominer/tests/test_cyto_utils/test_cell_locations.py::test_shape_and_columns[cell_loc1] /Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/pandas/io/sql.py:1405: RemovedIn20Warning: Deprecated API features detected! These feature(s) are not compatible with SQLAlchemy 2.0. To prevent incompatible upgrades prior to updating applications, ensure requirements files are pinned to "sqlalchemy<2.0". Set environment variable SQLALCHEMY_WARN_20=1 to show all deprecation warnings. Set environment variable SQLALCHEMY_SILENCE_UBER_WARNING=1 to silence this message.
(Background on SQLAlchemy 2.0 at: https://sqlalche.me/e/b8d9) return self.connectable.execution_options().execute(*args, **kwargs) * use sqlalchemy * More comments and switch to boto3.client * Be explicit about anon; fix indentation bug * explicit types * Address various comments * Move gitignore entries to the top level * rename files, add docs * Upgrade to python 3.10 * refactor _load_single_cell * fix type * test on highest build version * explain warning * s3 is an attribute * compact check * Update README.md Co-authored-by: Dave Bunten * fix typo + more docs * black cells.py * trim code * black * Skip test * docs * dtypes * skip test * Fix test * Add docs --------- Co-authored-by: Gregory Way Co-authored-by: Dave Bunten --- .github/workflows/codecov.yml | 47 +- .github/workflows/python-app.yml | 38 +- .gitignore | 1 + README.md | 40 +- pycytominer/cyto_utils/cell_locations.py | 440 ++++++++++++++++++ pycytominer/cyto_utils/cell_locations_cmd.py | 5 + pycytominer/cyto_utils/cells.py | 30 +- pycytominer/tests/test_cyto_utils/conftest.py | 139 ++++++ .../test_cyto_utils/test_cell_locations.py | 74 +++ .../tests/test_cyto_utils/test_cells.py | 34 +- .../tests/test_cyto_utils/test_modz.py | 7 +- .../shrink_BR00126114.sh | 70 +++ .../test_BR00126114.sqlite | Bin 0 -> 12288 bytes ...st_BR00126114_load_data_with_illum.parquet | Bin 0 -> 27012 bytes setup.py | 10 +- 15 files changed, 855 insertions(+), 80 deletions(-) create mode 100644 pycytominer/cyto_utils/cell_locations.py create mode 100644 pycytominer/cyto_utils/cell_locations_cmd.py create mode 100644 pycytominer/tests/test_cyto_utils/conftest.py create mode 100644 pycytominer/tests/test_cyto_utils/test_cell_locations.py create mode 100644 pycytominer/tests/test_data/cell_locations_example_data/shrink_BR00126114.sh create mode 100644 pycytominer/tests/test_data/cell_locations_example_data/test_BR00126114.sqlite create mode 100644 pycytominer/tests/test_data/cell_locations_example_data/test_BR00126114_load_data_with_illum.parquet diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml index 9760a0e3..1d38473f 100644 --- a/.github/workflows/codecov.yml +++ b/.github/workflows/codecov.yml @@ -2,33 +2,32 @@ name: Code coverage on: push: - branches: [ master ] + branches: [master] pull_request: - branches: [ master ] + branches: [master] jobs: run: runs-on: ubuntu-latest steps: - - uses: actions/checkout@master - - name: Setup Python - uses: actions/setup-python@master - with: - python-version: 3.7 - - name: Generate coverage report - run: | - pip install pytest - pip install pytest-cov - pip install -r requirements.txt - pip install .[collate] - pytest --cov=./ --cov-report=xml - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 - with: - file: ./coverage.xml - files: ./coverage1.xml,./coverage2.xml - directory: ./coverage/reports/ - flags: unittests - name: codecov-umbrella - fail_ci_if_error: false - path_to_write_report: ./coverage/codecov_report.gz + - uses: actions/checkout@master + - name: Setup Python + uses: actions/setup-python@master + with: + python-version: 3.9 + - name: Generate coverage report + run: | + pip install pytest + pip install pytest-cov + pip install .[collate,cell_locations] + pytest --cov=./ --cov-report=xml + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./coverage.xml + files: ./coverage1.xml,./coverage2.xml + directory: ./coverage/reports/ + flags: unittests + name: codecov-umbrella + fail_ci_if_error: false + path_to_write_report:
./coverage/codecov_report.gz diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index 0d769d4d..65b4e941 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -5,33 +5,35 @@ name: Python build on: push: - branches: [ master ] + branches: [master] pull_request: - branches: [ master ] + branches: [master] jobs: build: - runs-on: ${{ matrix.os }} strategy: matrix: python-version: [3.7, 3.8, 3.9] os: [ubuntu-latest, macos-latest] env: - OS: ${{ matrix.os }} + OS: ${{ matrix.os }} + # This is needed to avoid a warning from SQLAlchemy + # https://sqlalche.me/e/b8d9 + # We can remove this once we upgrade to SQLAlchemy >= 2.0 + SQLALCHEMY_SILENCE_UBER_WARNING: "1" steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install pytest - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - pip install .[collate] - - name: Test with pytest - run: | - pytest + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install pytest + pip install .[collate,cell_locations] + - name: Test with pytest + run: | + pytest diff --git a/.gitignore b/.gitignore index 3eeeb0e0..6499442c 100644 --- a/.gitignore +++ b/.gitignore @@ -9,4 +9,5 @@ build *.sqlite pycytominer/tests/test_data/collate/backend/**/*.csv !pycytominer/tests/test_data/collate/backend/**/*master.csv +!pycytominer/tests/test_data/cell_locations_example_data/*.sqlite diff --git a/README.md b/README.md index f8a4132b..3db1cef4 100644 --- a/README.md +++ b/README.md @@ -45,11 +45,12 @@ Since the project is actively being developed, with new features added regularly # Example: pip install git+git://github.com/cytomining/pycytominer@2aa8638d7e505ab510f1d5282098dd59bb2cb470 ``` + ### CSV collation If running your images on a cluster, unless you have a MySQL or similar large database set up then you will likely end up with lots of different folders from the different cluster runs (often one per well or one per site), each one containing an `Image.csv`, `Nuclei.csv`, etc. In order to look at full plates, therefore, we first need to collate all of these CSVs into a single file (currently SQLite) per plate. -We currently do this with a library called [cytominer-database](https://github.com/cytomining/cytominer-database). +We currently do this with a library called [cytominer-database](https://github.com/cytomining/cytominer-database). If you want to perform this data collation inside pycytominer using the `cyto_utils` function `collate` (and/or you want to be able to run the tests and have them all pass!), you will need `cytominer-database==0.3.4`; this will change your installation commands slightly: @@ -62,6 +63,43 @@ pip install "pycytominer[collate] @ git+git://github.com/cytomining/pycytominer@ If using `pycytominer` in a conda environment, in order to run `collate.py`, you will also want to make sure to add `cytominer-database=0.3.4` to your list of dependencies. 
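If you prefer to drive the collation step from Python rather than the command line, a minimal sketch looks like this (the batch, plate, config, and directory names are placeholders, and the argument names are assumptions; consult the `collate` docstring for the authoritative interface):

```python
# Minimal sketch of collating per-well/per-site CSVs into one SQLite backend per plate.
# All values below (batch, config path, plate, base directory) are hypothetical.
from pycytominer.cyto_utils.collate import collate

collate(
    batch="2021_08_23_Batch12",  # hypothetical batch name
    config="ingest_config.ini",  # hypothetical cytominer-database ingest config
    plate="BR00126114",  # hypothetical plate name
    base_directory="/path/to/project",  # hypothetical root containing the analysis CSVs
)
```
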
+## Creating a cell locations lookup table + +The `CellLocation` class offers a convenient way to augment a [LoadData](https://cellprofiler-manual.s3.amazonaws.com/CPmanual/LoadData.html) file with X,Y locations of cells in each image. +The location information is obtained from a single cell SQLite file. + +To use this functionality, you will need to modify your installation command, similar to above: + +```bash +# Example for the general case (installing from the default branch): +pip install "pycytominer[cell_locations] @ git+git://github.com/cytomining/pycytominer" +``` + +Example using this functionality: + +```bash +metadata_input="s3://cellpainting-gallery/test-cpg0016-jump/source_4/workspace/load_data_csv/2021_08_23_Batch12/BR00126114/test_BR00126114_load_data_with_illum.parquet" +single_cell_input="s3://cellpainting-gallery/test-cpg0016-jump/source_4/workspace/backend/2021_08_23_Batch12/BR00126114/test_BR00126114.sqlite" +augmented_metadata_output="~/Desktop/load_data_with_illum_and_cell_location_subset.parquet" + +python \ + -m pycytominer.cyto_utils.cell_locations_cmd \ + --metadata_input ${metadata_input} \ + --single_cell_input ${single_cell_input} \ + --augmented_metadata_output ${augmented_metadata_output} \ + add_cell_location + +# Check the output + +python -c "import pandas as pd; print(pd.read_parquet('${augmented_metadata_output}').head())" + +# It should look something like this (depends on the width of your terminal): + +# Metadata_Plate Metadata_Well Metadata_Site ... PathName_OrigRNA ImageNumber CellCenters +# 0 BR00126114 A01 1 ... s3://cellpainting-gallery/cpg0016-jump/source_... 1 [{'Nuclei_Location_Center_X': 943.512129380054... +# 1 BR00126114 A01 2 ... s3://cellpainting-gallery/cpg0016-jump/source_... 2 [{'Nuclei_Location_Center_X': 29.9516027655562... +``` + ## Usage Using pycytominer is simple and fun. diff --git a/pycytominer/cyto_utils/cell_locations.py b/pycytominer/cyto_utils/cell_locations.py new file mode 100644 index 00000000..755ab7a3 --- /dev/null +++ b/pycytominer/cyto_utils/cell_locations.py @@ -0,0 +1,440 @@ +""" +Utility function to augment a metadata file with X,Y locations of cells in each image +""" + +import pathlib +import pandas as pd +import boto3 +import botocore +import tempfile +import collections +import sqlalchemy +from typing import Union + + +class CellLocation: + """This class holds all the functions to augment a metadata file with X,Y + locations of cells in each image.
+ + In the metadata file, which is either a CSV or a Parquet file, + - Each row is a single multi-channel image + - Each image is indexed by multiple columns, e.g., `Metadata_Plate`, `Metadata_Well`,`Metadata_Site` + + The single_cell SQLite file contains at least two tables + - `Nuclei`, which has the single-cell-level readouts, including location information + - `Image`, which has the image-level readouts, as well as metadata to link to the metadata file + + In the `Nuclei` table, + - Each row is a cell + - Each cell has at least 3 columns: `Nuclei_Location_Center_X`, `Nuclei_Location_Center_Y`, `ImageNumber` + + In the `Image` table, + - Each row is an image + - Each image has at least the same columns as the images in the metadata file are indexed by, e.g., `Metadata_Plate`,`Metadata_Well`,`Metadata_Site` + + The methods in this class do the following + - Read the metadata file + - Read the single_cell file + - For each image in the metadata file, find the corresponding image in the single_cell file + - For each cell in the corresponding image, find the X,Y location + - Add the X,Y locations of all cells to the metadata file in the corresponding row, packed into a single column + + + Attributes + ---------- + metadata_input : str or Pandas DataFrame + Path to the input metadata file or a Pandas DataFrame + + single_cell_input : str or sqlalchemy.engine.Engine + Path to the single_cell file or a sqlalchemy.engine.Engine object + + augmented_metadata_output : str + Path to the output file. If None, the metadata file is not saved to disk + + image_column : default = 'ImageNumber' + Name of the column in the metadata file that links to the single_cell file + + image_key: default = ['Metadata_Plate', 'Metadata_Well', 'Metadata_Site'] + Names of the columns in the metadata file that uniquely identify each image + + object_column : default = 'ObjectNumber' + Name of the column in the single_cell file that identifies each cell + + cell_x_loc : default = 'Nuclei_Location_Center_X' + Name of the column in the single_cell file that contains the X location of each cell + + cell_y_loc : default = 'Nuclei_Location_Center_Y' + Name of the column in the single_cell file that contains the Y location of each cell + + Methods + ------- + add_cell_location() + Augment the metadata file and optionally save it to a file + + """ + + def __init__( + self, + metadata_input: Union[str, pd.DataFrame], + single_cell_input: Union[str, sqlalchemy.engine.Engine], + augmented_metadata_output: str = None, + overwrite: bool = False, + image_column: str = "ImageNumber", + object_column: str = "ObjectNumber", + image_key: list = ["Metadata_Plate", "Metadata_Well", "Metadata_Site"], + cell_x_loc: str = "Nuclei_Location_Center_X", + cell_y_loc: str = "Nuclei_Location_Center_Y", + ): + self.metadata_input = self._expanduser(metadata_input) + self.augmented_metadata_output = self._expanduser(augmented_metadata_output) + self.single_cell_input = self._expanduser(single_cell_input) + self.overwrite = overwrite + self.image_column = image_column + self.object_column = object_column + self.image_key = image_key + self.cell_x_loc = cell_x_loc + self.cell_y_loc = cell_y_loc + # Currently constrained to only anonymous access for S3 resources + # https://github.com/cytomining/pycytominer/issues/268 + self.s3 = boto3.client( + "s3", config=botocore.config.Config(signature_version=botocore.UNSIGNED) + ) + + def _expanduser(self, obj: Union[str, pd.DataFrame, sqlalchemy.engine.Engine, None]): + """Expand the user home directory in a path; non-string inputs pass through unchanged""" + if
isinstance(obj, str) and not obj.startswith("s3://"): + return pathlib.Path(obj).expanduser().as_posix() + return obj + + def _parse_s3_path(self, s3_path: str): + """Parse an S3 path into a bucket and key + + Parameters + ---------- + s3_path : str + The S3 path + + Returns + ------- + str + The bucket + str + The key + """ + + s3_path = s3_path.replace("s3://", "") + + bucket = s3_path.split("/")[0] + + key = "/".join(s3_path.split("/")[1:]) + + return bucket, key + + def _s3_file_exists(self, s3_path: str): + """Check if a file exists on S3 + + Parameters + ---------- + s3_path : str + The path to the file on S3 + + Returns + ------- + bool + True if the file exists on S3, False otherwise + """ + + bucket, key = self._parse_s3_path(s3_path) + + try: + # use head_object because self.s3 is a boto3 client (clients have no .Object); + # a missing key surfaces as a ClientError with a 404 code + self.s3.head_object(Bucket=bucket, Key=key) + except botocore.exceptions.ClientError as e: + if e.response["Error"]["Code"] == "404": + return False + else: + raise + else: + return True + + def _download_s3(self, uri: str): + """ + Download a file from S3 to a temporary file and return the temporary path + """ + + bucket, key = self._parse_s3_path(uri) + + tmp_file = tempfile.NamedTemporaryFile( + delete=False, suffix=pathlib.Path(key).name + ) + + self.s3.download_file(bucket, key, tmp_file.name) + + return tmp_file.name + + def _load_metadata(self): + """Load the metadata into a Pandas DataFrame + + Returns + ------- + Pandas DataFrame + The metadata loaded into a Pandas DataFrame + """ + + if not isinstance(self.metadata_input, pd.DataFrame): + # verify that the metadata file is a CSV or a Parquet file + + if not ( + self.metadata_input.endswith(".csv") + or self.metadata_input.endswith(".parquet") + ): + raise ValueError("Metadata file must be a CSV or a Parquet file") + + storage_options = ( + {"anon": True} if self.metadata_input.startswith("s3://") else None + ) + + # load the metadata file into a Pandas DataFrame + if self.metadata_input.endswith(".csv"): + df = pd.read_csv( + self.metadata_input, dtype=str, storage_options=storage_options + ) + else: + df = pd.read_parquet( + self.metadata_input, storage_options=storage_options + ) + + # cast all columns to string + df = df.astype(str) + else: + df = self.metadata_input + + # verify that the image index columns are present in the metadata object + + if not all(elem in df.columns for elem in self.image_key): + raise ValueError( + f"Image index columns {self.image_key} are not present in the metadata file" + ) + + return df + + def _create_nested_df(self, df: pd.DataFrame): + """Create a new column `CellCenters` by nesting the X and Y locations of cells from an image into the row of the image + + Parameters + ---------- + df : Pandas DataFrame + The DataFrame to convert + + Returns + ------- + Pandas DataFrame + """ + + # define a dictionary to store the output + output_df_list = collections.defaultdict(list) + + # iterate over each group of cells in the merged DataFrame + group_cols = self.image_key + [self.image_column] + + for group_values, cell_df in df.groupby(group_cols): + # add the image-level information to the output dictionary + for key, value in zip(group_cols, group_values): + output_df_list[key].append(value) + + # convert the cell DataFrame to a dictionary + cell_dict = cell_df.to_dict(orient="list") + + # iterate over each cell in the cell DataFrame + row_cell_dicts = [] + for object_column, cell_x_loc, cell_y_loc in zip( + cell_dict[self.object_column], + cell_dict[self.cell_x_loc], + cell_dict[self.cell_y_loc], + ): + # add the cell information to a dictionary +
row_cell_dicts.append( + { + self.object_column: object_column, + self.cell_x_loc: cell_x_loc, + self.cell_y_loc: cell_y_loc, + } + ) + + # add the cell-level information to the output dictionary + output_df_list["CellCenters"].append(row_cell_dicts) + + # convert the output dictionary to a Pandas DataFrame + return pd.DataFrame(output_df_list) + + def _get_single_cell_engine(self): + """ + Get the sqlalchemy.engine.Engine object for the single_cell file + """ + + if isinstance(self.single_cell_input, str): + # check if the single_cell file is a SQLite file + if not self.single_cell_input.endswith(".sqlite"): + raise ValueError("single_cell file must be a SQLite file") + + # if the single_cell file is an S3 path, download it to a temporary file + if self.single_cell_input.startswith("s3://"): + temp_single_cell_input = self._download_s3(self.single_cell_input) + + # connect to the single_cell file + engine = sqlalchemy.create_engine(f"sqlite:///{temp_single_cell_input}") + else: + # connect to the single_cell file + engine = sqlalchemy.create_engine(f"sqlite:///{self.single_cell_input}") + temp_single_cell_input = None + + else: + engine = self.single_cell_input + temp_single_cell_input = None + + return temp_single_cell_input, engine + + def _check_single_cell_correctness(self, engine: sqlalchemy.engine.Engine): + """ + Check that the single_cell file has the required tables and columns + """ + + inspector = sqlalchemy.inspect(engine) + + if not all( + table_name in inspector.get_table_names() + for table_name in ["Image", "Nuclei"] + ): + raise ValueError( + "Image and Nuclei tables are not present in the single_cell file" + ) + + # Verify that the required columns are present in the single_cell file + + nuclei_columns = [column["name"] for column in inspector.get_columns("Nuclei")] + + if not all( + column_name in nuclei_columns + for column_name in [ + self.image_column, + self.object_column, + self.cell_x_loc, + self.cell_y_loc, + ] + ): + raise ValueError( + "Required columns are not present in the Nuclei table in the SQLite file" + ) + + image_columns = [column["name"] for column in inspector.get_columns("Image")] + + if not ( + self.image_column in image_columns + and all(elem in image_columns for elem in self.image_key) + ): + raise ValueError( + "Required columns are not present in the Image table in the SQLite file" + ) + + def _get_joined_image_nuclei_tables(self): + """ + Merge the Image and Nuclei tables in SQL + """ + # get the sqlalchemy.engine.Engine object for the single_cell file + temp_single_cell_input, engine = self._get_single_cell_engine() + + # check that the single_cell file has the required tables and columns + self._check_single_cell_correctness(engine) + + # qualify each image key column with its table name to avoid ambiguity in the join + image_index_str = ", ".join(f"Image.{col}" for col in self.image_key) + + # merge the Image and Nuclei tables in SQL + + join_query = f""" + SELECT Nuclei.{self.image_column},Nuclei.{self.object_column},Nuclei.{self.cell_x_loc},Nuclei.{self.cell_y_loc},{image_index_str} + FROM Nuclei + INNER JOIN Image + ON Nuclei.{self.image_column} = Image.{self.image_column}; + """ + + column_types = { + self.image_column: "int64", + self.object_column: "int64", + self.cell_x_loc: "float", + self.cell_y_loc: "float", + } + + for key_col in self.image_key: + column_types[key_col] = "str" + + joined_df = pd.read_sql_query(join_query, engine, dtype=column_types) + + # if the single_cell file was downloaded from S3, delete the temporary file + if temp_single_cell_input is not None: + pathlib.Path(temp_single_cell_input).unlink() + + return
joined_df + + def _load_single_cell(self): + """Load the required columns from the `Image` and `Nuclei` tables in the single_cell file or sqlalchemy.engine.Engine object into a Pandas DataFrame + + Returns + ------- + Pandas DataFrame + The required columns from the `Image` and `Nuclei` tables loaded into a Pandas DataFrame + """ + + return self._create_nested_df(self._get_joined_image_nuclei_tables()) + + def add_cell_location(self): + """Add the X,Y locations of all cells to the metadata file in the corresponding row, packed into a single column. + Optionally, save the augmented metadata file as a Parquet file. + + Returns + ------- + Pandas DataFrame + Either a data frame or the path to a Parquet file with the X,Y locations of all cells packed into a single column + """ + + # If self.augmented_metadata_output is not None and it is a str and the file already exists, there is nothing to do + if ( + self.augmented_metadata_output is not None + and isinstance(self.augmented_metadata_output, str) + and self.overwrite is False + and ( + # Check if the file exists on S3 or locally + ( + self.augmented_metadata_output.startswith("s3://") + and self._s3_file_exists(self.augmented_metadata_output) + ) + or ( + not self.augmented_metadata_output.startswith("s3://") + and pathlib.Path(self.augmented_metadata_output).exists() + ) + ) + ): + # TODO: Consider doing a quick difference check should the file already exist. + # For example, if the file already exists and it's different than what could be possibly incoming, should the user know? + # This will involve performing all the steps below and then doing a check to see if the file is different, so this is a bit of a pain. + return self.augmented_metadata_output + + # Load the data + metadata_df = self._load_metadata() + single_cell_df = self._load_single_cell() + + # Merge the data and single_cell tables + augmented_metadata_df = pd.merge( + metadata_df, + single_cell_df, + on=self.image_key, + how="left", + ) + + # If self.augmented_metadata_output is not None, save the data + if self.augmented_metadata_output is not None: + # TODO: switch to https://github.com/cytomining/pycytominer/blob/master/pycytominer/cyto_utils/output.py if we want to support more file types + augmented_metadata_df.to_parquet( + self.augmented_metadata_output, index=False + ) + return self.augmented_metadata_output + else: + return augmented_metadata_df diff --git a/pycytominer/cyto_utils/cell_locations_cmd.py b/pycytominer/cyto_utils/cell_locations_cmd.py new file mode 100644 index 00000000..754449e2 --- /dev/null +++ b/pycytominer/cyto_utils/cell_locations_cmd.py @@ -0,0 +1,5 @@ +from pycytominer.cyto_utils.cell_locations import CellLocation +import fire + +if __name__ == '__main__': + fire.Fire(CellLocation) diff --git a/pycytominer/cyto_utils/cells.py b/pycytominer/cyto_utils/cells.py index cd5c9621..1511f354 100644 --- a/pycytominer/cyto_utils/cells.py +++ b/pycytominer/cyto_utils/cells.py @@ -72,8 +72,8 @@ class SingleCells(object): default_datatype_float: type Numpy floating point datatype to use for load_compartment and resulting dataframes. This parameter may be used to assist with performance-related - issues by reducing the memory required for floating-point data. - For example, using np.float32 instead of np.float64 for this parameter + issues by reducing the memory required for floating-point data. + For example, using np.float32 instead of np.float64 for this parameter will reduce memory consumed by float columns by roughly 50%. 
Please note: using any type besides np.float64 is experimentally unverified. @@ -365,7 +365,6 @@ def subsample_profiles(self, df, rename_col=True): self.set_subsample_random_state(random_state) if self.subsample_frac == 1: - output_df = pd.DataFrame.sample( df, n=self.subsample_n, @@ -537,7 +536,6 @@ def aggregate_compartment( compartment=compartment, n_aggregation_memory_strata=n_aggregation_memory_strata, ): - population_df = self.image_df.merge( compartment_df, how="inner", @@ -636,7 +634,7 @@ def _compartment_df_generator( con=self.conn, ) all_columns = compartment_row1.columns - if self.features != "infer": # allow to get only some features + if self.features != "infer": # allow to get only some features all_columns = [x for x in all_columns if x in self.features] typeof_str = ", ".join([f"typeof({x})" for x in all_columns]) @@ -754,22 +752,12 @@ def merge_single_cells( sc_df, how="left", on=subset_logic_df.columns.tolist() ).reindex(sc_df.columns, axis="columns") - sc_df = sc_df.merge( - self.load_compartment(compartment=right_compartment), - left_on=self.merge_cols + [left_link_col], - right_on=self.merge_cols + [right_link_col], - suffixes=merge_suffix, - ) - - else: - sc_df = sc_df.merge( - self.load_compartment( - compartment=right_compartment - ), - left_on=self.merge_cols + [left_link_col], - right_on=self.merge_cols + [right_link_col], - suffixes=merge_suffix, - ) + sc_df = sc_df.merge( + self.load_compartment(compartment=right_compartment), + left_on=self.merge_cols + [left_link_col], + right_on=self.merge_cols + [right_link_col], + suffixes=merge_suffix, + ) linking_check_cols.append(linking_check) diff --git a/pycytominer/tests/test_cyto_utils/conftest.py b/pycytominer/tests/test_cyto_utils/conftest.py new file mode 100644 index 00000000..a6c78821 --- /dev/null +++ b/pycytominer/tests/test_cyto_utils/conftest.py @@ -0,0 +1,139 @@ +""" +conftest.py for pytest +""" + +import os +import pandas as pd +import pathlib +import pytest +import sqlalchemy +from pycytominer.cyto_utils.cell_locations import CellLocation + + +@pytest.fixture(name="data_dir_cell_locations") +def fixture_data_dir_cell_locations() -> str: + """ + Provide a data directory for cell_locations test data + """ + + return ( + f"{pathlib.Path(__file__).parent.parent}/test_data/cell_locations_example_data" + ) + + +@pytest.fixture(name="metadata_input_file") +def fixture_metadata_input_file(data_dir_cell_locations: str) -> str: + """ + Provide a metadata input file for cell_locations test data + """ + return os.path.join( + data_dir_cell_locations, "test_BR00126114_load_data_with_illum.parquet" + ) + + +@pytest.fixture(name="single_cell_input_file") +def fixture_single_cell_input_file(data_dir_cell_locations: str) -> str: + """ + Provide a single cell input file for cell_locations test data + """ + return os.path.join(data_dir_cell_locations, "test_BR00126114.sqlite") + + +@pytest.fixture(name="metadata_input_file_s3") +def fixture_metadata_input_file_s3() -> str: + """ + Provide an S3 path to a metadata input file for cell_locations test data + """ + return "s3://cellpainting-gallery/test-cpg0016-jump/source_4/workspace/load_data_csv/2021_08_23_Batch12/BR00126114/test_BR00126114_load_data_with_illum.parquet" + + +@pytest.fixture(name="single_cell_input_file_s3") +def fixture_single_cell_input_file_s3() -> str: + """ + Provide an S3 path to a single cell input file for cell_locations test data + """ + return "s3://cellpainting-gallery/test-cpg0016-jump/source_4/workspace/backend/2021_08_23_Batch12/BR00126114/test_BR00126114.sqlite" + +
+@pytest.fixture(name="metadata_input_dataframe") +def fixture_metadata_input_dataframe(metadata_input_file: str) -> pd.DataFrame: + """ + Provide a metadata input file for cell_locations test data + """ + return pd.read_parquet(metadata_input_file) + + +@pytest.fixture(name="single_cell_input_engine") +def fixture_single_cell_input_engine( + single_cell_input_file: str, +) -> sqlalchemy.engine.Engine: + """ + Provide a single cell input file for cell_locations test data + """ + return sqlalchemy.create_engine(f"sqlite:///{single_cell_input_file}") + + +@pytest.fixture(name="cell_loc_obj1") +def fixture_cell_loc_obj1( + metadata_input_file: str, + single_cell_input_file: str, +) -> CellLocation: + """ + Provide a CellLocation object with file inputs + """ + return CellLocation( + metadata_input=metadata_input_file, + single_cell_input=single_cell_input_file, + ) + + +@pytest.fixture(name="cell_loc_obj2") +def fixture_cell_loc_obj2( + metadata_input_dataframe: pd.DataFrame, + single_cell_input_engine: sqlalchemy.engine.Engine, +) -> CellLocation: + """ + Provide a CellLocation object with in-memory inputs + """ + return CellLocation( + metadata_input=metadata_input_dataframe, + single_cell_input=single_cell_input_engine, + ) + + +@pytest.fixture(name="cell_loc_obj3") +def fixture_cell_loc_obj3( + metadata_input_file_s3: str, + single_cell_input_file_s3: str, +) -> CellLocation: + """ + Provide a CellLocation object with s3 inputs + """ + return CellLocation( + metadata_input=metadata_input_file_s3, + single_cell_input=single_cell_input_file_s3, + ) + + +@pytest.fixture(name="cell_loc1") +def fixture_cell_loc1(cell_loc_obj1: CellLocation) -> pd.DataFrame: + """ + Provide the output of running CellLocation.add_cell_location + """ + return cell_loc_obj1.add_cell_location() + + +@pytest.fixture(name="cell_loc2") +def fixture_cell_loc2(cell_loc_obj2: CellLocation) -> pd.DataFrame: + """ + Provide the output of running CellLocation.add_cell_location + """ + return cell_loc_obj2.add_cell_location() + + +@pytest.fixture(name="cell_loc3") +def fixture_cell_loc3(cell_loc_obj3: CellLocation) -> pd.DataFrame: + """ + Provide the output of running CellLocation.add_cell_location + """ + return cell_loc_obj3.add_cell_location() diff --git a/pycytominer/tests/test_cyto_utils/test_cell_locations.py b/pycytominer/tests/test_cyto_utils/test_cell_locations.py new file mode 100644 index 00000000..f7102273 --- /dev/null +++ b/pycytominer/tests/test_cyto_utils/test_cell_locations.py @@ -0,0 +1,74 @@ +"""This tests the output from CellLocation class""" + +import pandas as pd +import pytest +import sqlalchemy +from typing import Type +from _pytest.fixtures import FixtureRequest + + +@pytest.mark.parametrize("cell_loc", ["cell_loc1", "cell_loc2", "cell_loc3"]) +def test_output_shape_and_required_columns( + cell_loc: str, + metadata_input_dataframe: pd.DataFrame, + request: Type[FixtureRequest], +): + """ + This tests the shape of the output from CellLocation class and verifies that the required columns are present + """ + + cell_loc = request.getfixturevalue(cell_loc) + + # check the shape of the data + assert cell_loc.shape == ( + metadata_input_dataframe.shape[0], + metadata_input_dataframe.shape[1] + 2, + ) + + # verify that the Nuclear_Location_Center_X and Nuclear_Location_Center_Y columns are present + assert "Nuclei_Location_Center_X" in cell_loc["CellCenters"][0][0].keys() + assert "Nuclei_Location_Center_Y" in cell_loc["CellCenters"][0][0].keys() + + +@pytest.mark.parametrize("cell_loc", ["cell_loc1", 
"cell_loc2", "cell_loc3"]) +def test_output_value_correctness( + cell_loc: str, + metadata_input_dataframe: pd.DataFrame, + single_cell_input_file: str, + request: Type[FixtureRequest], +): + """ + This tests the correctness of the values in the output from CellLocation class by comparing the values in the output to the values in the input + """ + cell_loc = request.getfixturevalue(cell_loc) + + # if we restrict the columns of cell_loc to the ones in metadata_input_dataframe, we should get the same dataframe + assert ( + cell_loc[metadata_input_dataframe.columns] + .reset_index(drop=True) + .equals(metadata_input_dataframe.reset_index(drop=True)) + ) + + engine = sqlalchemy.create_engine(f"sqlite:///{single_cell_input_file}") + + nuclei_query = "SELECT ImageNumber, ObjectNumber, Nuclei_Location_Center_X, Nuclei_Location_Center_Y FROM Nuclei;" + + nuclei_df = pd.read_sql_query(nuclei_query, engine) + + # get the values in the Nuclear_Location_Center_X and Nuclear_Location_Center_Y columns + # for the rows in nuclei_df that have ImageNumber == 1 + + nuclei_df_row1 = nuclei_df[nuclei_df["ImageNumber"] == "1"] + + observed_x = [x["Nuclei_Location_Center_X"] for x in cell_loc.CellCenters[0]] + observed_y = [x["Nuclei_Location_Center_Y"] for x in cell_loc.CellCenters[0]] + + expected_x = nuclei_df_row1["Nuclei_Location_Center_X"].tolist() + expected_x = [float(x) for x in expected_x] + + expected_y = nuclei_df_row1["Nuclei_Location_Center_Y"].tolist() + expected_y = [float(x) for x in expected_y] + + # verify that the values in the Nuclear_Location_Center_X and Nuclear_Location_Center_Y columns are correct + assert observed_x == expected_x + assert observed_y == expected_y diff --git a/pycytominer/tests/test_cyto_utils/test_cells.py b/pycytominer/tests/test_cyto_utils/test_cells.py index f6461167..ee16bdee 100644 --- a/pycytominer/tests/test_cyto_utils/test_cells.py +++ b/pycytominer/tests/test_cyto_utils/test_cells.py @@ -345,14 +345,16 @@ def test_get_sql_table_col_names(): # Iterate over initialized compartments for compartment in AP.compartments: expected_meta_cols = ["ObjectNumber", "ImageNumber", "TableNumber"] - expected_feat_cols = [f"{compartment.capitalize()}_{i}" for i in ["a", "b", "c", "d"]] - if compartment == 'cytoplasm': - expected_feat_cols += ["Cytoplasm_Parent_Cells","Cytoplasm_Parent_Nuclei"] + expected_feat_cols = [ + f"{compartment.capitalize()}_{i}" for i in ["a", "b", "c", "d"] + ] + if compartment == "cytoplasm": + expected_feat_cols += ["Cytoplasm_Parent_Cells", "Cytoplasm_Parent_Nuclei"] col_name_result = AP.get_sql_table_col_names(table=compartment) - assert sorted(col_name_result) == sorted(expected_feat_cols+expected_meta_cols) - meta_cols, feat_cols = AP.split_column_categories( - col_name_result + assert sorted(col_name_result) == sorted( + expected_feat_cols + expected_meta_cols ) + meta_cols, feat_cols = AP.split_column_categories(col_name_result) assert meta_cols == expected_meta_cols assert feat_cols == expected_feat_cols @@ -406,7 +408,6 @@ def test_merge_single_cells(): for method in ["standardize", "robustize"]: for samples in ["all", "Metadata_ImageNumber == 'x'"]: for features in ["infer", ["Cytoplasm_a", "Cells_a"]]: - norm_method_df = AP.merge_single_cells( single_cell_normalize=True, normalize_args={ @@ -426,6 +427,17 @@ def test_merge_single_cells(): check_dtype=False, ) + +@pytest.mark.skip( + reason="This test will soon fail because of a logic error in merge_single_cells" +) +def test_merge_single_cells_non_canonical(): + # The test raises this 
warning: + # FutureWarning: Passing 'suffixes' which cause duplicate columns + # {'ObjectNumber_cytoplasm'} in the result is deprecated and will raise a + # MergeError in a future version. + # See https://github.com/cytomining/pycytominer/issues/266 + + # Test non-canonical compartment merging new_sc_merge_df = AP_NEW.merge_single_cells() @@ -476,8 +488,8 @@ traditional_norm_df.loc[:, new_compartment_cols].abs().describe(), ) -def test_merge_single_cells_subsample(): +def test_merge_single_cells_subsample(): for subsample_frac in [0.1, 0.5, 0.9]: ap_subsample = SingleCells( sql_file=TMP_SQLITE_FILE, subsample_frac=subsample_frac @@ -704,7 +716,6 @@ def test_aggregate_subsampling_count_cells(): def test_aggregate_subsampling_profile(): - assert isinstance( AP_SUBSAMPLE.aggregate_profiles(compute_subsample=True), pd.DataFrame ) @@ -724,7 +735,6 @@ def test_aggregate_subsampling_profile(): def test_aggregate_subsampling_profile_output(): - expected_result = pd.DataFrame( { "Metadata_Plate": ["plate", "plate"], @@ -768,7 +778,6 @@ def test_aggregate_subsampling_profile_output(): def test_aggregate_subsampling_profile_output_multiple_queries(): - expected_result = pd.DataFrame( { "Metadata_Plate": ["plate", "plate"], @@ -814,7 +823,6 @@ def test_aggregate_subsampling_profile_output_multiple_queries(): def test_n_aggregation_memory_strata(): - df_n1 = AP.aggregate_profiles(n_aggregation_memory_strata=1) df_n2 = AP.aggregate_profiles(n_aggregation_memory_strata=2) df_n3 = AP.aggregate_profiles(n_aggregation_memory_strata=3) @@ -832,7 +840,6 @@ def test_invalid_n_aggregation_memory_strata(): def test_sqlite_strata_conditions(): - df = pd.DataFrame( data={ "TableNumber": [[1], [2], [3], [4]], @@ -1082,4 +1089,3 @@ def test_load_non_canonical_image_table(): result.sort_index(axis="columns").drop("Metadata_Site_Count", axis="columns"), sc_aggregated_df, ) - diff --git a/pycytominer/tests/test_cyto_utils/test_modz.py b/pycytominer/tests/test_cyto_utils/test_modz.py index 8a026467..ca075ed8 100644 --- a/pycytominer/tests/test_cyto_utils/test_modz.py +++ b/pycytominer/tests/test_cyto_utils/test_modz.py @@ -1,5 +1,6 @@ import os import random +import pytest import numpy as np import pandas as pd from pycytominer.cyto_utils import modz @@ -143,7 +144,11 @@ def test_modz_multiple_columns_one_metadata_column(): consensus_df = modz( data_replicate_multi_df, replicate_columns, min_weight=1, precision=precision ) - expected_result = data_replicate_multi_df.groupby(replicate_columns).mean().round(4) + expected_result = ( + data_replicate_multi_df.groupby(replicate_columns) + .mean(numeric_only=True) + .round(4) + ) expected_result.index.name = replicate_columns pd.testing.assert_frame_equal( expected_result.reset_index(), consensus_df, check_exact=False, atol=1e-3 ) diff --git a/pycytominer/tests/test_data/cell_locations_example_data/shrink_BR00126114.sh b/pycytominer/tests/test_data/cell_locations_example_data/shrink_BR00126114.sh new file mode 100644 index 00000000..8a18202f --- /dev/null +++ b/pycytominer/tests/test_data/cell_locations_example_data/shrink_BR00126114.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +# Create SQLite and LoadData CSV files for testing cell locations +# +# Steps: +# 1. Download SQLite file from S3 +# 2. Download LoadData CSV file from S3 +# 3. Query SQLite to select specific columns from the `Image` table (all rows where `ImageNumber` is 1 or 2) and from the `Nuclei` table (the first 10 rows for each of those images). +# 4. Create the SQLite file fixture using the output of the SQL queries +# 5.
Create a new LoadData CSV fixture file with only the rows corresponding to the rows in SQLite file fixture + +# Download SQLite file +aws s3 cp s3://cellpainting-gallery/cpg0016-jump/source_4/workspace/backend/2021_08_23_Batch12/BR00126114/BR00126114.sqlite . + +# Download LoadData CSV file +aws s3 cp s3://cellpainting-gallery/cpg0016-jump/source_4/workspace/load_data_csv/2021_08_23_Batch12/BR00126114/load_data_with_illum.parquet . + +# Write a SQL query to select rows of the `Image` table in the SQLite file where `ImageNumber` is 1 or 2. +# Only select the columns: `Metadata_Plate`, `Metadata_Well`, `Metadata_Site`, `ImageNumber` + +sqlite3 -header -csv BR00126114.sqlite "SELECT Metadata_Plate, Metadata_Well, Metadata_Site, ImageNumber FROM Image WHERE ImageNumber = 1 OR ImageNumber = 2;" > image_query.csv + + +# Write a SQL query to select rows of the `Nuclei` table in the SQLite file where `ImageNumber` is 1 or 2. +# Only select the columns: `ImageNumber`, `ObjectNumber`, `Nuclei_Location_Center_X`, `Nuclei_Location_Center_Y` + +sqlite3 -header -csv BR00126114.sqlite "SELECT ImageNumber, ObjectNumber, Nuclei_Location_Center_X, Nuclei_Location_Center_Y FROM Nuclei WHERE ImageNumber = 1 LIMIT 10;" > nuclei_query_1.csv +sqlite3 -header -csv BR00126114.sqlite "SELECT ImageNumber, ObjectNumber, Nuclei_Location_Center_X, Nuclei_Location_Center_Y FROM Nuclei WHERE ImageNumber = 2 LIMIT 10;" > nuclei_query_2.csv + +csvstack nuclei_query_1.csv nuclei_query_2.csv > nuclei_query.csv + +# Create a text file with the following SQL commands: + +cat << EOF > create_tables.sql +.mode csv +.import image_query.csv Image +.import nuclei_query.csv Nuclei +EOF + +cat create_tables.sql + +# run the SQL commands in the text file to create the SQLite file + +sqlite3 test_BR00126114.sqlite < create_tables.sql + +# Print the list of tables in the SQLite file + +sqlite3 test_BR00126114.sqlite ".tables" + +# Print the contents of the `Image` table in the SQLite file + +sqlite3 test_BR00126114.sqlite "SELECT * FROM Image;" + +# Print the contents of the `Nuclei` table in the SQLite file + +sqlite3 test_BR00126114.sqlite "SELECT * FROM Nuclei;" + +cat << EOF > create_parquet.py +import pandas as pd +load_data = pd.read_parquet("load_data_with_illum.parquet") +load_data = load_data.astype({"Metadata_Plate": str, "Metadata_Well": str, "Metadata_Site": str}) +image_query = pd.read_csv("image_query.csv") +image_query = image_query[["Metadata_Plate", "Metadata_Well", "Metadata_Site"]] +image_query = image_query.astype({"Metadata_Plate": str, "Metadata_Well": str, "Metadata_Site": str}) +merged_df = image_query.merge(load_data, on=["Metadata_Plate", "Metadata_Well", "Metadata_Site"]) +merged_df.to_parquet("load_data_with_illum_subset.parquet") +EOF + +python create_parquet.py + diff --git a/pycytominer/tests/test_data/cell_locations_example_data/test_BR00126114.sqlite b/pycytominer/tests/test_data/cell_locations_example_data/test_BR00126114.sqlite new file mode 100644 index 0000000000000000000000000000000000000000..efeabe0204b6b3b443051f0ba7849ba9ad8601ba GIT binary patch literal 12288 zcmeH~zi(qj5XWsNA#lWg_al;Zh)5<9d5J<+J3G5OvsC0xE|9{HyNh&3WjVp1ASaO` z(_9z+3TpUE=#Z|Wprxng4}h=zlqe8EMRCQf?0L`gzHfHsGvi{PJvlg;O$J|`U!0C- z18&`IwcBeS4F;`NYZ>1qe6LmqH*??BYG2*_$91{oMjKrSI=x?7)ftQ}BVYuKfDteP zM!*Od0V7}pjDQg^0{;hr%Xd1f{f7_R-&M2m(RA|Zx5v}TNn@8DKOOEL4hM((j}C@| z&BnR8eP{FI)A83(o*qpuHV22p7l$7VHa|Q1W^z3HhqHNQbZ~w=o}HYZjUG?VW|NE2 zi|e=kMes!<*tuM4ulD=>_T^-e++yE_>HIylg>RnOClG!)o{dMJPsg*#^)}b;=acF5 
zw)Yv1^w-{7#qHe0yf~XzKewviU~CxyBVYuKfDtePM!*Od0V7}pjDQg^0VEI{-cP;nduP32@7?Yn z-B;Zox-Yv=y4!1iLTJkf7y%<-1dM_moC~MIAej`+c5D_GCSXfRFvX)GK zVe5O1P*ljJ7`<`|86lX8lY&$jdyA*7yAbZ~g4Sbg95PPh1c$rKnO@O~8~C zDY6I?((;mnibHHoTyiQ1*lPmDgrbQ#`4~$mF+^-knSCfJB+qrX3CNL)826VU+t{Ee1^MAAqiuxu>!l_k77PF$ zVm=rsP$H{SMZK6)jAGg9%!Ru+)+d*WdP3`Qz{f0O6w!cKm`m_dx0`@eBD+KZ-C*@T RMN(9(MG|TaT*}SA>R&`04haAN literal 0 HcmV?d00001 diff --git a/pycytominer/tests/test_data/cell_locations_example_data/test_BR00126114_load_data_with_illum.parquet b/pycytominer/tests/test_data/cell_locations_example_data/test_BR00126114_load_data_with_illum.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9324e8b4b0f8a32bdae9ceb44d5172fd16996174 GIT binary patch literal 27012 zcmeHQYiwKP8Mf2(LRU&#VdIvp%Pcq>YE%0-PKZ0E?f4p-#EG56H^w~QK1Xqph>2kH-mrU_LD?R~%FbMcH)%o$XV0L*<3x9 zP1RG$c|KLo8HS9*#@F@6H}s5~Io7GlvnW_+7}CMVDfrUq;QplHjEMHA=@2~QtSklQ zA$V-U4-K$S4#~jom4HQaJWp_K8=uuPFCSt)9ztMpB~&Q)8%#5Z5Da(xAw39U@xw+z zqqmv#DkO)7nA?YGNEV7s3vC*o(KBxzW^NB75}^Vr3X@?(G{}jI#{fXBvc8tdC25U! zOiVLLzclvw(=i#X~}bq^#y?Z;{UhQ>C0FlxX_7 z-gsNjd~h%G+`Xu2yNcUO+?&qsAh%PUn^{{CG!ryBe{!a+&QL(1^NrKYty5H;yGo?$ zY&T4a?R+k4q_ndq+(u=iwwZ;TrZ&$t3cqKR6+WeIWKWprdrNQpkDmFnfw_4<>f5H; zw&JMTP8l8(wH@J26m4gV^-3FkOkvGh;MP%~NtrZK-Nf_eV>F!PUxXy(yW-&OqH%wylPc{C+k@M+RD=6UymY@Yk1ZUouW>^TMN zeDw%(^%0tNG_`k?zBlWzd$x|IWD7n`y2d)UEwXjw-Uh8B^|69|-oC{AZH#6gP5oWv z@6A5rd$y0JWD7n`y2d_l+hzNx`yR0~7R+u4ZYSN?VFJ zLAA)^HKjvO^iGG@QBxpk_1^|0~f!_4bb%#BHG%^qUwsonz? z?V~?vSoCjqTNa4}uE3(}9P|E<1}qx%MI?%*VbQHwnMJ46eNl5>B9Q2v0Q150fJ6fl z(Ma^)h)kk0Y7$A)H-ScfUtm5C4QMo=5sgOA{Y<8jW?m%Das?LM{0Z}U;v2`Jmxl~5 z4T-bs7xkabJinjQ>(l2h%2VrPQqNo)vpUAC>>@jn9Csw`Q)9EaRBe4NSIU*^ti>-( z(#K9TQVb zc<7B>MmUew`V^9+qls{);L|@s^9?u8CZ0|vi{)%?Gs)*Ra(vR7Op;Rt@9B;Igkw{$ z>6zcXrXSgbDB67S?3WKt=uAgyndMw5Ww7fECmtTs9*TTeH!}h_Ns{_lB0U9<2`W>x zNA;9xniFu!BzfOgJgU|q6_yOjRC!O7l=Xy_iIh`2%4CgPa2sW+Tyt{~-#$@;NxRFh zEGWwq2Qp=x0I+;Ek zu4MXlxRL44`NBKl5Fc{~oRnkwe;kdu6V7}wcfi4_yZSs3b2lGjVeaD77tCFKXn?t! 
z_qv(ECMj$CI=dK@hy^p)UE6<~4z^|X-=>3jzVLqOe*6C*5d5__hiTpF0S9P}A3k9m zW{w$u^~C);fMYsRO_j5$n(5P#r_DlaI8%YQB+51O<4p(E-iY;rp#Ba$Y9hc%+jmeke9$fm|}{rIYV z2k~NZkECPVyn}QxxmVK3YxJGuOBs5EfV>0W0f3agzw)J*@5K0dD+FqmUZQ}QcsCTy zEWJeG33me_^+;-@OLd?@k9I&0x4MsCdk5DxCQ96K7VZn?CzM%wXot?EL=ZjUq_of0zQ?3y*5-fB2^uT6;_hgd#_eEP)a_Q&W+nhZs5%6Cw1JmnfhMEtOe% zXot?EL=e5;ByF8VS*^7Wn(zw!q#X&YUUW&16>BcCsv%3PwOp!RStH9ZzL;K1t!?2d zth$w2TdOp%vW?tYtyn4JQ^zIiC94@$L*j6)NMkSAFO8e8=u9WLaCk0ySvVwQx;1IR z%U)RGI4%T#@p_!&?3}Z}c{#4YxeDZ&)2T{t;6sm;{x~-$+`}iSg{1pAuE9Y$_{K_{ zb~k*%J;(Vfkl*>Jzr?R6d=Y!h7TicrN9yVFY;msWO#699J{6r>rmgsayq(%b*yKjo3$AiS4@<6KJ5$V(kpe6KvYozLDF0SHeKSO{xI(^HVRN5n~pgrV&mb7j1TMOifLQm zsYEnr71CKg>+?b#;bs3yrI6bS@aa-;BjE$R5R)qqiFo4mIak1ukEpP3WB%xPttjDg z1%e5vC*kv2;}J(F4c~wXah6iiNG;)YtjD7)kLCQUvfLQ!tbu$1Z`rw=E`^sftizg% zIfcCC$a*S@n3jrYk90Xw&rI`5oj2m8Djy#Y3F*;@H69HwXMG;Yj`@WK>efbn7VO&~ zWm{~m)+q+|H7n{`3(q z3w^`XE^I94?;|f3cEyWGpr5>$&y@J`PJM52JWx$oz0*sA-}HkQ1aH_g;oJ$21-yOr zI{|ORzY`9t^7ezjg1uxPqKwDNK84_2jPlm>R^R#wf?;WO7kpq3+}}bl7FX0`f#gH2 zb4cC^83jWppId@Camu3_pQW+!pmwHYP?5SAG2AEtHj3xJdD2^nIgn}aJ}r~iCjdzZ%vv9}P1p}Ypf?%Z*VwN)`7=W9T$j)c4 z<5#P1M;5pMmbZj73uuq+m8o6YLx?rt zni8?NwxPVEW|>#nL#xjM3RLRTjIofu5i~Dg`O2>K5#l;9uOj;JSl+R?H+?X6g8Ix7 zd-yH+j{1;Rr|8qPt$PafnV%o;-aZ~>oJ!5tSu8*9cI=>i=xZjz*F`9QVO66KiR>%* zM)<0Xfr;ZR7r^qCB~5v4`w~{RmY4WS?CF{Z>UZUKkUN5cZtht?MTSE~=|a26H;3h65P-@Gxc@bc`&V#<-wFvIKl`}jxqz>jshdHDaA{s*>C2h{)o literal 0 HcmV?d00001 diff --git a/setup.py b/setup.py index 8d9b44c2..9a21905b 100644 --- a/setup.py +++ b/setup.py @@ -23,7 +23,15 @@ packages=find_packages(), license=ABOUT["__license__"], install_requires=REQUIRED_PKGS, - extras_require={"collate": ["cytominer-database==0.3.4"]}, + extras_require={ + "collate": ["cytominer-database==0.3.4"], + "cell_locations": [ + "fsspec>=2023.1.0", + "s3fs>=0.4.2", + "boto3>=1.26.79", + "fire>=0.5.0", + ], + }, python_requires=">=3.4", include_package_data=True, )
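For reviewers who want to exercise the new functionality from Python rather than through the `cell_locations_cmd` module, here is a minimal sketch built directly on the `CellLocation` signature introduced in this patch (the file paths are placeholders):

```python
# Sketch of the Python-level entry point added by this patch; paths are placeholders.
from pycytominer.cyto_utils.cell_locations import CellLocation

cell_loc = CellLocation(
    metadata_input="load_data_with_illum.parquet",  # placeholder CSV/Parquet metadata file
    single_cell_input="BR00126114.sqlite",  # placeholder SQLite file with Image and Nuclei tables
    augmented_metadata_output=None,  # with no output path, a DataFrame is returned
)
augmented_df = cell_loc.add_cell_location()

# Each row gains a CellCenters column: a list of dicts keyed by ObjectNumber,
# Nuclei_Location_Center_X, and Nuclei_Location_Center_Y for that image's cells.
print(augmented_df["CellCenters"].iloc[0][:2])
```

Passing a string path as `augmented_metadata_output` instead writes a Parquet file and returns that path, which is what the command-line example in the README above does.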