Windows support #951

Merged: 13 commits, Apr 15, 2024
14 changes: 7 additions & 7 deletions .github/workflows/python-package-conda.yml
@@ -9,8 +9,8 @@ on:
branches: [ master ]

env:
- latest_python: "3.11"
- supported_pythons: '["3.7", "3.8", "3.9", "3.10", "3.11"]'
+ latest_python: "3.12"
+ supported_pythons: '["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]'
miniforge_version: "22.9.0-2"
miniforge_variant: "Mambaforge"

@@ -34,7 +34,7 @@ jobs:
needs: conf
runs-on: "ubuntu-latest"
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
@@ -56,7 +56,7 @@ jobs:
needs: ["conf", "lint"]
runs-on: "ubuntu-latest"
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
@@ -81,11 +81,11 @@ jobs:
strategy:
fail-fast: true
matrix:
os: ["ubuntu-latest", "macos-latest"]
os: ["ubuntu-latest", "macos-latest", "windows-latest"]
python_version: ${{ fromJSON(needs.conf.outputs.supported_pythons) }}
use_conda: [true, false]
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- uses: conda-incubator/setup-miniconda@v2
with:
auto-update-conda: true
@@ -115,7 +115,7 @@ jobs:
needs: ["conf", "lint", "doc", "test-all"]
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
# setup-buildx-action uses the git context directly
# but checklist wants the .git directory
- name: Set up QEMU
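Side note on the matrix wiring above: `supported_pythons` is stored as a JSON string precisely so the test job can expand it with `fromJSON()` into the matrix. A minimal sketch of that expansion, using Python's `json` module as a stand-in for the Actions expression:

```python
import json

# What fromJSON(needs.conf.outputs.supported_pythons) effectively does
# when building the matrix in the workflow above.
supported_pythons = '["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]'
python_versions = json.loads(supported_pythons)
print(python_versions)  # ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12']
```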
25 changes: 15 additions & 10 deletions .github/workflows/release.yml
@@ -7,15 +7,15 @@ jobs:
name: Build sdist
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4

- name: Build distribution
run: |
export RELEASE_VERSION=${{ github.ref_name }}
pip install numpy cython
pipx run build --sdist

- - uses: actions/upload-artifact@v3
+ - uses: actions/upload-artifact@v4
with:
name: dist-artifacts
path: dist/*.tar.gz
@@ -27,13 +27,13 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
- os: [ubuntu-latest, macos-latest]
- pyver: ["37", "38", "39", "310", "311"]
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ pyver: ["37", "38", "39", "310", "311", "312"]

steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
- name: Set up Python
- uses: actions/setup-python@v2
+ uses: actions/setup-python@v5
with:
python-version: 3.9

@@ -47,7 +47,7 @@ jobs:
- name: Build wheels (py ${{ matrix.pyver }}) Linux
if: matrix.os == 'ubuntu-latest'
env:
- CIBW_ARCHS_LINUX: x86_64
+ CIBW_ARCHS_LINUX: "x86_64 aarch64"
CIBW_SKIP: "*-musllinux*"
CIBW_BUILD: "cp${{ matrix.pyver }}-*"

@@ -60,10 +60,15 @@ jobs:
CIBW_BUILD: "cp${{ matrix.pyver }}-*"

uses: pypa/[email protected]



+ - name: Build wheels (py ${{ matrix.pyver }}) Windows
+ if: matrix.os == 'windows-latest'
+ env:
+ CIBW_ARCHS_WINDOWS: "amd64 win32 arm64"
+ CIBW_BUILD: "cp${{ matrix.pyver }}-*"

Review thread on the CIBW_ARCHS_WINDOWS line:

Member: Is that 32-bit Windows? If so, does the build actually work?

Member: ...this repository does not need to support 32-bit architectures.

Collaborator (author): I believe it is 32-bit Windows and that it works, although @qiyunzhu wrote this section of the code so he may know more than me.

- name: Upload wheels
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
with:
name: dist-artifacts
path: ./wheelhouse/*.whl
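For context on how the CIBW_BUILD values above select what to build: cibuildwheel matches build identifiers against fnmatch-style glob patterns. A minimal sketch of that selection; the identifier list here is illustrative, not exhaustive:

```python
from fnmatch import fnmatch

# Hypothetical subset of cibuildwheel build identifiers.
identifiers = ["cp37-win32", "cp311-win_amd64", "cp312-win_amd64",
               "cp312-manylinux_x86_64", "pp310-win_amd64"]

# What CIBW_BUILD expands to when matrix.pyver is "312".
pattern = "cp312-*"
print([i for i in identifiers if fnmatch(i, pattern)])
# -> ['cp312-win_amd64', 'cp312-manylinux_x86_64']
```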
7 changes: 6 additions & 1 deletion biom/tests/test_cli/test_add_metadata.py
@@ -9,6 +9,7 @@
# -----------------------------------------------------------------------------

import tempfile
+ import os
from unittest import TestCase, main

import biom
@@ -20,13 +20,17 @@ class TestAddMetadata(TestCase):
def setUp(self):
"""Set up data for use in unit tests."""
self.cmd = _add_metadata
- with tempfile.NamedTemporaryFile('w') as fh:
+ with tempfile.NamedTemporaryFile('w', delete=False) as fh:
fh.write(biom1)
fh.flush()
self.biom_table1 = biom.load_table(fh.name)
+ self.temporary_fh_name = fh.name
self.sample_md_lines1 = sample_md1.split('\n')
self.obs_md_lines1 = obs_md1.split('\n')

+ def tearDown(self):
+ os.unlink(self.temporary_fh_name)

def test_add_sample_metadata_no_casting(self):
"""Correctly adds sample metadata without casting it."""
# Add a subset of sample metadata to a table that doesn't have any
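The `delete=False` change is the core of the Windows fix in these test files. With the default `delete=True`, CPython opens the temporary file with O_TEMPORARY on Windows, and a file opened that way cannot be reopened by name while the handle is still open, so loading it inside the `with` block raises PermissionError there. A minimal sketch of the portable pattern, with `load_table` as a stand-in for `biom.load_table` to keep it self-contained:

```python
import os
import tempfile

def load_table(path):  # stand-in for biom.load_table in this sketch
    with open(path) as f:
        return f.read()

# delete=False creates an ordinary file that can be reopened while the
# writing handle is still open, on POSIX and Windows alike; the cost is
# explicit cleanup, which the new tearDown provides.
with tempfile.NamedTemporaryFile('w', delete=False) as fh:
    fh.write("#OTU ID\tSample1\nOTU_1\t3\n")
    fh.flush()
    table = load_table(fh.name)  # PermissionError on Windows with delete=True

os.unlink(fh.name)  # what tearDown does in the tests
```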
13 changes: 7 additions & 6 deletions biom/tests/test_cli/test_subset_table.py
@@ -55,9 +55,10 @@ def test_invalid_input(self):
def test_subset_samples_hdf5(self):
"""Correctly subsets samples in a hdf5 table"""
cwd = os.getcwd()
- if '/' in __file__:
- os.chdir(__file__.rsplit('/', 1)[0])
- obs = _subset_table(hdf5_biom='test_data/test.biom', axis='sample',
+ if os.path.sep in __file__:
+ os.chdir(os.path.dirname(__file__))
+ obs = _subset_table(hdf5_biom=os.path.join('test_data', 'test.biom'),
+ axis='sample',
ids=['Sample1', 'Sample2', 'Sample3'],
json_table_str=None)
os.chdir(cwd)
@@ -71,9 +72,9 @@ def test_subset_samples_hdf5(self):
def test_subset_observations_hdf5(self):
"""Correctly subsets samples in a hdf5 table"""
cwd = os.getcwd()
- if '/' in __file__:
- os.chdir(__file__.rsplit('/', 1)[0])
- obs = _subset_table(hdf5_biom='test_data/test.biom',
+ if os.path.sep in __file__:
+ os.chdir(os.path.dirname(__file__))
+ obs = _subset_table(hdf5_biom=os.path.join('test_data', 'test.biom'),
axis='observation',
ids=['GG_OTU_1', 'GG_OTU_3', 'GG_OTU_5'],
json_table_str=None)
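A short sketch of why the hard-coded '/' broke on Windows: `__file__` there typically contains backslashes, so the old `'/' in __file__` test was False, the chdir never ran, and the relative `test_data` path did not resolve. The `os.path` functions handle both separators:

```python
import os

# On Windows __file__ looks like 'C:\\...\\test_cli\\test_subset_table.py',
# so checking for '/' silently skipped the chdir below.
if os.path.sep in __file__:
    os.chdir(os.path.dirname(__file__))             # portable dirname
data_path = os.path.join('test_data', 'test.biom')  # portable join
print(data_path)
```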
7 changes: 6 additions & 1 deletion biom/tests/test_cli/test_summarize_table.py
@@ -12,16 +12,21 @@
from biom.parse import load_table

import tempfile
+ import os
from unittest import TestCase, main


class TestSummarizeTable(TestCase):

def setUp(self):
- with tempfile.NamedTemporaryFile(mode='w') as fh:
+ with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
fh.write(biom1)
fh.flush()
self.biom1 = load_table(fh.name)
+ self.temporary_fh_name = fh.name

+ def tearDown(self):
+ os.unlink(self.temporary_fh_name)

def test_default(self):
""" TableSummarizer functions as expected
11 changes: 9 additions & 2 deletions biom/tests/test_cli/test_table_converter.py
@@ -8,6 +8,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------

+ import os
from os.path import abspath, dirname, join
import tempfile

@@ -28,16 +29,18 @@ def setUp(self):
self.cmd = _convert
self.output_filepath = tempfile.NamedTemporaryFile().name

- with tempfile.NamedTemporaryFile('w') as fh:
+ with tempfile.NamedTemporaryFile('w', delete=False) as fh:
fh.write(biom1)
fh.flush()
self.biom_table1 = load_table(fh.name)
+ self.temporary_fh_table_name = fh.name

self.biom_lines1 = biom1.split('\n')
- with tempfile.NamedTemporaryFile('w') as fh:
+ with tempfile.NamedTemporaryFile('w', delete=False) as fh:
fh.write(classic1)
fh.flush()
self.classic_biom1 = load_table(fh.name)
+ self.temporary_fh_classic_name = fh.name

self.sample_md1 = MetadataMap.from_file(sample_md1.split('\n'))

@@ -47,6 +50,10 @@ def setUp(self):
self.json_collapsed_samples = join(test_data_dir,
'json_sample_collapsed.biom')

+ def tearDown(self):
+ os.unlink(self.temporary_fh_classic_name)
+ os.unlink(self.temporary_fh_table_name)

def test_classic_to_biom(self):
"""Correctly converts classic to biom."""
self.cmd(table=self.classic_biom1,
6 changes: 3 additions & 3 deletions biom/tests/test_cli/test_table_normalizer.py
@@ -24,9 +24,9 @@ def setUp(self):
self.cmd = _normalize_table

cwd = os.getcwd()
- if '/' in __file__:
- os.chdir(__file__.rsplit('/', 1)[0])
- self.table = biom.load_table('test_data/test.json')
+ if os.path.sep in __file__:
+ os.chdir(os.path.dirname(__file__))
+ self.table = biom.load_table(os.path.join('test_data', 'test.json'))
os.chdir(cwd)

def test_bad_inputs(self):
3 changes: 2 additions & 1 deletion biom/tests/test_cli/test_validate_table.py
@@ -39,7 +39,8 @@ def setUp(self):
self.to_remove = []

cur_path = os.path.split(os.path.abspath(__file__))[0]
- examples_path = os.path.join(cur_path.rsplit('/', 3)[0], 'examples')
+ examples_path = os.path.join(cur_path.rsplit(os.path.sep, 3)[0],
+ 'examples')
self.hdf5_file_valid = os.path.join(examples_path,
'min_sparse_otu_table_hdf5.biom')
self.hdf5_file_valid_md = os.path.join(examples_path,
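The rsplit here climbs three path components from biom/tests/test_cli up to the repository root, where the examples directory lives. For reference, a separator-agnostic sketch of the same climb using pathlib (an alternative, not what the PR uses):

```python
from pathlib import Path

# .../biom/tests/test_cli -> parents[2] is the repository root
cur_path = Path(__file__).resolve().parent
examples_path = cur_path.parents[2] / 'examples'
print(examples_path)
```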
42 changes: 25 additions & 17 deletions biom/tests/test_parse.py
@@ -46,14 +46,19 @@ def setUp(self):
self.legacy_otu_table1 = legacy_otu_table1
self.otu_table1 = otu_table1
self.otu_table1_floats = otu_table1_floats
- self.files_to_remove = []
+ self.to_remove = []
self.biom_minimal_sparse = biom_minimal_sparse

self.classic_otu_table1_w_tax = classic_otu_table1_w_tax.split('\n')
self.classic_otu_table1_no_tax = classic_otu_table1_no_tax.split('\n')
self.classic_table_with_complex_metadata = \
classic_table_with_complex_metadata.split('\n')

+ def tearDown(self):
+ if self.to_remove:
+ for f in self.to_remove:
+ os.remove(f)

def test_from_tsv_bug_854(self):
data = StringIO('#FeatureID\tSample1')
exp = Table([], [], ['Sample1'])
@@ -281,38 +286,40 @@ def test_parse_adjacency_table_no_header(self):
def test_parse_biom_table_hdf5(self):
"""Make sure we can parse a HDF5 table through the same loader"""
cwd = os.getcwd()
- if '/' in __file__[1:]:
- os.chdir(__file__.rsplit('/', 1)[0])
- Table.from_hdf5(h5py.File('test_data/test.biom', 'r'))
+ if os.path.sep in __file__[1:]:
+ os.chdir(os.path.dirname(__file__))
+ Table.from_hdf5(h5py.File(os.path.join('test_data', 'test.biom'),
+ 'r'))
os.chdir(cwd)

def test_save_table_filepath(self):
t = Table(np.array([[0, 1, 2], [3, 4, 5]]), ['a', 'b'],
['c', 'd', 'e'])
- with NamedTemporaryFile() as tmpfile:
+ with NamedTemporaryFile(delete=False) as tmpfile:
save_table(t, tmpfile.name)
obs = load_table(tmpfile.name)
self.assertEqual(obs, t)
+ self.to_remove.append(tmpfile.name)

def test_load_table_filepath(self):
cwd = os.getcwd()
- if '/' in __file__[1:]:
- os.chdir(__file__.rsplit('/', 1)[0])
- load_table('test_data/test.biom')
+ if os.path.sep in __file__[1:]:
+ os.chdir(os.path.dirname(__file__))
+ load_table(os.path.join('test_data', 'test.biom'))
os.chdir(cwd)

def test_load_table_inmemory(self):
cwd = os.getcwd()
- if '/' in __file__[1:]:
- os.chdir(__file__.rsplit('/', 1)[0])
- load_table(h5py.File('test_data/test.biom', 'r'))
+ if os.path.sep in __file__[1:]:
+ os.chdir(os.path.dirname(__file__))
+ load_table(h5py.File(os.path.join('test_data', 'test.biom'), 'r'))
os.chdir(cwd)

def test_load_table_inmemory_json(self):
cwd = os.getcwd()
- if '/' in __file__[1:]:
- os.chdir(__file__.rsplit('/', 1)[0])
- load_table(open('test_data/test.json'))
+ if os.path.sep in __file__[1:]:
+ os.chdir(os.path.dirname(__file__))
+ load_table(open(os.path.join('test_data', 'test.json')))
os.chdir(cwd)

def test_load_table_inmemory_stringio(self):
@@ -350,10 +357,11 @@ def test_parse_biom_table_with_hdf5(self):
"""tests for parse_biom_table when we have h5py"""
# We will round-trip the HDF5 file to several different formats, and
# make sure we can recover the same table using parse_biom_table
- if '/' in __file__[1:]:
- os.chdir(__file__.rsplit('/', 1)[0])
+ if os.path.sep in __file__[1:]:
+ os.chdir(os.path.dirname(__file__))

- t = parse_biom_table(h5py.File('test_data/test.biom', 'r'))
+ t = parse_biom_table(h5py.File(os.path.join('test_data', 'test.biom'),
+ 'r'))

# These things are not round-trippable using the general-purpose
# parse_biom_table function
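One subtlety these hunks preserve is the `[1:]` slice in the separator check. It appears to guard two edge cases: a bare filename (no separator at all, where `os.path.dirname` returns `''` and `os.chdir('')` fails) and a file directly under the filesystem root, where the only separator is the leading one. A sketch with a hypothetical helper name:

```python
import os

def chdir_beside(file_path):
    # Ignore a leading separator: '/test_parse.py' has no directory worth
    # changing into, and a bare 'test_parse.py' has no separator at all.
    if os.path.sep in file_path[1:]:
        os.chdir(os.path.dirname(file_path))

chdir_beside(__file__)
```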