From ba9706d217e3cae32da5b5d6415d6c015dfd8955 Mon Sep 17 00:00:00 2001 From: Nico Tonnhofer Date: Thu, 2 Feb 2023 22:38:16 +0100 Subject: [PATCH 01/10] chore: update github workflow --- .github/workflows/tests.yaml | 83 +++++++++++++++++------------------- requirements/tests | 2 +- tox.ini | 24 +++-------- 3 files changed, 46 insertions(+), 63 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 6cb3f47..3c05e1f 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -11,39 +11,26 @@ on: jobs: run_tox: - name: tox -e ${{ matrix.toxenv }} (${{matrix.python-version}} on ${{ matrix.os }}) + name: tox run (${{ matrix.python-version }} on ${{ matrix.os }}) runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: - python-version: - - "3.8" - os: - - "ubuntu-20.04" - toxenv: - - "pep8" + python-version: ["3.7", "3.8", "3.9", "3.10", "pypy3.9"] + os: ["ubuntu-22.04"] include: - - python-version: "3.5" - os: ubuntu-20.04 - toxenv: py35 - - python-version: "3.6" - os: ubuntu-20.04 - toxenv: py36 - python-version: "3.7" - os: ubuntu-20.04 - toxenv: py37 - - python-version: "3.8" - os: ubuntu-20.04 - toxenv: py38 - - python-version: "3.9" - os: ubuntu-20.04 - toxenv: py39 - - python-version: "3.10" - os: ubuntu-20.04 - toxenv: py310 - - python-version: pypy3 - os: ubuntu-20.04 - toxenv: pypy3 + os: "ubuntu-22.04" + coverage: true + mypy: true + pep8: true + - python-version: "3.10" + os: "ubuntu-22.04" + mypy: true + + env: + OS: ${{ matrix.os }} + PYTHON: ${{ matrix.python-version }} # Steps to run in each job. # Some are GitHub actions, others run shell commands. 
@@ -52,35 +39,41 @@ jobs: uses: actions/checkout@v2 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - - name: Set up Python 3.8 env - if: ${{ matrix.toxenv == 'py35' }} - run: | - sudo apt-get update - sudo apt-get install -y build-essential python3.8 python3.8-dev python3.8-venv - python3.8 -m venv py38-env - - name: Install dependencies run: | - if [ -f py38-env/bin/activate ]; then source py38-env/bin/activate; fi - python -m pip install --upgrade pip - pip install coverage tox + pip install tox python --version pip --version tox --version - coverage --version - - name: Run tests + - name: Setup test suite + run: | + tox run -vv --notest + + - name: Run test suite + run: | + tox run --skip-pkg-install + + - name: Check pep8 + if: matrix.pep8 + run: | + tox run -e pep8 + + - name: Check mypy + if: matrix.mypy run: | - if [ -f py38-env/bin/activate ]; then source py38-env/bin/activate; fi - tox -e ${{ matrix.toxenv }} + tox run -e mypy - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 - if: ${{ matrix.toxenv == 'py38' }} + uses: codecov/codecov-action@v3 + if: matrix.coverage with: - env_vars: PYTHON + env_vars: OS,PYTHON + files: ./coverage.xml + flags: unittests + name: codecov-umbrella fail_ci_if_error: true diff --git a/requirements/tests b/requirements/tests index 7093b61..9955dec 100644 --- a/requirements/tests +++ b/requirements/tests @@ -1,2 +1,2 @@ -coverage pytest +pytest-cov diff --git a/tox.ini b/tox.ini index 777799b..27006d6 100644 --- a/tox.ini +++ b/tox.ini @@ -1,11 +1,12 @@ -[tox] -envlist = pep8, - py38, - coverage-report - [testenv] deps = -r{toxinidir}/requirements/tests -commands = coverage run -m pytest {posargs:tests} +package = editable +passenv = CI +commands = pytest \ + --cov=token_bucket \ + --cov-report=xml \ + --cov-report=term \ + {posargs:tests} # -------------------------------------------------------------------- # Style @@ 
-26,17 +27,6 @@ deps = flake8 flake8-import-order commands = flake8 {posargs:.} -# -------------------------------------------------------------------- -# Coverage -# -------------------------------------------------------------------- - -[testenv:coverage-report] -skip_install = true -commands = - coverage combine - coverage html -d .coverage_html - coverage report - # -------------------------------------------------------------------- # Documentation # -------------------------------------------------------------------- From 01b1882e1a29f0ab70fa174fe25e1f0218d108e4 Mon Sep 17 00:00:00 2001 From: Nico Tonnhofer Date: Wed, 1 Feb 2023 23:44:03 +0100 Subject: [PATCH 02/10] chore: use hatchling to build the project --- .coveragerc | 8 ------ MANIFEST.in | 4 --- pyproject.toml | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++ setup.cfg | 11 --------- setup.py | 48 ------------------------------------ tox.ini | 1 + 6 files changed, 67 insertions(+), 71 deletions(-) delete mode 100644 .coveragerc delete mode 100644 MANIFEST.in create mode 100644 pyproject.toml delete mode 100644 setup.cfg delete mode 100644 setup.py diff --git a/.coveragerc b/.coveragerc deleted file mode 100644 index 2b836a7..0000000 --- a/.coveragerc +++ /dev/null @@ -1,8 +0,0 @@ -[run] -branch = True -source = token_bucket - -parallel = True - -[report] -show_missing = True diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 31841ca..0000000 --- a/MANIFEST.in +++ /dev/null @@ -1,4 +0,0 @@ -include tests/* -include tools/* -include requirements/test -include tox.ini diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..228acc3 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,66 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +name = "token-bucket" +dynamic = ["version"] +description = "Very fast implementation of the token bucket algorithm." 
+readme = "README.rst" +license = "Apache-2.0" +requires-python = ">=3.7" +authors = [{ name = "kgriffs", email = "mail@kgriffs.com" }] +keywords = [ + "bucket", + "cloud", + "http", + "https", + "limiting", + "rate", + "throttling", + "token", + "web", +] +classifiers = [ + "Development Status :: 4 - Beta", + "Environment :: Web Environment", + "Intended Audience :: Developers", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: Apache Software License", + "Natural Language :: English", + "Operating System :: MacOS :: MacOS X", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Topic :: Internet :: WWW/HTTP", + "Topic :: Software Development :: Libraries", +] +dependencies = [] + +[project.urls] +Homepage = "https://github.com/falconry/token-bucket" + +[tool.hatch.version] +path = "token_bucket/version.py" + +[tool.hatch.build.targets.sdist] +include = ["/token_bucket"] + +[tool.coverage.run] +branch = true +source = ["token_bucket"] +parallel = true + +[tool.coverage.report] +show_missing = true +exclude_lines = [ + "pragma: no cover", + "if __name__ == .__main__.:", + "@(abc\\.)?abstractmethod", +] diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 4629a02..0000000 --- a/setup.cfg +++ /dev/null @@ -1,11 +0,0 @@ -[egg_info] -tag_build = dev1 - -[wheel] -universal = 1 - -[aliases] -test = pytest - -[tool:pytest] -addopts = tests diff --git a/setup.py b/setup.py deleted file mode 100644 index 892fd32..0000000 --- a/setup.py +++ /dev/null @@ -1,48 +0,0 @@ -import imp -import io -from os import path - -from setuptools import find_packages, setup - -VERSION = 
imp.load_source('version', path.join('.', 'token_bucket', 'version.py')) -VERSION = VERSION.__version__ - - -setup( - name='token_bucket', - version=VERSION, - description='Very fast implementation of the token bucket algorithm.', - long_description=io.open('README.rst', 'r', encoding='utf-8').read(), - classifiers=[ - 'Development Status :: 4 - Beta', - 'Environment :: Web Environment', - 'Natural Language :: English', - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: MacOS :: MacOS X', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: POSIX', - 'Topic :: Internet :: WWW/HTTP', - 'Topic :: Software Development :: Libraries', - 'Programming Language :: Python', - 'Programming Language :: Python :: Implementation :: CPython', - 'Programming Language :: Python :: Implementation :: PyPy', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - ], - keywords='web http https cloud rate limiting token bucket throttling', - author='kgriffs', - author_email='mail@kgriffs.com', - url='https://github.com/falconry/token-bucket', - license='Apache 2.0', - packages=find_packages(exclude=['tests']), - python_requires='>=3.5', - install_requires=[], - setup_requires=['pytest-runner'], - tests_require=['pytest'], -) diff --git a/tox.ini b/tox.ini index 27006d6..cfb2610 100644 --- a/tox.ini +++ b/tox.ini @@ -6,6 +6,7 @@ commands = pytest \ --cov=token_bucket \ --cov-report=xml \ --cov-report=term \ + --cov-config=pyproject.toml \ {posargs:tests} # -------------------------------------------------------------------- From d6e7f1eb688754ae1c1f244903de7b05c9ebeaf6 Mon Sep 17 00:00:00 2001 From: Nico Tonnhofer Date: Sat, 4 Feb 2023 13:22:56 +0100 Subject: 
[PATCH 03/10] chore: use black as style --- pyproject.toml | 4 ++ tests/test_limiter.py | 109 +++++++++++++++++++---------------- tests/test_multithreading.py | 33 ++++++----- tests/test_version.py | 2 +- token_bucket/__init__.py | 4 +- token_bucket/limiter.py | 24 ++++---- token_bucket/storage.py | 6 +- token_bucket/version.py | 2 +- 8 files changed, 99 insertions(+), 85 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 228acc3..5ad330f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,3 +64,7 @@ exclude_lines = [ "if __name__ == .__main__.:", "@(abc\\.)?abstractmethod", ] + +[tool.black] +line-length = 88 +target-version = ['py37', 'py38'] diff --git a/tests/test_limiter.py b/tests/test_limiter.py index aabaaed..ee1e342 100644 --- a/tests/test_limiter.py +++ b/tests/test_limiter.py @@ -7,19 +7,22 @@ import token_bucket -@pytest.mark.parametrize('rate,capacity', [ - (0.3, 1), - (1, 1), - (2.5, 1), # Fractional rates are valid - (10, 100), # Long recovery time after bursting - (10, 10), - (10, 1), # Disallow bursting - (100, 100), - (100, 10), - (100, 1), # Disallow bursting -]) +@pytest.mark.parametrize( + "rate,capacity", + [ + (0.3, 1), + (1, 1), + (2.5, 1), # Fractional rates are valid + (10, 100), # Long recovery time after bursting + (10, 10), + (10, 1), # Disallow bursting + (100, 100), + (100, 10), + (100, 1), # Disallow bursting + ], +) def test_general_functionality(rate, capacity): - key = 'key' + key = "key" storage = token_bucket.MemoryStorage() limiter = token_bucket.Limiter(rate, capacity, storage) @@ -72,11 +75,11 @@ def consume_all(): consume_all() -@pytest.mark.parametrize('capacity', [1, 2, 4, 10]) +@pytest.mark.parametrize("capacity", [1, 2, 4, 10]) def test_consume_multiple_tokens_at_a_time(capacity): rate = 100 num_tokens = capacity - key = 'key' + key = "key" storage = token_bucket.MemoryStorage() limiter = token_bucket.Limiter(rate, capacity, storage) @@ -102,10 +105,10 @@ def test_different_keys(): keys = [ 
uuid.uuid4().bytes, - u'3084"5tj jafsb: f', - b'77752098', - u'whiz:bang', - b'x' + '3084"5tj jafsb: f', + b"77752098", + "whiz:bang", + b"x", ] # The last two should be non-conforming @@ -127,43 +130,49 @@ class DoesNotInheritFromStorageBase(object): token_bucket.Limiter(1, 1, DoesNotInheritFromStorageBase()) -@pytest.mark.parametrize('rate,capacity,etype', [ - (0, 0, ValueError), - (0, 1, ValueError), - (1, 0, ValueError), - (-1, -1, ValueError), - (-1, 0, ValueError), - (0, -1, ValueError), - (-2, -2, ValueError), - (-2, 0, ValueError), - (0, -2, ValueError), - ('x', 'y', TypeError), - ('x', -1, (ValueError, TypeError)), # Params could be checked in any order - (-1, 'y', (ValueError, TypeError)), # ^^^ - ('x', 1, TypeError), - (1, 'y', TypeError), - ('x', None, TypeError), - (None, 'y', TypeError), - (None, None, TypeError), - (None, 1, TypeError), - (1, None, TypeError), -]) +@pytest.mark.parametrize( + "rate,capacity,etype", + [ + (0, 0, ValueError), + (0, 1, ValueError), + (1, 0, ValueError), + (-1, -1, ValueError), + (-1, 0, ValueError), + (0, -1, ValueError), + (-2, -2, ValueError), + (-2, 0, ValueError), + (0, -2, ValueError), + ("x", "y", TypeError), + ("x", -1, (ValueError, TypeError)), # Params could be checked in any order + (-1, "y", (ValueError, TypeError)), # ^^^ + ("x", 1, TypeError), + (1, "y", TypeError), + ("x", None, TypeError), + (None, "y", TypeError), + (None, None, TypeError), + (None, 1, TypeError), + (1, None, TypeError), + ], +) def test_input_validation_rate_and_capacity(rate, capacity, etype): with pytest.raises(etype): token_bucket.Limiter(rate, capacity, token_bucket.MemoryStorage()) -@pytest.mark.parametrize('key,num_tokens,etype', [ - ('', 1, ValueError), - ('', 0, ValueError), - ('x', 0, ValueError), - ('x', -1, ValueError), - ('x', -2, ValueError), - (-1, None, (ValueError, TypeError)), # Params could be checked in any order - (None, -1, (ValueError, TypeError)), # ^^^ - (None, 1, TypeError), - (1, None, TypeError), -]) 
+@pytest.mark.parametrize( + "key,num_tokens,etype", + [ + ("", 1, ValueError), + ("", 0, ValueError), + ("x", 0, ValueError), + ("x", -1, ValueError), + ("x", -2, ValueError), + (-1, None, (ValueError, TypeError)), # Params could be checked in any order + (None, -1, (ValueError, TypeError)), # ^^^ + (None, 1, TypeError), + (1, None, TypeError), + ], +) def test_input_validation_on_consume(key, num_tokens, etype): limiter = token_bucket.Limiter(1, 1, token_bucket.MemoryStorage()) with pytest.raises(etype): diff --git a/tests/test_multithreading.py b/tests/test_multithreading.py index a6a57dd..19a1759 100644 --- a/tests/test_multithreading.py +++ b/tests/test_multithreading.py @@ -20,20 +20,23 @@ def _run_threaded(func, num_threads): # NOTE(kgriffs): Don't try to remove more tokens than could ever # be available according to the bucket capacity. -@pytest.mark.parametrize('rate,capacity,max_tokens_to_consume', [ - (10, 1, 1), - (100, 1, 1), - (100, 2, 2), - (10, 10, 1), - (10, 10, 2), - (100, 10, 1), - (100, 10, 10), - (100, 100, 5), - (100, 100, 10), - (1000, 10, 1), - (1000, 10, 5), - (1000, 10, 10), -]) +@pytest.mark.parametrize( + "rate,capacity,max_tokens_to_consume", + [ + (10, 1, 1), + (100, 1, 1), + (100, 2, 2), + (10, 10, 1), + (10, 10, 2), + (100, 10, 1), + (100, 10, 10), + (100, 100, 5), + (100, 100, 10), + (1000, 10, 1), + (1000, 10, 5), + (1000, 10, 10), + ], +) def test_negative_count(rate, capacity, max_tokens_to_consume): # NOTE(kgriffs): Usually there will be a much larger number of # keys in a production system, but keep to just five to increase @@ -101,7 +104,7 @@ def loop(): def test_conforming_ratio(): rate = 100 capacity = 10 - key = 'key' + key = "key" target_ratio = 0.5 ratio_max = 0.62 num_threads = 4 diff --git a/tests/test_version.py b/tests/test_version.py index 9ddf448..d33a659 100644 --- a/tests/test_version.py +++ b/tests/test_version.py @@ -6,7 +6,7 @@ def test_version(): assert isinstance(version, str) - numbers = version.split('.') + 
numbers = version.split(".") assert len(numbers) == 3 for n in numbers: # NOTE(kgriffs): Just check that these are ints by virtue diff --git a/token_bucket/__init__.py b/token_bucket/__init__.py index a68c1c3..5ad4f77 100644 --- a/token_bucket/__init__.py +++ b/token_bucket/__init__.py @@ -5,7 +5,7 @@ # not use this "front-door" module, but rather import using the # fully-qualified paths. -from .version import __version__ # NOQA +from .limiter import Limiter # NOQA from .storage import MemoryStorage # NOQA from .storage_base import StorageBase # NOQA -from .limiter import Limiter # NOQA +from .version import __version__ # NOQA diff --git a/token_bucket/limiter.py b/token_bucket/limiter.py index ea1c631..d10f75a 100644 --- a/token_bucket/limiter.py +++ b/token_bucket/limiter.py @@ -60,26 +60,26 @@ class Limiter(object): """ __slots__ = ( - '_rate', - '_capacity', - '_storage', + "_rate", + "_capacity", + "_storage", ) def __init__(self, rate, capacity, storage): if not isinstance(rate, (float, int)): - raise TypeError('rate must be an int or float') + raise TypeError("rate must be an int or float") if rate <= 0: - raise ValueError('rate must be > 0') + raise ValueError("rate must be > 0") if not isinstance(capacity, int): - raise TypeError('capacity must be an int') + raise TypeError("capacity must be an int") if capacity < 1: - raise ValueError('capacity must be >= 1') + raise ValueError("capacity must be >= 1") if not isinstance(storage, StorageBase): - raise TypeError('storage must be a subclass of StorageBase') + raise TypeError("storage must be a subclass of StorageBase") self._rate = rate self._capacity = capacity @@ -115,15 +115,15 @@ def consume(self, key, num_tokens=1): if not key: if key is None: - raise TypeError('key may not be None') + raise TypeError("key may not be None") - raise ValueError('key must not be a non-empty string or bytestring') + raise ValueError("key must not be a non-empty string or bytestring") if num_tokens is None: - raise 
TypeError('num_tokens may not be None') + raise TypeError("num_tokens may not be None") if num_tokens < 1: - raise ValueError('num_tokens must be >= 1') + raise ValueError("num_tokens must be >= 1") self._storage.replenish(key, self._rate, self._capacity) return self._storage.consume(key, num_tokens) diff --git a/token_bucket/storage.py b/token_bucket/storage.py index 9a93093..9a20fdf 100644 --- a/token_bucket/storage.py +++ b/token_bucket/storage.py @@ -109,17 +109,15 @@ def replenish(self, key, rate, capacity): # Limit to capacity min( capacity, - # NOTE(kgriffs): The new value is the current number # of tokens in the bucket plus the number of # tokens generated since last time. Fractional # tokens are permitted in order to improve # accuracy (now is a float, and rate may be also). - tokens_in_bucket + (rate * (now - last_replenished_at)) + tokens_in_bucket + (rate * (now - last_replenished_at)), ), - # Update the timestamp for use next time - now + now, ] except KeyError: diff --git a/token_bucket/version.py b/token_bucket/version.py index 9897f79..f0e7803 100644 --- a/token_bucket/version.py +++ b/token_bucket/version.py @@ -1,4 +1,4 @@ """Package version.""" -__version__ = '0.4.0' +__version__ = "0.4.0" """Current version of token_bucket.""" From 15a648e58354308efedc37c48a658d6372f2d174 Mon Sep 17 00:00:00 2001 From: Nico Tonnhofer Date: Sat, 4 Feb 2023 17:02:39 +0100 Subject: [PATCH 04/10] chore: src layout https://packaging.python.org/en/latest/tutorials/packaging-projects/ --- pyproject.toml | 8 ++++---- {token_bucket => src/token_bucket}/__init__.py | 0 {token_bucket => src/token_bucket}/limiter.py | 0 {token_bucket => src/token_bucket}/storage.py | 0 {token_bucket => src/token_bucket}/storage_base.py | 0 {token_bucket => src/token_bucket}/version.py | 0 tox.ini | 2 +- 7 files changed, 5 insertions(+), 5 deletions(-) rename {token_bucket => src/token_bucket}/__init__.py (100%) rename {token_bucket => src/token_bucket}/limiter.py (100%) rename {token_bucket 
=> src/token_bucket}/storage.py (100%) rename {token_bucket => src/token_bucket}/storage_base.py (100%) rename {token_bucket => src/token_bucket}/version.py (100%) diff --git a/pyproject.toml b/pyproject.toml index 5ad330f..ce11767 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,14 +47,14 @@ dependencies = [] Homepage = "https://github.com/falconry/token-bucket" [tool.hatch.version] -path = "token_bucket/version.py" +path = "src/token_bucket/version.py" -[tool.hatch.build.targets.sdist] -include = ["/token_bucket"] +[tool.hatch.build] +source = ["src"] [tool.coverage.run] branch = true -source = ["token_bucket"] +source = ["src"] parallel = true [tool.coverage.report] diff --git a/token_bucket/__init__.py b/src/token_bucket/__init__.py similarity index 100% rename from token_bucket/__init__.py rename to src/token_bucket/__init__.py diff --git a/token_bucket/limiter.py b/src/token_bucket/limiter.py similarity index 100% rename from token_bucket/limiter.py rename to src/token_bucket/limiter.py diff --git a/token_bucket/storage.py b/src/token_bucket/storage.py similarity index 100% rename from token_bucket/storage.py rename to src/token_bucket/storage.py diff --git a/token_bucket/storage_base.py b/src/token_bucket/storage_base.py similarity index 100% rename from token_bucket/storage_base.py rename to src/token_bucket/storage_base.py diff --git a/token_bucket/version.py b/src/token_bucket/version.py similarity index 100% rename from token_bucket/version.py rename to src/token_bucket/version.py diff --git a/tox.ini b/tox.ini index cfb2610..cb48bb2 100644 --- a/tox.ini +++ b/tox.ini @@ -3,7 +3,7 @@ deps = -r{toxinidir}/requirements/tests package = editable passenv = CI commands = pytest \ - --cov=token_bucket \ + --cov=src \ --cov-report=xml \ --cov-report=term \ --cov-config=pyproject.toml \ From 89f8222a3a6ebba9edd2496cc4708802d91a74a8 Mon Sep 17 00:00:00 2001 From: Nico Tonnhofer Date: Sat, 4 Feb 2023 17:08:16 +0100 Subject: [PATCH 05/10] feat: improve typing 
support --- src/token_bucket/__init__.py | 10 ++++++---- src/token_bucket/limiter.py | 34 +++++++++----------------------- src/token_bucket/py.typed | 0 src/token_bucket/storage.py | 25 ++++++++++++----------- src/token_bucket/storage_base.py | 27 +++++++++++++------------ tox.ini | 9 +++++++++ 6 files changed, 52 insertions(+), 53 deletions(-) create mode 100644 src/token_bucket/py.typed diff --git a/src/token_bucket/__init__.py b/src/token_bucket/__init__.py index 5ad4f77..101e52b 100644 --- a/src/token_bucket/__init__.py +++ b/src/token_bucket/__init__.py @@ -5,7 +5,9 @@ # not use this "front-door" module, but rather import using the # fully-qualified paths. -from .limiter import Limiter # NOQA -from .storage import MemoryStorage # NOQA -from .storage_base import StorageBase # NOQA -from .version import __version__ # NOQA +from .limiter import Limiter +from .storage import MemoryStorage +from .storage_base import StorageBase +from .version import __version__ + +__all__ = ["Limiter", "MemoryStorage", "StorageBase"] diff --git a/src/token_bucket/limiter.py b/src/token_bucket/limiter.py index d10f75a..99f4032 100644 --- a/src/token_bucket/limiter.py +++ b/src/token_bucket/limiter.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -from .storage_base import StorageBase +from typing import Union +from .storage_base import KeyType, StorageBase -class Limiter(object): + +class Limiter: """Limits demand for a finite resource via keyed token buckets. 
A limiter manages a set of token buckets that have an identical @@ -65,40 +67,31 @@ class Limiter(object): "_storage", ) - def __init__(self, rate, capacity, storage): - if not isinstance(rate, (float, int)): - raise TypeError("rate must be an int or float") - + def __init__(self, rate: Union[float, int], capacity: int, storage: StorageBase): if rate <= 0: raise ValueError("rate must be > 0") - if not isinstance(capacity, int): - raise TypeError("capacity must be an int") - if capacity < 1: raise ValueError("capacity must be >= 1") - if not isinstance(storage, StorageBase): - raise TypeError("storage must be a subclass of StorageBase") - self._rate = rate self._capacity = capacity self._storage = storage - def consume(self, key, num_tokens=1): + def consume(self, key: KeyType, num_tokens: int = 1) -> bool: """Attempt to take one or more tokens from a bucket. If the specified token bucket does not yet exist, it will be created and initialized to full capacity before proceeding. Args: - key (bytes): A string or bytes object that specifies the + key: A string or bytes object that specifies the token bucket to consume from. If a global limit is desired for all consumers, the same key may be used for every call to consume(). Otherwise, a key based on consumer identity may be used to segregate limits. Keyword Args: - num_tokens (int): The number of tokens to attempt to + num_tokens: The number of tokens to attempt to consume, defaulting to 1 if not specified. It may be appropriate to ask for more than one token according to the proportion of the resource that a given request @@ -106,22 +99,13 @@ def consume(self, key, num_tokens=1): resource. Returns: - bool: True if the requested number of tokens were removed + True if the requested number of tokens were removed from the bucket (conforming), otherwise False (non- conforming). The entire number of tokens requested must be available in the bucket to be conforming. 
Otherwise, no tokens will be removed (it's all or nothing). """ - if not key: - if key is None: - raise TypeError("key may not be None") - - raise ValueError("key must not be a non-empty string or bytestring") - - if num_tokens is None: - raise TypeError("num_tokens may not be None") - if num_tokens < 1: raise ValueError("num_tokens must be >= 1") diff --git a/src/token_bucket/py.typed b/src/token_bucket/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/token_bucket/storage.py b/src/token_bucket/storage.py index 9a20fdf..e6b88ff 100644 --- a/src/token_bucket/storage.py +++ b/src/token_bucket/storage.py @@ -13,8 +13,12 @@ # limitations under the License. import time +from typing import Dict, List -from .storage_base import StorageBase +from .storage_base import KeyType, StorageBase + +TOKEN_POS = 0 +REPLENISH_TIME_POS = 1 class MemoryStorage(StorageBase): @@ -30,29 +34,29 @@ class MemoryStorage(StorageBase): """ def __init__(self): - self._buckets = {} + self._buckets: Dict[KeyType, List[float]] = {} - def get_token_count(self, key): + def get_token_count(self, key: KeyType) -> float: """Query the current token count for the given bucket. Note that the bucket is not replenished first, so the count will be what it was the last time replenish() was called. Args: - key (str): Name of the bucket to query. + key: Name of the bucket to query. Returns: - float: Number of tokens currently in the bucket (may be + Number of tokens currently in the bucket (may be fractional). """ try: - return self._buckets[key][0] + return self._buckets[key][TOKEN_POS] except KeyError: pass return 0 - def replenish(self, key, rate, capacity): + def replenish(self, key: KeyType, rate: float, capacity: int) -> None: """Add tokens to a bucket per the given rate. 
This method is exposed for use by the token_bucket.Limiter @@ -123,7 +127,7 @@ def replenish(self, key, rate, capacity): except KeyError: self._buckets[key] = [capacity, time.monotonic()] - def consume(self, key, num_tokens): + def consume(self, key: KeyType, num_tokens: int) -> bool: """Attempt to take one or more tokens from a bucket. This method is exposed for use by the token_bucket.Limiter @@ -132,7 +136,7 @@ def consume(self, key, num_tokens): # NOTE(kgriffs): Assume that the key will be present, since # replenish() will always be called before consume(). - tokens_in_bucket = self._buckets[key][0] + tokens_in_bucket = self._buckets[key][TOKEN_POS] if tokens_in_bucket < num_tokens: return False @@ -172,6 +176,5 @@ def consume(self, key, num_tokens): # much contention for the lock during such a short # time window, but we might as well remove the # possibility given the points above. - - self._buckets[key][0] -= num_tokens + self._buckets[key][TOKEN_POS] -= num_tokens return True diff --git a/src/token_bucket/storage_base.py b/src/token_bucket/storage_base.py index e64abdf..80424e3 100644 --- a/src/token_bucket/storage_base.py +++ b/src/token_bucket/storage_base.py @@ -13,28 +13,29 @@ # limitations under the License. import abc +from typing import Union +KeyType = Union[str, bytes] -class StorageBase(object): - __metaclass__ = abc.ABCMeta +class StorageBase(abc.ABC): @abc.abstractmethod - def get_token_count(self, key): + def get_token_count(self, key: KeyType) -> float: """Query the current token count for the given bucket. Note that the bucket is not replenished first, so the count will be what it was the last time replenish() was called. Args: - key (str): Name of the bucket to query. + key: Name of the bucket to query. Returns: - float: Number of tokens currently in the bucket (may be + Number of tokens currently in the bucket (may be fractional). 
""" @abc.abstractmethod - def replenish(self, key, rate, capacity): + def replenish(self, key: KeyType, rate: float, capacity: int) -> None: """Add tokens to a bucket per the given rate. Conceptually, tokens are added to the bucket at a rate of one @@ -44,28 +45,28 @@ def replenish(self, key, rate, capacity): bucket was replenished. Args: - key (str): Name of the bucket to replenish. - rate (float): Number of tokens per second to add to the + key: Name of the bucket to replenish. + rate: Number of tokens per second to add to the bucket. Over time, the number of tokens that can be consumed is limited by this rate. - capacity (int): Maximum number of tokens that the bucket + capacity: Maximum number of tokens that the bucket can hold. Once the bucket if full, additional tokens are discarded. """ @abc.abstractmethod - def consume(self, key, num_tokens): + def consume(self, key: KeyType, num_tokens: int) -> bool: """Attempt to take one or more tokens from a bucket. Args: - key (str): Name of the bucket to replenish. - num_tokens (int): Number of tokens to try to consume from + key: Name of the bucket to replenish. + num_tokens: Number of tokens to try to consume from the bucket. If the bucket contains fewer than the requested number, no tokens are removed (i.e., it's all or nothing). Returns: - bool: True if the requested number of tokens were removed + True if the requested number of tokens were removed from the bucket (conforming), otherwise False (non- conforming). 
""" diff --git a/tox.ini b/tox.ini index cb48bb2..b9ba69c 100644 --- a/tox.ini +++ b/tox.ini @@ -9,6 +9,15 @@ commands = pytest \ --cov-config=pyproject.toml \ {posargs:tests} +# -------------------------------------------------------------------- +# Typing +# -------------------------------------------------------------------- + +[testenv:mypy] +skip_install = true +deps = mypy +commands = mypy {posargs:src} + # -------------------------------------------------------------------- # Style # -------------------------------------------------------------------- From 83f11f3cef0560fd5850b73f69039aa1de5d581e Mon Sep 17 00:00:00 2001 From: Nico Tonnhofer Date: Sat, 4 Feb 2023 19:17:20 +0100 Subject: [PATCH 06/10] chore: freeze time for most test In most tests we can freeze time and step through it. This improves the testing experience because the tests run much faster. --- requirements/tests | 1 + tests/test_limiter.py | 107 ++++++++++++++--------------------- tests/test_multithreading.py | 92 +++++++++++++++++------------- 3 files changed, 96 insertions(+), 104 deletions(-) diff --git a/requirements/tests b/requirements/tests index 9955dec..21d4862 100644 --- a/requirements/tests +++ b/requirements/tests @@ -1,2 +1,3 @@ pytest pytest-cov +freezegun diff --git a/tests/test_limiter.py b/tests/test_limiter.py index ee1e342..04b300c 100644 --- a/tests/test_limiter.py +++ b/tests/test_limiter.py @@ -1,14 +1,23 @@ +import datetime import functools -import time import uuid +from typing import Type import pytest +from freezegun import freeze_time +from freezegun.api import FrozenDateTimeFactory import token_bucket +@pytest.fixture +def frozen_time(): + with freeze_time() as ft: + yield ft + + @pytest.mark.parametrize( - "rate,capacity", + ("rate", "capacity"), [ (0.3, 1), (1, 1), @@ -21,7 +30,9 @@ (100, 1), # Disallow bursting ], ) -def test_general_functionality(rate, capacity): +def test_general_functionality( + rate: int, capacity: int, frozen_time: FrozenDateTimeFactory +): 
key = "key" storage = token_bucket.MemoryStorage() limiter = token_bucket.Limiter(rate, capacity, storage) @@ -30,45 +41,29 @@ def test_general_functionality(rate, capacity): consume_one = functools.partial(limiter.consume, key) - # NOTE(kgriffs) Trigger creation of the bucket and then - # sleep to ensure it is at full capacity before testing it. - consume_one() - time.sleep(float(capacity) / rate) + # NOTE(kgriffs) Trigger creation of the bucket. + storage.replenish(key, rate, capacity) + assert storage.get_token_count(key) == capacity - # NOTE(kgriffs): This works because we can consume at a much - # higher rate relative to the replenishment rate, such that we - # easily consume the total capacity before a single token can - # be replenished. def consume_all(): - for i in range(capacity + 3): + conforming = limiter.consume(key, num_tokens=capacity) + assert conforming + for _ in range(3): conforming = consume_one() - - # NOTE(kgriffs): One past the end should be non-conforming, - # but sometimes an extra token or two can be generated, so - # only check a couple past the end for non-conforming. - if i < capacity: - assert conforming - elif i > capacity + 1: - assert not conforming + assert not conforming # Check non-conforming after consuming all of the tokens consume_all() # Let the bucket replenish 1 token - time.sleep(1.0 / rate) + frozen_time.tick(delta=datetime.timedelta(seconds=(1.5 / rate))) assert consume_one() - - # NOTE(kgriffs): Occasionally enough time will have elapsed to - # cause an additional token to be generated. Clear that one - # out if it is there. - consume_one() - assert storage.get_token_count(key) < 1.0 # NOTE(kgriffs): Let the bucket replenish all the tokens; do this # twice to verify that the bucket is limited to capacity. 
for __ in range(2): - time.sleep(float(capacity) / rate) + frozen_time.tick(delta=datetime.timedelta(seconds=((capacity + 0.5) / rate))) storage.replenish(key, rate, capacity) assert int(storage.get_token_count(key)) == capacity @@ -76,7 +71,9 @@ def consume_all(): @pytest.mark.parametrize("capacity", [1, 2, 4, 10]) -def test_consume_multiple_tokens_at_a_time(capacity): +def test_consume_multiple_tokens_at_a_time( + capacity: int, frozen_time: FrozenDateTimeFactory +): rate = 100 num_tokens = capacity key = "key" @@ -93,7 +90,9 @@ def test_consume_multiple_tokens_at_a_time(capacity): assert storage.get_token_count(key) < 1.0 # Sleep long enough to generate num_tokens - time.sleep(1.0 / rate * num_tokens) + frozen_time.tick( + delta=datetime.timedelta(seconds=(1.0 / rate * (num_tokens + 0.1))) + ) def test_different_keys(): @@ -112,26 +111,14 @@ def test_different_keys(): ] # The last two should be non-conforming - for i in range(capacity + 2): - for k in keys: - conforming = limiter.consume(k) - - if i < capacity: - assert conforming - else: - assert not conforming - - -def test_input_validation_storage_type(): - class DoesNotInheritFromStorageBase(object): - pass - - with pytest.raises(TypeError): - token_bucket.Limiter(1, 1, DoesNotInheritFromStorageBase()) + for k in keys: + assert limiter.consume(k, capacity) + for _ in range(2): + assert not limiter.consume(k) @pytest.mark.parametrize( - "rate,capacity,etype", + ("rate", "capacity", "etype"), [ (0, 0, ValueError), (0, 1, ValueError), @@ -142,38 +129,26 @@ class DoesNotInheritFromStorageBase(object): (-2, -2, ValueError), (-2, 0, ValueError), (0, -2, ValueError), - ("x", "y", TypeError), - ("x", -1, (ValueError, TypeError)), # Params could be checked in any order - (-1, "y", (ValueError, TypeError)), # ^^^ - ("x", 1, TypeError), - (1, "y", TypeError), - ("x", None, TypeError), - (None, "y", TypeError), - (None, None, TypeError), - (None, 1, TypeError), - (1, None, TypeError), ], ) -def 
test_input_validation_rate_and_capacity(rate, capacity, etype): +def test_input_validation_rate_and_capacity( + rate: float, capacity: int, etype: Type[Exception] +): with pytest.raises(etype): token_bucket.Limiter(rate, capacity, token_bucket.MemoryStorage()) @pytest.mark.parametrize( - "key,num_tokens,etype", + ("key", "num_tokens", "etype"), [ - ("", 1, ValueError), - ("", 0, ValueError), ("x", 0, ValueError), ("x", -1, ValueError), ("x", -2, ValueError), - (-1, None, (ValueError, TypeError)), # Params could be checked in any order - (None, -1, (ValueError, TypeError)), # ^^^ - (None, 1, TypeError), - (1, None, TypeError), ], ) -def test_input_validation_on_consume(key, num_tokens, etype): +def test_input_validation_on_consume( + key: bytes, num_tokens: int, etype: Type[Exception] +): limiter = token_bucket.Limiter(1, 1, token_bucket.MemoryStorage()) with pytest.raises(etype): limiter.consume(key, num_tokens) diff --git a/tests/test_multithreading.py b/tests/test_multithreading.py index 19a1759..86a25ea 100644 --- a/tests/test_multithreading.py +++ b/tests/test_multithreading.py @@ -1,14 +1,32 @@ +import datetime +import os import random import threading import time import uuid +from collections import Counter +from typing import Any, Callable, List import pytest +from freezegun import freeze_time +from freezegun.api import FrozenDateTimeFactory import token_bucket -def _run_threaded(func, num_threads): +def patched_freeze_time(): + f = freeze_time() + f.ignore = tuple(set(f.ignore) - {"threading"}) # pyright: ignore + return f + + +@pytest.fixture +def frozen_time(): + with patched_freeze_time() as ft: + yield ft + + +def _run_threaded(func: Callable[..., Any], num_threads: int): threads = [threading.Thread(target=func) for __ in range(num_threads)] for t in threads: @@ -20,8 +38,11 @@ def _run_threaded(func, num_threads): # NOTE(kgriffs): Don't try to remove more tokens than could ever # be available according to the bucket capacity. 
+# Test this only in the CI. It is incredibly slow and so +# unlikely that you may never see it. +@pytest.mark.skipif(os.getenv("CI") != "true", reason="slow test") @pytest.mark.parametrize( - "rate,capacity,max_tokens_to_consume", + ("rate", "capacity", "max_tokens_to_consume"), [ (10, 1, 1), (100, 1, 1), @@ -37,7 +58,11 @@ def _run_threaded(func, num_threads): (1000, 10, 10), ], ) -def test_negative_count(rate, capacity, max_tokens_to_consume): +def test_negative_count( + rate: int, + capacity: int, + max_tokens_to_consume: int, +): # NOTE(kgriffs): Usually there will be a much larger number of # keys in a production system, but keep to just five to increase # the likelihood of collisions. @@ -46,7 +71,7 @@ def test_negative_count(rate, capacity, max_tokens_to_consume): storage = token_bucket.MemoryStorage() limiter = token_bucket.Limiter(rate, capacity, storage) - token_counts = [] + token_counts: List[float] = [] def loop(): for __ in range(1000): @@ -76,7 +101,7 @@ def loop(): assert (max_tokens_to_consume * -2) < min(negative_counts) -def test_replenishment(): +def test_burst_replenishment(frozen_time: FrozenDateTimeFactory): capacity = 100 rate = 100 num_threads = 4 @@ -84,29 +109,28 @@ def test_replenishment(): storage = token_bucket.MemoryStorage() - def loop(): + def consume(): for i in range(trials): - key = str(i) + key = bytes(i) + storage.replenish(key, rate, capacity) - for __ in range(int(capacity / num_threads)): - storage.replenish(key, rate, capacity) - time.sleep(1.0 / rate) - - _run_threaded(loop, num_threads) + for __ in range(capacity // num_threads): + _run_threaded(consume, num_threads) + frozen_time.tick(1.0 / rate) # NOTE(kgriffs): Ensure that a race condition did not result in # not all the tokens being replenished for i in range(trials): - key = str(i) + key = bytes(i) assert storage.get_token_count(key) == capacity -def test_conforming_ratio(): +def test_burst_conforming_ratio(frozen_time: FrozenDateTimeFactory): rate = 100 capacity = 
10 - key = "key" + key = b"key" target_ratio = 0.5 - ratio_max = 0.62 + max_ratio = 0.55 num_threads = 4 storage = token_bucket.MemoryStorage() @@ -114,34 +138,26 @@ def test_conforming_ratio(): # NOTE(kgriffs): Rather than using a lock to protect some counters, # rely on the GIL and count things up after the fact. - conforming_states = [] + conforming_states: Counter[bool] = Counter() # NOTE(kgriffs): Start with an empty bucket while limiter.consume(key): pass - def loop(): - # NOTE(kgriffs): Run for 10 seconds - for __ in range(int(rate * 10 / target_ratio / num_threads)): - conforming_states.append(limiter.consume(key)) + def consume(): + conforming_states.update([limiter.consume(key)]) - # NOTE(kgriffs): Only generate some of the tokens needed, so - # that some requests will end up being non-conforming. - time.sleep(1.0 / rate * target_ratio * num_threads) - - _run_threaded(loop, num_threads) + for __ in range(int(rate * 10 / target_ratio / num_threads)): + # NOTE(kgriffs): Only generate some of the tokens needed, so + # that some requests will end up being non-conforming. + sleep_in_seconds = 1.0 / rate * target_ratio * num_threads + frozen_time.tick(delta=datetime.timedelta(seconds=sleep_in_seconds)) - total_conforming = 0 - for c in conforming_states: - if c: - total_conforming += 1 + _run_threaded(consume, num_threads) - actual_ratio = float(total_conforming) / len(conforming_states) + actual_ratio = conforming_states[True] / len(list(conforming_states.elements())) - # NOTE(kgriffs): We don't expect to be super precise due to - # the inprecision of time.sleep() and also having to take into - # account execution time of the other instructions in the - # loop. We do expect a few more conforming states vs. non- - # conforming since the sleep time + overall execution time - # makes the threads run a little behind the replenishment rate. - assert target_ratio < actual_ratio < ratio_max + # NOTE: With a frozen time we should hit exactly. 
However, due to a tiny gap between + # replenish, frozen_time.tick and consume, it is possible that we have a little bit + # more than expected. You may see this only with PyPy. + assert target_ratio <= actual_ratio < max_ratio From af5d760abc9d291ca62e96612bfa8b7bf442fe8a Mon Sep 17 00:00:00 2001 From: Nico Tonnhofer Date: Tue, 7 Feb 2023 00:21:03 +0100 Subject: [PATCH 07/10] chore: format imports with google style --- src/token_bucket/__init__.py | 2 +- src/token_bucket/limiter.py | 3 ++- src/token_bucket/storage.py | 3 ++- tests/test_limiter.py | 4 ++-- tests/test_multithreading.py | 6 +++--- tox.ini | 1 + 6 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/token_bucket/__init__.py b/src/token_bucket/__init__.py index 101e52b..04b672d 100644 --- a/src/token_bucket/__init__.py +++ b/src/token_bucket/__init__.py @@ -10,4 +10,4 @@ from .storage_base import StorageBase from .version import __version__ -__all__ = ["Limiter", "MemoryStorage", "StorageBase"] +__all__ = ["Limiter", "MemoryStorage", "StorageBase", "__version__"] diff --git a/src/token_bucket/limiter.py b/src/token_bucket/limiter.py index 99f4032..49f058d 100644 --- a/src/token_bucket/limiter.py +++ b/src/token_bucket/limiter.py @@ -14,7 +14,8 @@ from typing import Union -from .storage_base import KeyType, StorageBase +from .storage_base import KeyType +from .storage_base import StorageBase class Limiter: diff --git a/src/token_bucket/storage.py b/src/token_bucket/storage.py index e6b88ff..da9b555 100644 --- a/src/token_bucket/storage.py +++ b/src/token_bucket/storage.py @@ -15,7 +15,8 @@ import time from typing import Dict, List -from .storage_base import KeyType, StorageBase +from .storage_base import KeyType +from .storage_base import StorageBase TOKEN_POS = 0 REPLENISH_TIME_POS = 1 diff --git a/tests/test_limiter.py b/tests/test_limiter.py index 04b300c..1d020c5 100644 --- a/tests/test_limiter.py +++ b/tests/test_limiter.py @@ -1,11 +1,11 @@ import datetime import functools -import uuid 
from typing import Type +import uuid -import pytest from freezegun import freeze_time from freezegun.api import FrozenDateTimeFactory +import pytest import token_bucket diff --git a/tests/test_multithreading.py b/tests/test_multithreading.py index 86a25ea..46eeb67 100644 --- a/tests/test_multithreading.py +++ b/tests/test_multithreading.py @@ -1,15 +1,15 @@ +from collections import Counter import datetime import os import random import threading import time -import uuid -from collections import Counter from typing import Any, Callable, List +import uuid -import pytest from freezegun import freeze_time from freezegun.api import FrozenDateTimeFactory +import pytest import token_bucket diff --git a/tox.ini b/tox.ini index b9ba69c..0d343ed 100644 --- a/tox.ini +++ b/tox.ini @@ -25,6 +25,7 @@ commands = mypy {posargs:src} [flake8] ; But do please try to stick to 80 unless it makes the code ugly max-line-length = 99 +inline-quotes = double max-complexity = 10 import-order-style = google application-import-names = token_bucket From 073a8e0894c540b2db393feccb1717e14dd8dfc0 Mon Sep 17 00:00:00 2001 From: Nico Tonnhofer Date: Sun, 12 Feb 2023 20:27:21 +0100 Subject: [PATCH 08/10] chore: disable coverage when testing with pypy --- tests/conftest.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 tests/conftest.py diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..397c537 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,20 @@ +import platform + +from pytest import Config + + +def pytest_configure(config: Config): + # When testing with PyPy and coverage, tests become incredible slow and + # could break them. There are several issues reported with other plugins + # too. So you should check carefully if PyPy support it. 
+    # https://github.com/pytest-dev/pytest-cov/issues/418
+    # https://github.com/pytest-dev/pytest/issues/7675
+    if platform.python_implementation() == "PyPy":
+        cov = config.pluginmanager.get_plugin("_cov")
+
+        # probably pytest_cov is not installed
+        if cov:
+            cov.options.no_cov = True
+
+            if cov.cov_controller:
+                cov.cov_controller.pause()

From 8c6a81389b149f0afca135ef3d3b50140c4e6a18 Mon Sep 17 00:00:00 2001
From: Nico Tonnhofer
Date: Sun, 12 Feb 2023 20:29:38 +0100
Subject: [PATCH 09/10] chore: declare CPython 3.11 support

---
 .github/workflows/tests.yaml | 4 ++--
 pyproject.toml | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 3c05e1f..97c0a36 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -16,7 +16,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.7", "3.8", "3.9", "3.10", "pypy3.9"]
+        python-version: ["3.7", "3.8", "3.9", "3.10", "3.11", "pypy3.9"]
         os: ["ubuntu-22.04"]
         include:
           - python-version: "3.7"
@@ -24,7 +24,7 @@
             os: "ubuntu-22.04"
             coverage: true
             mypy: true
             pep8: true
-          - python-verson: "3.10"
+          - python-version: "3.11"
            os: "ubuntu-22.04"
            mypy: true

diff --git a/pyproject.toml b/pyproject.toml
index ce11767..6e6ad94 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,6 +36,7 @@ classifiers = [
   "Programming Language :: Python :: 3.8",
   "Programming Language :: Python :: 3.9",
   "Programming Language :: Python :: 3.10",
+  "Programming Language :: Python :: 3.11",
   "Programming Language :: Python :: Implementation :: CPython",
   "Programming Language :: Python :: Implementation :: PyPy",
   "Topic :: Internet :: WWW/HTTP",

From 9fbcd9954fd6aaf381e46b57364aa30324d3fb2b Mon Sep 17 00:00:00 2001
From: Nico Tonnhofer
Date: Sun, 12 Feb 2023 20:48:47 +0100
Subject: [PATCH 10/10] chore: use hatch to build and publish

---
 tools/build.sh | 10 +++++-----
 tools/publish.sh | 2 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tools/build.sh
b/tools/build.sh index 1cabd0a..5b4ce26 100755 --- a/tools/build.sh +++ b/tools/build.sh @@ -33,7 +33,7 @@ _open_env() { pyenv shell $VENV_NAME pip install --upgrade pip - pip install --upgrade wheel twine + pip install --upgrade hatch } # Args: () @@ -77,9 +77,9 @@ pyenv uninstall -f $VENV_NAME #---------------------------------------------------------------------- _echo_task "Building source distribution" -_open_env 2.7.12 +_open_env 3.11.1 -python setup.py sdist -d $DIST_DIR +hatch build -t sdist $DIST_DIR _close_env @@ -88,8 +88,8 @@ _close_env #---------------------------------------------------------------------- _echo_task "Building universal wheel" -_open_env 2.7.12 +_open_env 3.11.1 -python setup.py bdist_wheel -d $DIST_DIR +hatch build -t wheel $DIST_DIR _close_env diff --git a/tools/publish.sh b/tools/publish.sh index 590e1a9..3e1ad06 100755 --- a/tools/publish.sh +++ b/tools/publish.sh @@ -3,5 +3,5 @@ DIST_DIR=./dist read -p "Sign and upload $DIST_DIR/* to PyPI? [y/N]: " CONTINUE if [[ $CONTINUE =~ ^[Yy]$ ]]; then - twine upload -s --skip-existing $DIST_DIR/* + hatch publish $DIST_DIR/* fi