Skip to content

Commit

Permalink
Updated code quality tooling (#29)
Browse files Browse the repository at this point in the history
* Ran 'pre-commit autoupdate'

* Ran 'uv lock --upgrade'

* Updated pytest disables

* Regenerated cassettes so tests pass

* Removed dead test cassettes

* Allowing latest Python 3.13 in CI since pylint fixed their issue

* Propagated fetch-depth comment in CI

* Modernized codeflash CI and fixed pull_request path

* Fixed codeflash formatter-cmds

* Added Renovate config with validation in CI

* Removed pytest-mock dependency

* Added mutable-override to mypy

* Added doctesting to pytest config

* Modernized refurb config for ruff port

* Added blacken-docs and removed extra exclude for codespell

* Updated ruff config and all fixes for latest ruff
  • Loading branch information
jamesbraza authored Jan 8, 2025
1 parent 988335f commit 25c76c8
Show file tree
Hide file tree
Showing 27 changed files with 874 additions and 2,364 deletions.
28 changes: 28 additions & 0 deletions .github/renovate.json5
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
{
  // Renovate bot configuration, validated against the official schema.
  $schema: "https://docs.renovatebot.com/renovate-schema.json",
  // Start from Renovate's recommended preset defaults.
  extends: ["config:recommended"],
  // Cron-like Renovate schedule: "* 2 1-7 * 1" — presumably restricts runs
  // to the 2am hour on a Monday falling in the 1st-7th of the month
  // (i.e. first Monday, monthly) — TODO confirm against Renovate docs.
  schedule: ["* 2 1-7 * 1"],
  // Cap on how many PRs Renovate may open per hour.
  prHourlyLimit: 4,
  timezone: "America/Los_Angeles",
  // Widen existing version ranges rather than pinning or replacing them.
  rangeStrategy: "widen",
  lockFileMaintenance: {
    enabled: true,
    schedule: ["* 2 1-7 * 1"], // Work around https://github.com/renovatebot/renovate/discussions/33152
  },
  // Wait this long after a release before proposing it (dodges bad releases).
  minimumReleaseAge: "2 weeks",
  // Also update hook revs in .pre-commit-config.yaml.
  "pre-commit": { enabled: true },
  packageRules: [
    {
      // Lock-file maintenance PRs merge automatically.
      matchUpdateTypes: ["lockFileMaintenance"],
      automerge: true,
    },
    {
      // group:allNonMajor, with automerge
      groupName: "all non-major dependencies",
      groupSlug: "all-minor-patch",
      matchPackageNames: ["*"],
      matchUpdateTypes: ["minor", "patch"],
      automerge: true,
    },
  ],
}
27 changes: 6 additions & 21 deletions .github/workflows/codeflash.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ name: CodeFlash
on:
pull_request:
paths:
- "ldp/**"
- "llmclient/**"
workflow_dispatch:

concurrency: # Cancel prior if new push, SEE: https://stackoverflow.com/a/72408109
Expand All @@ -13,31 +13,16 @@ concurrency: # Cancel prior if new push, SEE: https://stackoverflow.com/a/724081
jobs:
optimize: # SEE: https://docs.codeflash.ai/getting-started/codeflash-github-actions
runs-on: ubuntu-latest
if: ${{ github.actor != 'codeflash-ai[bot]' }}
env:
CODEFLASH_API_KEY: ${{ secrets.CODEFLASH_API_KEY }}
CODEFLASH_PR_NUMBER: ${{ github.event.number }}
steps:
- name: Check if PR is from CodeFlash bot
id: bot_check
working-directory: .
run: |
echo "Checking if this PR is created by CodeFlash bot..."
if [ "${{ github.event.pull_request.user.login }}" == "codeflash-ai[bot]" ]; then
echo "PR created by Codeflash bot. Skipping optimization."
echo "skip_remaining_steps=yes" >> $GITHUB_OUTPUT
else
echo "skip_remaining_steps=no" >> $GITHUB_OUTPUT
echo "It's not. Proceeding with the optimization."
fi
- if: steps.bot_check.outputs.skip_remaining_steps == 'no'
uses: actions/checkout@v4
- uses: actions/checkout@v4
with:
fetch-depth: 0
- if: steps.bot_check.outputs.skip_remaining_steps == 'no'
uses: astral-sh/setup-uv@v3
- uses: astral-sh/setup-uv@v4
with:
enable-cache: true
- if: steps.bot_check.outputs.skip_remaining_steps == 'no'
run: uv sync
- if: steps.bot_check.outputs.skip_remaining_steps == 'no'
run: uv run codeflash
- run: uv sync --group=codeflash
- run: uv run codeflash
12 changes: 7 additions & 5 deletions .github/workflows/test.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ jobs:
if: github.event_name == 'pull_request' # pre-commit-ci/lite-action only runs here
strategy:
matrix:
python-version: [3.11, 3.13.0] # Our min and max supported Python versions
python-version: [3.11, 3.13] # Our min and max supported Python versions
steps:
- uses: actions/checkout@v4
with:
Expand All @@ -27,26 +27,28 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.11, 3.13.0] # Our min and max supported Python versions
python-version: [3.11, 3.13] # Our min and max supported Python versions
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
fetch-depth: 0 # For setuptools-scm, replace with fetch-tags after https://github.com/actions/checkout/issues/1471
- uses: astral-sh/setup-uv@v4
with:
enable-cache: true
- run: uv python pin ${{ matrix.python-version }}
- name: Check build
if: matrix.python-version == '3.11'
if: matrix.python-version == '3.11' # Only need to run this on one version
uses: hynek/build-and-inspect-python-package@v2
- run: uv sync --python-preference=only-managed
- run: uv run refurb llmclient tests
- run: uv run pylint llmclient
- if: matrix.python-version == '3.11' # Only need to run this on one version
uses: suzuki-shunsuke/[email protected]
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.11, 3.13.0] # Our min and max supported Python versions
python-version: [3.11, 3.13] # Our min and max supported Python versions
steps:
- uses: actions/checkout@v4
- uses: astral-sh/setup-uv@v3
Expand Down
17 changes: 9 additions & 8 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
---
default_language_version:
python: python3
repos:
Expand All @@ -18,18 +19,22 @@ repos:
- id: mixed-line-ending
- id: trailing-whitespace
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.0
rev: v0.8.6
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
- repo: https://github.com/rbubley/mirrors-prettier
rev: v3.4.0
rev: v3.4.2
hooks:
- id: prettier
- repo: https://github.com/psf/black-pre-commit-mirror
rev: 24.10.0
hooks:
- id: black
- repo: https://github.com/adamchainz/blacken-docs
rev: 1.19.1
hooks:
- id: blacken-docs
- repo: https://github.com/jumanjihouse/pre-commit-hooks
rev: 3.0.0
hooks:
Expand All @@ -40,10 +45,6 @@ repos:
- id: codespell
additional_dependencies: [".[toml]"]
exclude_types: [jupyter]
exclude: |
(?x)^(
tests/cassettes.*
)$
- repo: https://github.com/pappasam/toml-sort
rev: v0.24.2
hooks:
Expand All @@ -62,11 +63,11 @@ repos:
hooks:
- id: uv-lock
- repo: https://github.com/jsh9/markdown-toc-creator
rev: 0.0.8
rev: 0.0.10
hooks:
- id: markdown-toc-creator
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.13.0
rev: v1.14.1
hooks:
- id: mypy
args: [--pretty, --ignore-missing-imports]
Expand Down
6 changes: 3 additions & 3 deletions llmclient/cost_tracker.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import contextvars
import logging
from collections.abc import Awaitable, Callable
from contextlib import asynccontextmanager
from contextlib import contextmanager
from functools import wraps
from typing import ParamSpec, TypeVar

Expand Down Expand Up @@ -44,8 +44,8 @@ def enable_cost_tracking(enabled: bool = True) -> None:
GLOBAL_COST_TRACKER.enabled.set(enabled)


@asynccontextmanager
async def cost_tracking_ctx(enabled: bool = True):
@contextmanager
def cost_tracking_ctx(enabled: bool = True):
prev = GLOBAL_COST_TRACKER.enabled.get()
GLOBAL_COST_TRACKER.enabled.set(enabled)
try:
Expand Down
8 changes: 5 additions & 3 deletions llmclient/embeddings.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,8 @@ class EmbeddingModel(ABC, BaseModel):
config: dict[str, Any] = Field(
default_factory=dict,
description=(
"Optional `rate_limit` key, value must be a RateLimitItem or RateLimitItem string for parsing"
"Optional `rate_limit` key, value must be a RateLimitItem or RateLimitItem"
" string for parsing"
),
)

Expand Down Expand Up @@ -162,7 +163,7 @@ class SparseEmbeddingModel(EmbeddingModel):
model_config = ConfigDict(arbitrary_types_allowed=True)

name: str = "sparse"
ndim: int = 256
ndim: int = 256 # type: ignore[mutable-override]
enc: tiktoken.Encoding = Field(
default_factory=lambda: tiktoken.get_encoding("cl100k_base")
)
Expand Down Expand Up @@ -221,7 +222,8 @@ def __init__(self, **kwargs):
from sentence_transformers import SentenceTransformer
except ImportError as exc:
raise ImportError(
"Please install fh-llm-client[local] to use SentenceTransformerEmbeddingModel."
"Please install fh-llm-client[local] to use"
" SentenceTransformerEmbeddingModel."
) from exc

self._model = SentenceTransformer(self.name)
Expand Down
16 changes: 8 additions & 8 deletions llmclient/rate_limiter.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ class GlobalRateLimiter:
def __init__(
self,
rate_config: (
None | dict[tuple[str, str | MatchAllInputs], RateLimitItem]
dict[tuple[str, str | MatchAllInputs], RateLimitItem] | None
) = None,
use_in_memory: bool = False,
):
Expand Down Expand Up @@ -194,25 +194,25 @@ def parse_rate_limits_and_namespace(
# this needs to be checked first, since it's more specific than the stub machine id
if (namespace_w_machine_id_stripped, primary_key) in self.rate_config:
return (
self.rate_config[(namespace_w_machine_id_stripped, primary_key)],
self.rate_config[namespace_w_machine_id_stripped, primary_key],
namespace_w_machine_id_stripped,
)
# we keep the old namespace if we match on the namespace_w_stub_machine_id
if (namespace_w_stub_machine_id, primary_key) in self.rate_config:
return (
self.rate_config[(namespace_w_stub_machine_id, primary_key)],
self.rate_config[namespace_w_stub_machine_id, primary_key],
namespace,
)
# again we only want the original namespace, keep the old namespace
if (namespace_w_stub_machine_id, MATCH_ALL) in self.rate_config:
return (
self.rate_config[(namespace_w_stub_machine_id, MATCH_ALL)],
self.rate_config[namespace_w_stub_machine_id, MATCH_ALL],
namespace,
)
# again we want to use the stripped namespace if it matches
if (namespace_w_machine_id_stripped, MATCH_ALL) in self.rate_config:
return (
self.rate_config[(namespace_w_machine_id_stripped, MATCH_ALL)],
self.rate_config[namespace_w_machine_id_stripped, MATCH_ALL],
namespace_w_machine_id_stripped,
)
return FALLBACK_RATE_LIMIT, namespace
Expand Down Expand Up @@ -294,7 +294,7 @@ async def rate_limit_status(self):
rate_limit.amount,
rate_limit.get_expiry(),
)
limit_status[(namespace, primary_key)] = {
limit_status[namespace, primary_key] = {
"period_start": period_start,
"n_items_in_period": n_items_in_period,
"period_seconds": rate_limit.GRANULARITY.seconds,
Expand Down Expand Up @@ -343,14 +343,14 @@ async def try_acquire(
namespace_and_key, machine_id=machine_id
)

_rate_limit, new_namespace = self.parse_rate_limits_and_namespace(
rate_limit_, new_namespace = self.parse_rate_limits_and_namespace(
namespace, primary_key
)

if isinstance(rate_limit, str):
rate_limit = limit_parse(rate_limit)

rate_limit = rate_limit or _rate_limit
rate_limit = rate_limit or rate_limit_

if rate_limit.amount < weight and raise_impossible_limits:
raise ValueError(
Expand Down
Loading

0 comments on commit 25c76c8

Please sign in to comment.