Skip to content

Commit

Permalink
Merge branch 'main' into falkordbrmcreation
Browse files — browse the repository at this point in the history
  • Branch information
kingtroga authored this commit on Nov 6, 2024
2 parents 91a6eb2 + e654062 commit 30ce39b
Show file tree
Hide file tree
Showing 7 changed files with 152 additions and 28 deletions.
10 changes: 7 additions & 3 deletions .github/workflows/build_and_release.yml
Original file line number Diff line number Diff line change
Expand Up @@ -84,15 +84,19 @@ jobs:
run: python3 setup.py sdist bdist_wheel
- name: Publish distribution 📦 to PyPI (dspy)
uses: pypa/gh-action-pypi-publish@release/v1 # This requires a trusted publisher to be setup in pypi
# Publish to dspy-ai
with:
attestations: false
# Publish to dspy-ai
- name: Update version in setup.py (dspy-ai)
run: sed -i '/#replace_package_version_marker/{n;s/version="[^"]*"/version="${{ needs.extract-tag.outputs.version }}"/;}' ./dspy/.internal_dspyai/setup.py
- name: Update package name in setup.py
run: sed -i '/#replace_package_name_marker/{n;s/name="[^"]*"/name="dspy-ai"/;}' ./dspy/.internal_dspyai/setup.py
- name: Update dspy dependency version in setup.py
run: |
sed -i '/#replace_dspy_version_marker/{n;s/dspy==[^"]*/dspy==${{ needs.extract-tag.outputs.version }}/;}' ./dspy/.internal_dspyai/setup.py
sed -i '/#replace_dspy_version_marker/{n;s/dspy==[^"]*/dspy>=${{ needs.extract-tag.outputs.version }}/;}' ./dspy/.internal_dspyai/setup.py
- name: Build a binary wheel
run: python3 ./dspy/.internal_dspyai/setup.py sdist bdist_wheel
- name: Publish distribution 📦 to PyPI (dspy-ai)
uses: pypa/gh-action-pypi-publish@release/v1 # This requires a trusted publisher to be setup in pypi
uses: pypa/gh-action-pypi-publish@release/v1 # This requires a trusted publisher to be setup in pypi
with:
attestations: false
2 changes: 1 addition & 1 deletion dspy/.internal_dspyai/setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,5 +19,5 @@
packages=find_packages(include=["dsp.*", "dspy.*", "dsp", "dspy"]),
python_requires=">=3.9",
#replace_dspy_version_marker
install_requires=["dspy==2.5.3"]
install_requires=["dspy>=2.5.3"]
)
10 changes: 5 additions & 5 deletions poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 2 additions & 2 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ build-backend = "setuptools.build_meta"
#replace_package_name_marker
name="dspy"
#replace_package_version_marker
version="2.5.25"
version="2.5.27"
description = "DSPy"
readme = "README.md"
authors = [{ name = "Omar Khattab", email = "[email protected]" }]
Expand Down Expand Up @@ -133,7 +133,7 @@ llama-index = {version = "^0.10.30", optional = true}
snowflake-snowpark-python = { version = "*",optional=true, python = ">=3.9,<3.12" }
jinja2 = "^3.1.3"
magicattr = "^0.1.6"
litellm = "1.49.1"
litellm = "1.51.0"
diskcache = "^5.6.0"

redis = "^5.1.1"
Expand Down
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ diskcache
httpx
joblib~=1.3
json-repair
litellm<=1.49.1
litellm==1.51.0
magicattr~=0.1.6
openai
optuna
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
#replace_package_name_marker
name="dspy",
#replace_package_version_marker
version="2.5.25",
version="2.5.27",
description="DSPy",
long_description=long_description,
long_description_content_type="text/markdown",
Expand Down
150 changes: 135 additions & 15 deletions tests/clients/test_lm.py
Original file line number Diff line number Diff line change
@@ -1,25 +1,145 @@
from unittest import mock

from dspy.clients.lm import LM
import pytest
# from litellm.router import RetryPolicy

# from dspy.clients.lm import LM, _get_litellm_router

def test_lm_chat_respects_max_retries():
lm = LM(model="openai/gpt4o", model_type="chat", max_retries=17)

with mock.patch("dspy.clients.lm.litellm.completion") as litellm_completion_api:
lm(messages=[{"content": "Hello, world!", "role": "user"}])
# @pytest.mark.parametrize("keys_in_env_vars", [True, False])
# def test_lm_chat_respects_max_retries(keys_in_env_vars, monkeypatch):
# model_name = "openai/gpt4o"
# num_retries = 17
# temperature = 0.5
# max_tokens = 100
# prompt = "Hello, world!"
# api_version = "2024-02-01"
# api_key = "apikey"

assert litellm_completion_api.call_count == 1
assert litellm_completion_api.call_args[1]["max_retries"] == 17
# assert litellm_completion_api.call_args[1]["retry_strategy"] == "exponential_backoff_retry"
# lm_kwargs = {
# "model": model_name,
# "model_type": "chat",
# "num_retries": num_retries,
# "temperature": temperature,
# "max_tokens": max_tokens,
# }
# if keys_in_env_vars:
# api_base = "http://testfromenv.com"
# monkeypatch.setenv("OPENAI_API_KEY", api_key)
# monkeypatch.setenv("OPENAI_API_BASE", api_base)
# monkeypatch.setenv("OPENAI_API_VERSION", api_version)
# else:
# api_base = "http://test.com"
# lm_kwargs["api_key"] = api_key
# lm_kwargs["api_base"] = api_base
# lm_kwargs["api_version"] = api_version

# lm = LM(**lm_kwargs)

def test_lm_completions_respects_max_retries():
lm = LM(model="openai/gpt-3.5-turbo", model_type="completions", max_retries=17)
# MockRouter = mock.MagicMock()
# mock_completion = mock.MagicMock()
# MockRouter.completion = mock_completion

with mock.patch("dspy.clients.lm.litellm.text_completion") as litellm_completion_api:
lm(prompt="Hello, world!")
# with mock.patch("dspy.clients.lm.Router", return_value=MockRouter) as MockRouterConstructor:
# lm(prompt=prompt)

assert litellm_completion_api.call_count == 1
assert litellm_completion_api.call_args[1]["max_retries"] == 17
# assert litellm_completion_api.call_args[1]["retry_strategy"] == "exponential_backoff_retry"
# MockRouterConstructor.assert_called_once_with(
# model_list=[
# {
# "model_name": model_name,
# "litellm_params": {
# "model": model_name,
# "api_key": api_key,
# "api_base": api_base,
# "api_version": api_version,
# },
# }
# ],
# retry_policy=RetryPolicy(
# TimeoutErrorRetries=num_retries,
# RateLimitErrorRetries=num_retries,
# InternalServerErrorRetries=num_retries,
# BadRequestErrorRetries=0,
# AuthenticationErrorRetries=0,
# ContentPolicyViolationErrorRetries=0,
# ),
# )
# mock_completion.assert_called_once_with(
# model=model_name,
# messages=[{"role": "user", "content": prompt}],
# temperature=temperature,
# max_tokens=max_tokens,
# cache=mock.ANY,
# )


# @pytest.mark.parametrize("keys_in_env_vars", [True, False])
# def test_lm_completions_respects_max_retries(keys_in_env_vars, monkeypatch):
# model_name = "azure/gpt-3.5-turbo"
# expected_model = "text-completion-openai/" + model_name.split("/")[-1]
# num_retries = 17
# temperature = 0.5
# max_tokens = 100
# prompt = "Hello, world!"
# api_version = "2024-02-01"
# api_key = "apikey"
# azure_ad_token = "adtoken"

# lm_kwargs = {
# "model": model_name,
# "model_type": "text",
# "num_retries": num_retries,
# "temperature": temperature,
# "max_tokens": max_tokens,
# }
# if keys_in_env_vars:
# api_base = "http://testfromenv.com"
# monkeypatch.setenv("AZURE_API_KEY", api_key)
# monkeypatch.setenv("AZURE_API_BASE", api_base)
# monkeypatch.setenv("AZURE_API_VERSION", api_version)
# monkeypatch.setenv("AZURE_AD_TOKEN", azure_ad_token)
# else:
# api_base = "http://test.com"
# lm_kwargs["api_key"] = api_key
# lm_kwargs["api_base"] = api_base
# lm_kwargs["api_version"] = api_version
# lm_kwargs["azure_ad_token"] = azure_ad_token

# lm = LM(**lm_kwargs)

# MockRouter = mock.MagicMock()
# mock_text_completion = mock.MagicMock()
# MockRouter.text_completion = mock_text_completion

# with mock.patch("dspy.clients.lm.Router", return_value=MockRouter) as MockRouterConstructor:
# lm(prompt=prompt)

# MockRouterConstructor.assert_called_once_with(
# model_list=[
# {
# "model_name": expected_model,
# "litellm_params": {
# "model": expected_model,
# "api_key": api_key,
# "api_base": api_base,
# "api_version": api_version,
# "azure_ad_token": azure_ad_token,
# },
# }
# ],
# retry_policy=RetryPolicy(
# TimeoutErrorRetries=num_retries,
# RateLimitErrorRetries=num_retries,
# InternalServerErrorRetries=num_retries,
# BadRequestErrorRetries=0,
# AuthenticationErrorRetries=0,
# ContentPolicyViolationErrorRetries=0,
# ),
# )
# mock_text_completion.assert_called_once_with(
# model=expected_model,
# prompt=prompt + "\n\nBEGIN RESPONSE:",
# temperature=temperature,
# max_tokens=max_tokens,
# cache=mock.ANY,
# )

0 comments on commit 30ce39b

Please sign in to comment.