Skip to content

Commit

Permalink
Fixing LiteLLM Router.acompletion typing issue (#43)
Browse files · Browse the repository at this point in the history
  • Branch information: authored by jamesbraza, Jan 23, 2025
1 parent d893681 commit 21688b7
Showing 1 changed file with 10 additions and 2 deletions.
12 changes: 10 additions & 2 deletions llmclient/llms.py
Original file line number Diff line number Diff line change
Expand Up @@ -553,7 +553,11 @@ async def check_rate_limit(self, token_count: float, **kwargs) -> None:

@rate_limited
async def acompletion(self, messages: list[Message], **kwargs) -> list[LLMResult]:
prompts = [m.model_dump(by_alias=True) for m in messages if m.content]
# cast is necessary for LiteLLM typing bug: https://github.com/BerriAI/litellm/issues/7641
prompts = cast(
list[litellm.types.llms.openai.AllMessageValues],
[m.model_dump(by_alias=True) for m in messages if m.content],
)
completions = await track_costs(self.router.acompletion)(
self.name, prompts, **kwargs
)
Expand Down Expand Up @@ -602,7 +606,11 @@ async def acompletion(self, messages: list[Message], **kwargs) -> list[LLMResult
async def acompletion_iter(
self, messages: list[Message], **kwargs
) -> AsyncIterable[LLMResult]:
prompts = [m.model_dump(by_alias=True) for m in messages if m.content]
# cast is necessary for LiteLLM typing bug: https://github.com/BerriAI/litellm/issues/7641
prompts = cast(
list[litellm.types.llms.openai.AllMessageValues],
[m.model_dump(by_alias=True) for m in messages if m.content],
)
stream_completions = await track_costs_iter(self.router.acompletion)(
self.name,
prompts,
Expand Down

0 comments on commit 21688b7

Please sign in to comment.