This repository has been archived by the owner on Nov 13, 2024. It is now read-only.

Commit 4f174d6
[chat] Set defaults for max_prompt_tokens and max_generated_tokens
Less effort for the user
igiloh-pinecone committed Sep 7, 2023
1 parent 1c8e1dc commit 4f174d6
Showing 2 changed files with 3 additions and 6 deletions.
context_engine/chat_engine/chat_engine.py (2 additions, 2 deletions)
@@ -55,8 +55,8 @@ def __init__(self,
                  *,
                  llm: BaseLLM,
                  context_engine: ContextEngine,
-                 max_prompt_tokens: int,
-                 max_generated_tokens: int,
+                 max_prompt_tokens: int = 4096,
+                 max_generated_tokens: Optional[int] = None,
                  max_context_tokens: Optional[int] = None,
                  query_builder: Optional[QueryGenerator] = None,
                  system_prompt: Optional[str] = None,
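With this hunk, both token limits become optional at construction time. The commit does not show how ChatEngine consumes a None value for max_generated_tokens; the sketch below only illustrates the defaulting pattern, assuming the values are stored as-is and None means "no explicit generation cap, defer to the LLM". Everything outside the signature is hypothetical.

    from typing import Optional

    class ChatEngine:
        def __init__(self,
                     *,
                     llm,                                         # BaseLLM instance
                     context_engine,                              # ContextEngine instance
                     max_prompt_tokens: int = 4096,               # new default from this commit
                     max_generated_tokens: Optional[int] = None): # new default from this commit
            self.llm = llm
            self.context_engine = context_engine
            # Hypothetical handling (not part of this diff): keep the prompt budget
            # as given, and treat None as "let the underlying LLM decide".
            self.max_prompt_tokens = max_prompt_tokens
            self.max_generated_tokens = max_generated_tokens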
context_engine/service/app.py (1 addition, 4 deletions)
@@ -132,10 +132,7 @@ def _init_engines() -> Tuple[KnowledgeBase, ContextEngine, ChatEngine]:
     context_engine = ContextEngine(knowledge_base=kb)
     llm = OpenAILLM(model_name='gpt-3.5-turbo-0613')
 
-    chat_engine = ChatEngine(llm=llm,
-                             context_engine=context_engine,
-                             max_prompt_tokens=4000,
-                             max_generated_tokens=None)
+    chat_engine = ChatEngine(llm=llm, context_engine=context_engine)
 
     return kb, context_engine, chat_engine

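On the caller side, the service no longer has to pick token budgets itself. A minimal usage sketch, assuming kb is the KnowledgeBase built earlier in _init_engines and the same imports as app.py; the explicit-override call at the end is only an illustration of the keyword-only signature, with illustrative values, not code from this commit.

    context_engine = ContextEngine(knowledge_base=kb)
    llm = OpenAILLM(model_name='gpt-3.5-turbo-0613')

    # Relies on the new defaults: max_prompt_tokens=4096, max_generated_tokens=None.
    chat_engine = ChatEngine(llm=llm, context_engine=context_engine)

    # Callers that need tighter budgets can still pass them explicitly.
    chat_engine = ChatEngine(llm=llm,
                             context_engine=context_engine,
                             max_prompt_tokens=2048,
                             max_generated_tokens=512)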