diff --git a/README.md b/README.md
index e9228b3ba..f0a067167 100644
--- a/README.md
+++ b/README.md
@@ -54,7 +54,7 @@ Optionally generate a frontend if you've selected the Python or Express back-end
 
 ## Customizing the AI models
 
-The app will default to OpenAI's `gpt-4-vision-preview` LLM and `text-embedding-3-large` embedding model.
+The app will default to OpenAI's `gpt-4-turbo` LLM and `text-embedding-3-large` embedding model.
 
 If you want to use different OpenAI models, add the `--ask-models` CLI parameter.
 
diff --git a/questions.ts b/questions.ts
index 6efbc45d5..375ad8078 100644
--- a/questions.ts
+++ b/questions.ts
@@ -75,7 +75,7 @@ const defaults: QuestionArgs = {
   openAiKey: "",
   llamaCloudKey: "",
   useLlamaParse: false,
-  model: "gpt-4-vision-preview",
+  model: "gpt-4-turbo",
   embeddingModel: "text-embedding-3-large",
   communityProjectConfig: undefined,
   llamapack: "",
diff --git a/templates/types/streaming/fastapi/pyproject.toml b/templates/types/streaming/fastapi/pyproject.toml
index 8c087f734..30cf267f1 100644
--- a/templates/types/streaming/fastapi/pyproject.toml
+++ b/templates/types/streaming/fastapi/pyproject.toml
@@ -13,9 +13,9 @@ python = "^3.11,<3.12"
 fastapi = "^0.109.1"
 uvicorn = { extras = ["standard"], version = "^0.23.2" }
 python-dotenv = "^1.0.0"
-llama-index = "0.10.15"
-llama-index-core = "0.10.15"
-llama-index-agent-openai = "0.1.5"
+llama-index = "0.10.28"
+llama-index-core = "0.10.28"
+llama-index-agent-openai = "0.2.2"
 
 [build-system]
 requires = ["poetry-core"]
diff --git a/templates/types/streaming/nextjs/app/components/chat-section.tsx b/templates/types/streaming/nextjs/app/components/chat-section.tsx
index 08afc2548..63f8adf57 100644
--- a/templates/types/streaming/nextjs/app/components/chat-section.tsx
+++ b/templates/types/streaming/nextjs/app/components/chat-section.tsx
@@ -39,7 +39,7 @@ export default function ChatSection() {
       handleSubmit={handleSubmit}
       handleInputChange={handleInputChange}
       isLoading={isLoading}
-      multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-vision-preview"}
+      multiModal={process.env.NEXT_PUBLIC_MODEL === "gpt-4-turbo"}
     />
   );
 }