Skip to content

Commit

Permalink
feat: Improve FastAPI agentic template (#447)
Browse files Browse the repository at this point in the history
---------
Co-authored-by: Marcus Schiesser <[email protected]>
  • Loading branch information
leehuwuj authored Nov 26, 2024
1 parent f17449b commit 02b2473
Show file tree
Hide file tree
Showing 12 changed files with 71 additions and 55 deletions.
5 changes: 5 additions & 0 deletions .changeset/moody-mangos-punch.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
---
"create-llama": patch
---

Show streaming errors in Python, optimize system prompts for tool usage and set the weather tool as default for the Agentic RAG use case
18 changes: 12 additions & 6 deletions helpers/env-variables.ts
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,12 @@ import {

import { TSYSTEMS_LLMHUB_API_URL } from "./providers/llmhub";

// Base assistant persona; used as the leading segment of the generated
// SYSTEM_PROMPT env value in getSystemPromptEnv below.
const DEFAULT_SYSTEM_PROMPT =
"You are a helpful assistant who helps users with their questions.";

// Appended to the system prompt only when the project is configured with
// data sources, steering the model to consult the query engine tool first.
const DATA_SOURCES_PROMPT =
"You have access to a knowledge base including the facts that you should start with to find the answer for the user question. Use the query engine tool to retrieve the facts from the knowledge base.";

export type EnvVar = {
name?: string;
description?: string;
Expand Down Expand Up @@ -449,9 +455,6 @@ const getSystemPromptEnv = (
dataSources?: TemplateDataSource[],
template?: TemplateType,
): EnvVar[] => {
const defaultSystemPrompt =
"You are a helpful assistant who helps users with their questions.";

const systemPromptEnv: EnvVar[] = [];
// build tool system prompt by merging all tool system prompts
// multiagent template doesn't need system prompt
Expand All @@ -466,9 +469,12 @@ const getSystemPromptEnv = (
}
});

const systemPrompt = toolSystemPrompt
? `\"${toolSystemPrompt}\"`
: defaultSystemPrompt;
const systemPrompt =
"'" +
DEFAULT_SYSTEM_PROMPT +
(dataSources?.length ? `\n${DATA_SOURCES_PROMPT}` : "") +
(toolSystemPrompt ? `\n${toolSystemPrompt}` : "") +
"'";

systemPromptEnv.push({
name: "SYSTEM_PROMPT",
Expand Down
30 changes: 1 addition & 29 deletions helpers/tools.ts
Original file line number Diff line number Diff line change
Expand Up @@ -71,8 +71,7 @@ export const supportedTools: Tool[] = [
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for DuckDuckGo search tool.",
value: `You are a DuckDuckGo search agent.
You can use the duckduckgo search tool to get information from the web to answer user questions.
value: `You have access to the duckduckgo search tool. Use it to get information from the web to answer user questions.
For better results, you can specify the region parameter to get results from a specific region but it's optional.`,
},
],
Expand All @@ -88,27 +87,13 @@ For better results, you can specify the region parameter to get results from a s
],
supportedFrameworks: ["fastapi", "express", "nextjs"],
type: ToolType.LLAMAHUB,
envVars: [
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for wiki tool.",
value: `You are a Wikipedia agent. You help users to get information from Wikipedia.`,
},
],
},
{
display: "Weather",
name: "weather",
dependencies: [],
supportedFrameworks: ["fastapi", "express", "nextjs"],
type: ToolType.LOCAL,
envVars: [
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for weather tool.",
value: `You are a weather forecast agent. You help users to get the weather forecast for a given location.`,
},
],
},
{
display: "Document generator",
Expand Down Expand Up @@ -211,14 +196,6 @@ For better results, you can specify the region parameter to get results from a s
},
supportedFrameworks: ["fastapi", "express", "nextjs"],
type: ToolType.LOCAL,
envVars: [
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for openapi action tool.",
value:
"You are an OpenAPI action agent. You help users to make requests to the provided OpenAPI schema.",
},
],
},
{
display: "Image Generator",
Expand All @@ -231,11 +208,6 @@ For better results, you can specify the region parameter to get results from a s
description:
"STABILITY_API_KEY key is required to run image generator. Get it here: https://platform.stability.ai/account/keys",
},
{
name: TOOL_SYSTEM_PROMPT_ENV_VAR,
description: "System prompt for image generator tool.",
value: `You are an image generator agent. You help users to generate images using the Stability API.`,
},
],
},
{
Expand Down
2 changes: 1 addition & 1 deletion questions/simple.ts
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ const convertAnswers = async (
> = {
rag: {
template: "streaming",
tools: getTools(["wikipedia.WikipediaToolSpec"]),
tools: getTools(["weather"]),
frontend: true,
dataSources: [EXAMPLE_FILE],
},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,9 @@ export class InterpreterTool implements BaseTool<InterpreterParameter> {
const fileName = path.basename(filePath);
const localFilePath = path.join(this.uploadedFilesDir, fileName);
const content = fs.readFileSync(localFilePath);
await this.codeInterpreter?.files.write(filePath, content);

const arrayBuffer = new Uint8Array(content).buffer;
await this.codeInterpreter?.files.write(filePath, arrayBuffer);
}
} catch (error) {
console.error("Got error when uploading files to sandbox", error);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ class VercelStreamResponse(StreamingResponse):

TEXT_PREFIX = "0:"
DATA_PREFIX = "8:"
ERROR_PREFIX = "3:"

def __init__(self, request: Request, chat_data: ChatData, *args, **kwargs):
self.request = request
Expand All @@ -41,13 +42,16 @@ async def content_generator(self, event_handler, events):

yield output
except asyncio.CancelledError:
logger.info("Stopping workflow")
await event_handler.cancel_run()
logger.warning("Workflow has been cancelled!")
except Exception as e:
logger.error(
f"Unexpected error in content_generator: {str(e)}", exc_info=True
)
yield self.convert_error(
"An unexpected error occurred while processing your request, preventing the creation of a final answer. Please try again."
)
finally:
await event_handler.cancel_run()
logger.info("The stream has been stopped!")

def _create_stream(
Expand Down Expand Up @@ -107,6 +111,11 @@ def convert_data(cls, data: dict):
data_str = json.dumps(data)
return f"{cls.DATA_PREFIX}[{data_str}]\n"

@classmethod
def convert_error(cls, error: str):
    # JSON-encode the message and frame it with the stream's error prefix
    # so the frontend renders it as an error part.
    return cls.ERROR_PREFIX + json.dumps(error) + "\n"

@staticmethod
async def _generate_next_questions(chat_history: List[Message], response: str):
questions = await NextQuestionSuggestion.suggest_next_questions(
Expand Down
2 changes: 1 addition & 1 deletion templates/types/streaming/express/README-template.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ First, install the dependencies:
npm install
```

Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):
Second, generate the embeddings of the documents in the `./data` directory:

```
npm run generate
Expand Down
2 changes: 1 addition & 1 deletion templates/types/streaming/fastapi/README-template.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ Then check the parameters that have been pre-configured in the `.env` file in th

If you are using any tools or data sources, you can update their config files in the `config` folder.

Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):
Second, generate the embeddings of the documents in the `./data` directory:

```
poetry run generate
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ class VercelStreamResponse(StreamingResponse):

TEXT_PREFIX = "0:"
DATA_PREFIX = "8:"
ERROR_PREFIX = "3:"

def __init__(
self,
Expand Down Expand Up @@ -53,17 +54,26 @@ async def content_generator(
# Merge the chat response generator and the event generator
combine = stream.merge(chat_response_generator, event_generator)
is_stream_started = False
async with combine.stream() as streamer:
async for output in streamer:
if not is_stream_started:
is_stream_started = True
# Stream a blank message to start displaying the response in the UI
yield cls.convert_text("")

yield output

if await request.is_disconnected():
break
try:
async with combine.stream() as streamer:
async for output in streamer:
if await request.is_disconnected():
break

if not is_stream_started:
is_stream_started = True
# Stream a blank message to start displaying the response in the UI
yield cls.convert_text("")

yield output
except Exception:
logger.exception("Error in stream response")
yield cls.convert_error(
"An unexpected error occurred while processing your request, preventing the creation of a final answer. Please try again."
)
finally:
# Ensure event handler is marked as done even if connection breaks
event_handler.is_done = True

@classmethod
async def _event_generator(cls, event_handler: EventCallbackHandler):
Expand Down Expand Up @@ -131,6 +141,11 @@ def convert_data(cls, data: dict):
data_str = json.dumps(data)
return f"{cls.DATA_PREFIX}[{data_str}]\n"

@classmethod
def convert_error(cls, error: str):
    # Serialize the error text as JSON and emit it as a single
    # prefix-framed line of the Vercel data stream.
    encoded = json.dumps(error)
    return "".join((cls.ERROR_PREFIX, encoded, "\n"))

@staticmethod
def _process_response_nodes(
source_nodes: List[NodeWithScore],
Expand Down
2 changes: 1 addition & 1 deletion templates/types/streaming/nextjs/README-template.md
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ First, install the dependencies:
npm install
```

Second, generate the embeddings of the documents in the `./data` directory (if this folder exists - otherwise, skip this step):
Second, generate the embeddings of the documents in the `./data` directory:

```
npm run generate
Expand Down
3 changes: 2 additions & 1 deletion templates/types/streaming/nextjs/app/api/sandbox/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,8 @@ export async function POST(req: Request) {
const localFilePath = path.join("output", "uploaded", fileName);
const fileContent = await fs.readFile(localFilePath);

await sbx.files.write(sandboxFilePath, fileContent);
const arrayBuffer = new Uint8Array(fileContent).buffer;
await sbx.files.write(sandboxFilePath, arrayBuffer);
console.log(`Copied file to ${sandboxFilePath} in ${sbx.sandboxID}`);
});
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,13 @@ export default function ChatSection() {
api: `${backend}/api/chat`,
onError: (error: unknown) => {
if (!(error instanceof Error)) throw error;
alert(JSON.parse(error.message).detail);
let errorMessage: string;
try {
errorMessage = JSON.parse(error.message).detail;
} catch (e) {
errorMessage = error.message;
}
alert(errorMessage);
},
});
return (
Expand Down

0 comments on commit 02b2473

Please sign in to comment.