Updates chatbot demo to work with new API key checking function
commit 798dd20 · 1 parent e221f3b
Showing 12 changed files with 254 additions and 212 deletions.
burr/tracking/server/demo_data/demo:chatbot/chat-1-giraffe/graph.json (2 changes: 1 addition & 1 deletion)
@@ -1 +1 @@
{"type": "application", "entrypoint": "prompt", "actions": [{"type": "action", "name": "prompt", "reads": [], "writes": ["chat_history", "prompt"], "code": "@action(reads=[], writes=[\"chat_history\", \"prompt\"])\ndef process_prompt(state: State, prompt: str) -> Tuple[dict, State]:\n result = {\"chat_item\": {\"role\": \"user\", \"content\": prompt, \"type\": \"text\"}}\n return result, state.wipe(keep=[\"prompt\", \"chat_history\"]).append(\n chat_history=result[\"chat_item\"]\n ).update(prompt=prompt)\n"}, {"type": "action", "name": "check_safety", "reads": ["prompt"], "writes": ["safe"], "code": "@action(reads=[\"prompt\"], writes=[\"safe\"])\ndef check_safety(state: State) -> Tuple[dict, State]:\n result = {\"safe\": \"unsafe\" not in state[\"prompt\"]} # quick hack to demonstrate\n return result, state.update(safe=result[\"safe\"])\n"}, {"type": "action", "name": "decide_mode", "reads": ["prompt"], "writes": ["mode"], "code": "@action(reads=[\"prompt\"], writes=[\"mode\"])\ndef choose_mode(state: State) -> Tuple[dict, State]:\n prompt = (\n f\"You are a chatbot. You've been prompted this: {state['prompt']}. \"\n f\"You have the capability of responding in the following modes: {', '.join(MODES)}. \"\n \"Please respond with *only* a single word representing the mode that most accurately \"\n \"corresponds to the prompt. Fr instance, if the prompt is 'draw a picture of a cat', \"\n \"the mode would be 'generate_image'. If the prompt is 'what is the capital of France', the mode would be 'answer_question'.\"\n \"If none of these modes apply, please respond with 'unknown'.\"\n )\n\n result = _get_openai_client().chat.completions.create(\n model=\"gpt-4\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n {\"role\": \"user\", \"content\": prompt},\n ],\n )\n content = result.choices[0].message.content\n mode = content.lower()\n if mode not in MODES:\n mode = \"unknown\"\n result = {\"mode\": mode}\n return result, state.update(**result)\n"}, {"type": "action", "name": "generate_image", "reads": ["prompt", "chat_history", "mode"], "writes": ["response"], "code": "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\ndef image_response(state: State, model: str = \"dall-e-2\") -> Tuple[dict, State]:\n client = _get_openai_client()\n result = client.images.generate(\n model=model, prompt=state[\"prompt\"], size=\"1024x1024\", quality=\"standard\", n=1\n )\n response = result.data[0].url\n result = {\"response\": {\"content\": response, \"type\": MODES[state[\"mode\"]], \"role\": \"assistant\"}}\n return result, state.update(**result)\n"}, {"type": "action", "name": "generate_code", "reads": ["prompt", "chat_history", "mode"], "writes": ["response"], "code": "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\ndef chat_response(\n state: State, prepend_prompt: str, display_type: str = \"text\", model: str = \"gpt-3.5-turbo\"\n) -> Tuple[dict, State]:\n chat_history = state[\"chat_history\"].copy()\n chat_history[-1][\"content\"] = f\"{prepend_prompt}: {chat_history[-1]['content']}\"\n chat_history_api_format = [\n {\n \"role\": chat[\"role\"],\n \"content\": chat[\"content\"],\n }\n for chat in chat_history\n ]\n client = _get_openai_client()\n result = client.chat.completions.create(\n model=model,\n messages=chat_history_api_format,\n )\n response = result.choices[0].message.content\n result = {\"response\": {\"content\": response, \"type\": MODES[state[\"mode\"]], \"role\": \"assistant\"}}\n return 
result, state.update(**result)\n"}, {"type": "action", "name": "answer_question", "reads": ["prompt", "chat_history", "mode"], "writes": ["response"], "code": "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\ndef chat_response(\n state: State, prepend_prompt: str, display_type: str = \"text\", model: str = \"gpt-3.5-turbo\"\n) -> Tuple[dict, State]:\n chat_history = state[\"chat_history\"].copy()\n chat_history[-1][\"content\"] = f\"{prepend_prompt}: {chat_history[-1]['content']}\"\n chat_history_api_format = [\n {\n \"role\": chat[\"role\"],\n \"content\": chat[\"content\"],\n }\n for chat in chat_history\n ]\n client = _get_openai_client()\n result = client.chat.completions.create(\n model=model,\n messages=chat_history_api_format,\n )\n response = result.choices[0].message.content\n result = {\"response\": {\"content\": response, \"type\": MODES[state[\"mode\"]], \"role\": \"assistant\"}}\n return result, state.update(**result)\n"}, {"type": "action", "name": "prompt_for_more", "reads": ["prompt", "chat_history"], "writes": ["response"], "code": "@action(reads=[\"prompt\", \"chat_history\"], writes=[\"response\"])\ndef prompt_for_more(state: State) -> Tuple[dict, State]:\n result = {\n \"response\": {\n \"content\": \"None of the response modes I support apply to your question. Please clarify?\",\n \"type\": \"text\",\n \"role\": \"assistant\",\n }\n }\n return result, state.update(**result)\n"}, {"type": "action", "name": "response", "reads": ["response", "safe", "mode"], "writes": ["chat_history"], "code": "@action(reads=[\"response\", \"safe\", \"mode\"], writes=[\"chat_history\"])\ndef response(state: State) -> Tuple[dict, State]:\n if not state[\"safe\"]:\n result = {\n \"chat_item\": {\n \"role\": \"assistant\",\n \"content\": \"I'm sorry, I can't respond to that.\",\n \"type\": \"text\",\n }\n }\n else:\n result = {\"chat_item\": state[\"response\"]}\n return result, state.append(chat_history=result[\"chat_item\"])\n"}], "transitions": [{"type": "transition", "from_": "prompt", "to": "check_safety", "condition": "default"}, {"type": "transition", "from_": "check_safety", "to": "decide_mode", "condition": "safe=True"}, {"type": "transition", "from_": "check_safety", "to": "response", "condition": "default"}, {"type": "transition", "from_": "decide_mode", "to": "generate_image", "condition": "mode=generate_image"}, {"type": "transition", "from_": "decide_mode", "to": "generate_code", "condition": "mode=generate_code"}, {"type": "transition", "from_": "decide_mode", "to": "answer_question", "condition": "mode=answer_question"}, {"type": "transition", "from_": "decide_mode", "to": "prompt_for_more", "condition": "default"}, {"type": "transition", "from_": "generate_image", "to": "response", "condition": "default"}, {"type": "transition", "from_": "answer_question", "to": "response", "condition": "default"}, {"type": "transition", "from_": "generate_code", "to": "response", "condition": "default"}, {"type": "transition", "from_": "prompt_for_more", "to": "response", "condition": "default"}, {"type": "transition", "from_": "response", "to": "prompt", "condition": "default"}]} | ||
{"type": "application", "entrypoint": "prompt", "actions": [{"type": "action", "name": "prompt", "reads": [], "writes": ["chat_history", "prompt"], "code": "@action(reads=[], writes=[\"chat_history\", \"prompt\"])\ndef process_prompt(state: State, prompt: str) -> Tuple[dict, State]:\n result = {\"chat_item\": {\"role\": \"user\", \"content\": prompt, \"type\": \"text\"}}\n return result, state.wipe(keep=[\"prompt\", \"chat_history\"]).append(\n chat_history=result[\"chat_item\"]\n ).update(prompt=prompt)\n"}, {"type": "action", "name": "check_openai_key", "reads": [], "writes": ["has_openai_key"], "code": "@action(reads=[], writes=[\"has_openai_key\"])\ndef check_openai_key(state: State) -> Tuple[dict, State]:\n result = {\"has_openai_key\": \"OPENAI_API_KEY\" in os.environ}\n return result, state.update(**result)\n"}, {"type": "action", "name": "check_safety", "reads": ["prompt"], "writes": ["safe"], "code": "@action(reads=[\"prompt\"], writes=[\"safe\"])\ndef check_safety(state: State) -> Tuple[dict, State]:\n result = {\"safe\": \"unsafe\" not in state[\"prompt\"]} # quick hack to demonstrate\n return result, state.update(safe=result[\"safe\"])\n"}, {"type": "action", "name": "decide_mode", "reads": ["prompt"], "writes": ["mode"], "code": "@action(reads=[\"prompt\"], writes=[\"mode\"])\ndef choose_mode(state: State) -> Tuple[dict, State]:\n prompt = (\n f\"You are a chatbot. You've been prompted this: {state['prompt']}. \"\n f\"You have the capability of responding in the following modes: {', '.join(MODES)}. \"\n \"Please respond with *only* a single word representing the mode that most accurately \"\n \"corresponds to the prompt. Fr instance, if the prompt is 'draw a picture of a cat', \"\n \"the mode would be 'generate_image'. If the prompt is 'what is the capital of France', the mode would be 'answer_question'.\"\n \"If none of these modes apply, please respond with 'unknown'.\"\n )\n\n result = _get_openai_client().chat.completions.create(\n model=\"gpt-4\",\n messages=[\n {\"role\": \"system\", \"content\": \"You are a helpful assistant\"},\n {\"role\": \"user\", \"content\": prompt},\n ],\n )\n content = result.choices[0].message.content\n mode = content.lower()\n if mode not in MODES:\n mode = \"unknown\"\n result = {\"mode\": mode}\n return result, state.update(**result)\n"}, {"type": "action", "name": "generate_image", "reads": ["prompt", "chat_history", "mode"], "writes": ["response"], "code": "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\ndef image_response(state: State, model: str = \"dall-e-2\") -> Tuple[dict, State]:\n \"\"\"Generates an image response to the prompt. 
Optional save function to save the image to a URL.\"\"\"\n client = _get_openai_client()\n result = client.images.generate(\n model=model, prompt=state[\"prompt\"], size=\"1024x1024\", quality=\"standard\", n=1\n )\n response = result.data[0].url\n result = {\"response\": {\"content\": response, \"type\": MODES[state[\"mode\"]], \"role\": \"assistant\"}}\n return result, state.update(**result)\n"}, {"type": "action", "name": "generate_code", "reads": ["prompt", "chat_history", "mode"], "writes": ["response"], "code": "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\ndef chat_response(\n state: State, prepend_prompt: str, display_type: str = \"text\", model: str = \"gpt-3.5-turbo\"\n) -> Tuple[dict, State]:\n chat_history = copy.deepcopy(state[\"chat_history\"])\n chat_history[-1][\"content\"] = f\"{prepend_prompt}: {chat_history[-1]['content']}\"\n chat_history_api_format = [\n {\n \"role\": chat[\"role\"],\n \"content\": chat[\"content\"],\n }\n for chat in chat_history\n ]\n client = _get_openai_client()\n result = client.chat.completions.create(\n model=model,\n messages=chat_history_api_format,\n )\n response = result.choices[0].message.content\n result = {\"response\": {\"content\": response, \"type\": MODES[state[\"mode\"]], \"role\": \"assistant\"}}\n return result, state.update(**result)\n"}, {"type": "action", "name": "answer_question", "reads": ["prompt", "chat_history", "mode"], "writes": ["response"], "code": "@action(reads=[\"prompt\", \"chat_history\", \"mode\"], writes=[\"response\"])\ndef chat_response(\n state: State, prepend_prompt: str, display_type: str = \"text\", model: str = \"gpt-3.5-turbo\"\n) -> Tuple[dict, State]:\n chat_history = copy.deepcopy(state[\"chat_history\"])\n chat_history[-1][\"content\"] = f\"{prepend_prompt}: {chat_history[-1]['content']}\"\n chat_history_api_format = [\n {\n \"role\": chat[\"role\"],\n \"content\": chat[\"content\"],\n }\n for chat in chat_history\n ]\n client = _get_openai_client()\n result = client.chat.completions.create(\n model=model,\n messages=chat_history_api_format,\n )\n response = result.choices[0].message.content\n result = {\"response\": {\"content\": response, \"type\": MODES[state[\"mode\"]], \"role\": \"assistant\"}}\n return result, state.update(**result)\n"}, {"type": "action", "name": "prompt_for_more", "reads": ["prompt", "chat_history"], "writes": ["response"], "code": "@action(reads=[\"prompt\", \"chat_history\"], writes=[\"response\"])\ndef prompt_for_more(state: State) -> Tuple[dict, State]:\n result = {\n \"response\": {\n \"content\": \"None of the response modes I support apply to your question. Please clarify?\",\n \"type\": \"text\",\n \"role\": \"assistant\",\n }\n }\n return result, state.update(**result)\n"}, {"type": "action", "name": "response", "reads": ["response", "mode", "safe", "has_openai_key"], "writes": ["chat_history"], "code": "@action(reads=[\"response\", \"mode\", \"safe\", \"has_openai_key\"], writes=[\"chat_history\"])\ndef response(state: State) -> Tuple[dict, State]:\n if not state[\"has_openai_key\"]:\n result = {\n \"chat_item\": {\n \"role\": \"assistant\",\n \"content\": \"You have not set an API key for [OpenAI](https://www.openai.com). Do this \"\n \"by setting the environment variable `OPENAI_API_KEY` to your key. \"\n \"You can get a key at [OpenAI](https://platform.openai.com). 
\"\n \"You can still look at chat history/examples.\",\n \"type\": \"error\",\n }\n }\n elif not state[\"safe\"]:\n result = {\n \"chat_item\": {\n \"role\": \"assistant\",\n \"content\": \"I'm sorry, I can't respond to that.\",\n \"type\": \"error\",\n }\n }\n else:\n result = {\"chat_item\": state[\"response\"]}\n return result, state.append(chat_history=result[\"chat_item\"])\n"}], "transitions": [{"type": "transition", "from_": "prompt", "to": "check_openai_key", "condition": "default"}, {"type": "transition", "from_": "check_openai_key", "to": "check_safety", "condition": "has_openai_key=True"}, {"type": "transition", "from_": "check_openai_key", "to": "response", "condition": "default"}, {"type": "transition", "from_": "check_safety", "to": "decide_mode", "condition": "safe=True"}, {"type": "transition", "from_": "check_safety", "to": "response", "condition": "default"}, {"type": "transition", "from_": "decide_mode", "to": "generate_image", "condition": "mode=generate_image"}, {"type": "transition", "from_": "decide_mode", "to": "generate_code", "condition": "mode=generate_code"}, {"type": "transition", "from_": "decide_mode", "to": "answer_question", "condition": "mode=answer_question"}, {"type": "transition", "from_": "decide_mode", "to": "prompt_for_more", "condition": "default"}, {"type": "transition", "from_": "generate_image", "to": "response", "condition": "default"}, {"type": "transition", "from_": "answer_question", "to": "response", "condition": "default"}, {"type": "transition", "from_": "generate_code", "to": "response", "condition": "default"}, {"type": "transition", "from_": "prompt_for_more", "to": "response", "condition": "default"}, {"type": "transition", "from_": "response", "to": "prompt", "condition": "default"}]} |