From 2f8d54f6ebfe49ba07ca80bae268d4d7d6d10f3c Mon Sep 17 00:00:00 2001 From: Stefan Krawczyk Date: Tue, 26 Mar 2024 17:30:19 -0700 Subject: [PATCH 1/2] Adds more documentation to examples This enhances the documentation. It adds some notebooks (more to be added), and also a video walkthrough. In creating the walkthrough, some minor adjustments to the ApplicationBuilder & Application were made as well. --- burr/core/application.py | 7 +- docs/examples/agents.rst | 10 + docs/examples/chatbot.rst | 21 +- docs/examples/index.rst | 1 + examples/blog_post/README.md | 15 + examples/conversational_rag/README.md | 40 + examples/conversational_rag/application.py | 49 +- examples/conversational_rag/notebook.ipynb | 1073 ++++++++++++++++++ examples/counter/notebook.ipynb | 193 ++++ examples/cowsay/notebook.ipynb | 313 +++++ examples/multi-agent-collaboration/README.md | 0 11 files changed, 1700 insertions(+), 22 deletions(-) create mode 100644 docs/examples/agents.rst create mode 100644 examples/blog_post/README.md create mode 100644 examples/conversational_rag/README.md create mode 100644 examples/conversational_rag/notebook.ipynb create mode 100644 examples/counter/notebook.ipynb create mode 100644 examples/cowsay/notebook.ipynb create mode 100644 examples/multi-agent-collaboration/README.md diff --git a/burr/core/application.py b/burr/core/application.py index 87146a77..4e4f2024 100644 --- a/burr/core/application.py +++ b/burr/core/application.py @@ -912,13 +912,13 @@ async def astream_result( @telemetry.capture_function_usage def visualize( self, - output_file_path: Optional[str], + output_file_path: Optional[str] = None, include_conditions: bool = False, include_state: bool = False, view: bool = False, engine: Literal["graphviz"] = "graphviz", **engine_kwargs: Any, - ): + ) -> Optional["graphviz.Digraph"]: # noqa: F821 """Visualizes the application graph using graphviz. This will render the graph. :param output_file_path: The path to save this to, None if you don't want to save. Do not pass an extension @@ -976,7 +976,8 @@ def visualize( label=condition.name if include_conditions and condition is not default else None, style="dashed" if transition.condition is not default else "solid", ) - digraph.render(output_file_path, view=view) + if output_file_path: + digraph.render(output_file_path, view=view) return digraph @staticmethod diff --git a/docs/examples/agents.rst b/docs/examples/agents.rst new file mode 100644 index 00000000..47e16bcb --- /dev/null +++ b/docs/examples/agents.rst @@ -0,0 +1,10 @@ +==================== +Agents +==================== + +Burr allows you to create agents that can interact with each other via State. + +Multi-Agent Example +-------------------- + +See `github repository example `_. diff --git a/docs/examples/chatbot.rst b/docs/examples/chatbot.rst index 53f0b9f2..fa2e1990 100644 --- a/docs/examples/chatbot.rst +++ b/docs/examples/chatbot.rst @@ -1,5 +1,24 @@ ================ -GPT-like chatbot +Chatbots ================ +Chat bots are a simple example where state influences the conversation. This is a +perfect use case for using Burr. + +GPT-like chatbot +---------------- + See `github repository example `_. + + +Conversational RAG chatbot +-------------------------- +See `github example `_. + +Accompanying video walkthrough: + +.. raw:: html + +
+ +
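The `visualize()` change to `burr/core/application.py` at the top of this patch is what lets the notebooks added here display the graph inline. A minimal sketch (not part of the patch itself) of the two calling patterns, assuming `app` is a built Burr `Application` and graphviz is installed:

```python
# In a notebook, no path is needed: the method now returns the
# graphviz.Digraph, which renders inline, and nothing is written to disk.
digraph = app.visualize(include_conditions=True)

# Passing a path (no extension, per the docstring) still renders the graph
# to a file, exactly as before.
app.visualize(output_file_path="statemachine", include_conditions=True, view=False)
```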
diff --git a/docs/examples/index.rst b/docs/examples/index.rst index 0b6f32a3..3442560e 100644 --- a/docs/examples/index.rst +++ b/docs/examples/index.rst @@ -12,5 +12,6 @@ Examples of more complex/powerful use-cases of Burr. Download/copy these to adap .. toctree:: simple chatbot + agents ml_training simulation diff --git a/examples/blog_post/README.md b/examples/blog_post/README.md new file mode 100644 index 00000000..fad29c75 --- /dev/null +++ b/examples/blog_post/README.md @@ -0,0 +1,15 @@ +Example that goes with [introductory blog post](https://blog.dagworks.io/p/burr-develop-stateful-ai-applications). + +## 🏃Quick start + +```bash +pip install "burr[start]" jupyter +``` + +Run the notebook: + +```bash +jupyter notebook +``` + +Then open `blog.ipynb` and run the cells. diff --git a/examples/conversational_rag/README.md b/examples/conversational_rag/README.md new file mode 100644 index 00000000..1247fa22 --- /dev/null +++ b/examples/conversational_rag/README.md @@ -0,0 +1,40 @@ +# Conversational RAG with memory +This example demonstrates how to build a conversational RAG agent with "memory". + +The "memory" here is stored in state, which Burr then can help you track, +manage, and introspect. + +The set up of this example is that you have: + +1. Some initial "documents" i.e. knowledge. +2. We bootstrap a vector store with these documents. +3. We then have a pipeline that uses a vector store for a RAG query. +4. We hook everything together with Burr that will manage the state +of the conversation and asking for user inputs. + +To run this example, install Burr and the necessary dependencies: + +```bash +pip install "burr[start]" -r requirements.txt +``` + +Then run the server in the background: + +```bash +burr +``` + +Make sure you have an `OPENAI_API_KEY` set in your environment. + +Then run +```bash +python application.py +``` + +You'll then have a text terminal where you can interact. Type exit to stop. + +# Video Walkthrough via Notebook +Watch the video walkthrough in the notebook (1.5x+ speed recommended): +
+ +
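To make steps 1-3 above concrete, here is a condensed sketch of the Hamilton side of the wiring; it mirrors the `application.py` code in this patch (the sample documents and question are illustrative):

```python
from hamilton import dataflows, driver

# pull the conversational_rag dataflow down from the Hamilton hub
conversational_rag = dataflows.import_module("conversational_rag")
rag_driver = (
    driver.Builder()
    .with_config({})  # replace with configuration as appropriate
    .with_modules(conversational_rag)
    .build()
)

# steps (1) + (2): bootstrap the vector store with the initial "documents"
vector_store = rag_driver.execute(
    ["vector_store"],
    inputs={"input_texts": ["stefan likes tacos", "elijah likes mango"]},
)["vector_store"]

# step (3): a RAG query, overriding the vector_store node so the
# bootstrapped store is reused rather than rebuilt on every call
answer = rag_driver.execute(
    ["conversational_rag_response"],
    inputs={"question": "What does Stefan like?", "chat_history": []},
    overrides={"vector_store": vector_store},
)["conversational_rag_response"]
```

Step 4, looping between human and AI turns while accumulating `chat_history` in state, is what the Burr application below layers on top.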
diff --git a/examples/conversational_rag/application.py b/examples/conversational_rag/application.py index c93f1454..69e707d7 100644 --- a/examples/conversational_rag/application.py +++ b/examples/conversational_rag/application.py @@ -8,6 +8,7 @@ from burr.core.action import action from burr.lifecycle import LifecycleAdapter, PostRunStepHook, PreRunStepHook +# create the pipeline conversational_rag = dataflows.import_module("conversational_rag") conversational_rag_driver = ( driver.Builder() @@ -17,6 +18,11 @@ ) +def bootstrap_vector_db(rag_driver: driver.Driver, input_texts: List[str]) -> object: + """Bootstrap the vector database with some input texts.""" + return rag_driver.execute(["vector_store"], inputs={"input_texts": input_texts})["vector_store"] + + class PrintStepHook(PostRunStepHook, PreRunStepHook): """Custom hook to print the action/result after each step.""" @@ -27,24 +33,28 @@ def pre_run_step(self, action: Action, **future_kwargs): print("⏳Processing input from user...") def post_run_step(self, *, state: "State", action: Action, result: dict, **future_kwargs): + if action.name == "human_converse": + print("🎙💬", result["question"], "\n") if action.name == "ai_converse": - print("💬", result["conversational_rag_response"], "\n") + print("🤖💬", result["conversational_rag_response"], "\n") @action( - reads=["input_texts", "question", "chat_history"], + reads=["question", "chat_history"], writes=["chat_history"], ) -def ai_converse(state: State) -> Tuple[dict, State]: - """AI conversing step. This calls out to an API on the Hamilton hub (hub.dagworks.io) - to do basic RAG""" +def ai_converse(state: State, vector_store: object) -> Tuple[dict, State]: + """AI conversing step. Uses Hamilton to execute the conversational pipeline.""" result = conversational_rag_driver.execute( ["conversational_rag_response"], inputs={ - "input_texts": state["input_texts"], "question": state["question"], "chat_history": state["chat_history"], }, + # we use overrides here because we want to pass in the vector store + overrides={ + "vector_store": vector_store, + }, ) new_history = f"AI: {result['conversational_rag_response']}" return result, state.append(chat_history=new_history) @@ -55,7 +65,7 @@ def ai_converse(state: State) -> Tuple[dict, State]: writes=["question", "chat_history"], ) def human_converse(state: State, user_question: str) -> Tuple[dict, State]: - """Human converse step -- this simply massages the state to be the right shape""" + """Human converse step -- make sure we get input, and store it as state.""" state = state.update(question=user_question).append(chat_history=f"Human: {user_question}") return {"question": user_question}, state @@ -65,26 +75,29 @@ def application( storage_dir: Optional[str] = "~/.burr", hooks: Optional[List[LifecycleAdapter]] = None, ) -> Application: + # our initial knowledge base + input_text = [ + "harrison worked at kensho", + "stefan worked at Stitch Fix", + "stefan likes tacos", + "elijah worked at TwoSigma", + "elijah likes mango", + "stefan used to work at IBM", + "elijah likes to go biking", + "stefan likes to bake sourdough", + ] + vector_store = bootstrap_vector_db(conversational_rag_driver, input_text) app = ( ApplicationBuilder() .with_state( **{ - "input_texts": [ - "harrison worked at kensho", - "stefan worked at Stitch Fix", - "stefan likes tacos", - "elijah worked at TwoSigma", - "elijah likes mango", - "stefan used to work at IBM", - "elijah likes to go biking", - "stefan likes to bake sourdough", - ], "question": "", "chat_history": [], } ) 
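        # note: "input_texts" is no longer kept in State (see the removal
        # above); the bootstrapped vector_store is instead bound directly to
        # the ai_converse action below via .bind(vector_store=vector_store)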
.with_actions( - ai_converse=ai_converse, + # bind the vector store to the AI conversational step + ai_converse=ai_converse.bind(vector_store=vector_store), human_converse=human_converse, terminal=burr.core.Result("chat_history"), ) diff --git a/examples/conversational_rag/notebook.ipynb b/examples/conversational_rag/notebook.ipynb new file mode 100644 index 00000000..7488e8de --- /dev/null +++ b/examples/conversational_rag/notebook.ipynb @@ -0,0 +1,1073 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Conversational RAG Example" + ], + "metadata": { + "collapsed": false + }, + "id": "1622e1563a35aa32" + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "# Importing the necessary libraries\n", + "import pprint\n", + "from typing import Tuple\n", + "from hamilton import dataflows, driver\n", + "import burr.core\n", + "from burr.core import ApplicationBuilder, State, default, expr\n", + "from burr.core.action import action\n", + "from application import PrintStepHook # local import from application.py\n", + "from burr.tracking import LocalTrackingClient" + ], + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:47:37.515883Z", + "start_time": "2024-03-26T20:47:35.314767Z" + } + }, + "id": "initial_id", + "execution_count": 8 + }, + { + "cell_type": "markdown", + "id": "8f1d578469a918b1", + "metadata": { + "collapsed": false + }, + "source": [ + "# Load your \"chain\" or conversational RAG \"pipeline\"\n", + "\n", + "We use Hamilton here. But you could use LangChain, etc., or forgo them and write your own code." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "c6a018aff1154f0b", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:47:42.057246Z", + "start_time": "2024-03-26T20:47:40.500075Z" + }, + "collapsed": false + }, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "cluster__legend\n", + "\n", + "Legend\n", + "\n", + "\n", + "\n", + "context\n", + "\n", + "context\n", + "str\n", + "\n", + "\n", + "\n", + "answer_prompt\n", + "\n", + "answer_prompt\n", + "str\n", + "\n", + "\n", + "\n", + "context->answer_prompt\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "conversational_rag_response\n", + "\n", + "conversational_rag_response\n", + "str\n", + "\n", + "\n", + "\n", + "standalone_question\n", + "\n", + "standalone_question\n", + "str\n", + "\n", + "\n", + "\n", + "standalone_question->context\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "standalone_question->answer_prompt\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "llm_client\n", + "\n", + "llm_client\n", + "OpenAI\n", + "\n", + "\n", + "\n", + "llm_client->conversational_rag_response\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "llm_client->standalone_question\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "answer_prompt->conversational_rag_response\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "standalone_question_prompt\n", + "\n", + "standalone_question_prompt\n", + "str\n", + "\n", + "\n", + "\n", + "standalone_question_prompt->standalone_question\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "vector_store\n", + "\n", + "vector_store\n", + "VectorStoreRetriever\n", + "\n", + "\n", + "\n", + "vector_store->context\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "_context_inputs\n", + "\n", + "top_k\n", + "int\n", + "\n", + "\n", + "\n", + "_context_inputs->context\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "_standalone_question_prompt_inputs\n", + "\n", + "question\n", + "str\n", + 
"chat_history\n", + "list\n", + "\n", + "\n", + "\n", + "_standalone_question_prompt_inputs->standalone_question_prompt\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "_vector_store_inputs\n", + "\n", + "input_texts\n", + "list\n", + "\n", + "\n", + "\n", + "_vector_store_inputs->vector_store\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "input\n", + "\n", + "input\n", + "\n", + "\n", + "\n", + "function\n", + "\n", + "function\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Loads Hamilton DAG\n", + "conversational_rag = dataflows.import_module(\"conversational_rag\")\n", + "conversational_rag_driver = (\n", + " driver.Builder()\n", + " .with_config({}) # replace with configuration as appropriate\n", + " .with_modules(conversational_rag)\n", + " .build()\n", + ")\n", + "conversational_rag_driver.display_all_functions()" + ] + }, + { + "cell_type": "markdown", + "id": "82b3515afd2de6e4", + "metadata": { + "collapsed": false + }, + "source": [ + "# Create the actions that will constitute our application\n", + "\n", + "We will use the functional (vs class) approach to declaring actions here. " + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "6433dad5abc6eb16", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:47:42.065785Z", + "start_time": "2024-03-26T20:47:42.059539Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "@action(\n", + " reads=[\"question\", \"chat_history\"],\n", + " writes=[\"chat_history\"],\n", + ")\n", + "def ai_converse(state: State, vector_store: object) -> Tuple[dict, State]:\n", + " \"\"\"AI conversing step. Uses Hamilton to execute the conversational pipeline.\"\"\"\n", + " result = conversational_rag_driver.execute(\n", + " [\"conversational_rag_response\"],\n", + " inputs={\n", + " \"question\": state[\"question\"],\n", + " \"chat_history\": state[\"chat_history\"],\n", + " },\n", + " # we use overrides here because we want to pass in the vector store\n", + " overrides={\n", + " \"vector_store\": vector_store,\n", + " }\n", + " )\n", + " new_history = f\"AI: {result['conversational_rag_response']}\"\n", + " return result, state.append(chat_history=new_history)\n", + "\n", + "\n", + "@action(\n", + " reads=[],\n", + " writes=[\"question\", \"chat_history\"],\n", + ")\n", + "def human_converse(state: State, user_question: str) -> Tuple[dict, State]:\n", + " \"\"\"Human converse step -- make sure we get input, and store it as state.\"\"\"\n", + " state = state.update(question=user_question).append(chat_history=f\"Human: {user_question}\")\n", + " return {\"question\": user_question}, state" + ] + }, + { + "cell_type": "markdown", + "id": "9579b47aac2c53a0", + "metadata": { + "collapsed": false + }, + "source": [ + "# Create the application\n", + "\n", + "We now create the application, which is a collection of actions, and then set the transitions between the actions based on values in State.\n", + "\n", + "We also intialize initial values etc to populate the application with." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "77e9f67b660a0953", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:50:25.642660Z", + "start_time": "2024-03-26T20:50:25.616245Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "# what we will do RAG over.\n", + "initial_documents = [\n", + " \"harrison worked at kensho\",\n", + " \"stefan worked at Stitch Fix\",\n", + " \"stefan likes tacos\",\n", + " \"elijah worked at TwoSigma\",\n", + " \"elijah likes mango\",\n", + " \"stefan used to work at IBM\",\n", + " \"elijah likes to go biking\",\n", + " \"stefan likes to bake sourdough\",\n", + "]\n", + "# bootstrap the vector store;\n", + "vector_store = conversational_rag_driver.execute(\n", + " [\"vector_store\"],\n", + " inputs={\"input_texts\": initial_documents})[\"vector_store\"]\n", + "# what we will initialize the application with\n", + "initial_state = {\n", + " \"question\": \"\",\n", + " \"chat_history\": [],\n", + "}\n", + "import uuid\n", + "app_id = str(uuid.uuid4())\n", + "app = (\n", + " ApplicationBuilder()\n", + " # add the actions\n", + " .with_actions(\n", + " # bind the vector store to the AI conversational step\n", + " ai_converse=ai_converse.bind(vector_store=vector_store),\n", + " human_converse=human_converse,\n", + " terminal=burr.core.Result(\"chat_history\"),\n", + " )\n", + " # set the transitions between actions\n", + " .with_transitions(\n", + " (\"ai_converse\", \"human_converse\", default),\n", + " (\"human_converse\", \"terminal\", expr(\"'exit' in question\")),\n", + " (\"human_converse\", \"ai_converse\", default),\n", + " )\n", + " # add identifiers that will help track the application\n", + " .with_identifiers(app_id=app_id, partition_key=\"sample_user\")\n", + " # initialize the state\n", + " .with_state(**initial_state)\n", + " # say what the initial action is\n", + " .with_entrypoint(\"human_converse\")\n", + " # add a hook to print the steps -- optional but shows that Burr is pluggable\n", + " .with_hooks(PrintStepHook())\n", + " # add tracking -- this will show up in the BURR UI.\n", + " .with_tracker(project=\"demo:conversational-rag\")\n", + " # build the application\n", + " .build()\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "bf5d1e084a791fa5", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:48:39.379712Z", + "start_time": "2024-03-26T20:48:38.701659Z" + }, + "collapsed": false + }, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "ai_converse\n", + "\n", + "ai_converse\n", + "\n", + "\n", + "\n", + "human_converse\n", + "\n", + "human_converse\n", + "\n", + "\n", + "\n", + "ai_converse->human_converse\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "human_converse->ai_converse\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "terminal\n", + "\n", + "terminal\n", + "\n", + "\n", + "\n", + "human_converse->terminal\n", + "\n", + "\n", + "'exit' in question\n", + "\n", + "\n", + "\n", + "input__user_question\n", + "\n", + "input: user_question\n", + "\n", + "\n", + "\n", + "input__user_question->human_converse\n", + "\n", + "\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# let's visualize what we have\n", + "app.visualize(include_conditions=True)" + ] + }, + { + "cell_type": "markdown", + "id": "430bab287b6ad9a", + "metadata": { + "collapsed": false + }, + 
"source": [ + "# Let's run the app. \n", + "\n", + "Let's run it a step at a time." + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "8bcfe9ca48f87618", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:50:38.782857Z", + "start_time": "2024-03-26T20:50:28.797204Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "⏳Processing input from user...\n", + "🎙💬 Who is Stefan? Please answer in English. \n", + "\n", + "Ran action human_converse with result:\n", + " {'question': 'Who is Stefan? Please answer in English.'} \n", + " and state:\n", + " {'__PRIOR_STEP': 'human_converse',\n", + " '__SEQUENCE_ID': 0,\n", + " 'chat_history': ['Human: Who is Stefan? Please answer in English.'],\n", + " 'input_texts': ['harrison worked at kensho',\n", + " 'stefan worked at Stitch Fix',\n", + " 'stefan likes tacos',\n", + " 'elijah worked at TwoSigma',\n", + " 'elijah likes mango',\n", + " 'stefan used to work at IBM',\n", + " 'elijah likes to go biking',\n", + " 'stefan likes to bake sourdough'],\n", + " 'question': 'Who is Stefan? Please answer in English.'}\n" + ] + } + ], + "source": [ + "app.reset_to_entrypoint() # reset the app to the entrypoint\n", + "user_question = input(\"Ask something (or type exit to quit): \")\n", + "previous_action, result, state = app.step(\n", + " inputs={\"user_question\": user_question},\n", + ")\n", + "print(f\"Ran action {previous_action.name} with result:\\n {pprint.pformat(result)} \\n and state:\\n {pprint.pformat(state.get_all())}\")" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "81940578d58fd602", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:50:44.662755Z", + "start_time": "2024-03-26T20:50:41.919782Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "🤔 AI is thinking...\n", + "🤖💬 Stefan is a person who used to work at IBM, worked at Stitch Fix, likes tacos, and likes to bake sourdough. \n", + "\n", + "Ran action ai_converse with result:\n", + " {'conversational_rag_response': 'Stefan is a person who used to work at IBM, '\n", + " 'worked at Stitch Fix, likes tacos, and likes '\n", + " 'to bake sourdough.'} \n", + " and state:\n", + " {'__PRIOR_STEP': 'ai_converse',\n", + " '__SEQUENCE_ID': 1,\n", + " 'chat_history': ['Human: Who is Stefan? Please answer in English.',\n", + " 'AI: Stefan is a person who used to work at IBM, worked at '\n", + " 'Stitch Fix, likes tacos, and likes to bake sourdough.'],\n", + " 'input_texts': ['harrison worked at kensho',\n", + " 'stefan worked at Stitch Fix',\n", + " 'stefan likes tacos',\n", + " 'elijah worked at TwoSigma',\n", + " 'elijah likes mango',\n", + " 'stefan used to work at IBM',\n", + " 'elijah likes to go biking',\n", + " 'stefan likes to bake sourdough'],\n", + " 'question': 'Who is Stefan? Please answer in English.'}\n" + ] + } + ], + "source": [ + "# now let's run the AI conversational step\n", + "previous_action, result, state = app.step()\n", + "print(f\"Ran action {previous_action.name} with result:\\n {pprint.pformat(result)} \\n and state:\\n {pprint.pformat(state.get_all())}\")" + ] + }, + { + "cell_type": "markdown", + "id": "36ec2f4908c2dde2", + "metadata": { + "collapsed": false + }, + "source": [ + "# Let's now run the app to completion\n", + "\n", + "You could do the above for each action. Or you could tell the app to run until certain\n", + "actions/conditions are met." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "be6c573158b65cb1", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:52:21.364028Z", + "start_time": "2024-03-26T20:50:52.382808Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running RAG with initial state:\n", + " {'__PRIOR_STEP': 'ai_converse',\n", + " '__SEQUENCE_ID': 1,\n", + " 'chat_history': ['Human: Who is Stefan? Please answer in English.',\n", + " 'AI: Stefan is a person who used to work at IBM, worked at '\n", + " 'Stitch Fix, likes tacos, and likes to bake sourdough.'],\n", + " 'input_texts': ['harrison worked at kensho',\n", + " 'stefan worked at Stitch Fix',\n", + " 'stefan likes tacos',\n", + " 'elijah worked at TwoSigma',\n", + " 'elijah likes mango',\n", + " 'stefan used to work at IBM',\n", + " 'elijah likes to go biking',\n", + " 'stefan likes to bake sourdough'],\n", + " 'question': 'Who is Stefan? Please answer in English.'}\n", + "⏳Processing input from user...\n", + "🎙💬 where does Elijah work? \n", + "\n", + "🤔 AI is thinking...\n", + "🤖💬 Elijah works at TwoSigma. \n", + "⏳Processing input from user...\n", + "🎙💬 does he also like tacos? \n", + "\n", + "🤔 AI is thinking...\n", + "🤖💬 Based on the given context, we cannot determine if Elijah likes tacos, as it is only mentioned that he likes mango, enjoys biking, and worked at TwoSigma. There is no mention of Elijah's preference for tacos. \n", + "⏳Processing input from user...\n", + "🎙💬 where does Harrison work? \n", + "\n", + "🤔 AI is thinking...\n", + "🤖💬 Harrison works at Kensho. \n", + "⏳Processing input from user...\n", + "🎙💬 exit \n", + "\n", + "{'chat_history': ['Human: Who is Stefan? Please answer in English.',\n", + " 'AI: Stefan is a person who used to work at IBM, worked at '\n", + " 'Stitch Fix, likes tacos, and likes to bake sourdough.',\n", + " 'Human: where does Elijah work?',\n", + " 'AI: Elijah works at TwoSigma.',\n", + " 'Human: does he also like tacos?',\n", + " 'AI: Based on the given context, we cannot determine if '\n", + " 'Elijah likes tacos, as it is only mentioned that he likes '\n", + " 'mango, enjoys biking, and worked at TwoSigma. There is no '\n", + " \"mention of Elijah's preference for tacos.\",\n", + " 'Human: where does Harrison work?',\n", + " 'AI: Harrison works at Kensho.',\n", + " 'Human: exit']}\n" + ] + } + ], + "source": [ + "print(f\"Running RAG with initial state:\\n {pprint.pformat(app.state.get_all())}\")\n", + "while True:\n", + " user_question = input(\"Ask something (or type exit to quit): \")\n", + " previous_action, result, state = app.run(\n", + " halt_before=[\"human_converse\"],\n", + " halt_after=[\"terminal\"],\n", + " inputs={\"user_question\": user_question},\n", + " )\n", + " if previous_action.name == \"terminal\":\n", + " # reached the end\n", + " pprint.pprint(result)\n", + " break" + ] + }, + { + "cell_type": "markdown", + "id": "169946a65f977df9", + "metadata": { + "collapsed": false + }, + "source": [ + "# Reloading from prior state\n", + "\n", + "Burr makes it easy to reload from a prior state. In this example we'll just use what is logged to the tracker to \"go back in time\" and reload the application to that state. \n", + "\n", + "This is useful for debugging, building the application itself, etc.\n", + "\n", + "There are two ways to load prior state:\n", + "1. Load the state outside the Burr Application. i.e. pass it in as initial state.\n", + "2. 
Use the ApplicationBuilder .initialize_from() method.\n", + "\n", + "The difference between them, is that the first method is more flexible, allowing you to create\n", + "new \"app_ids\", i.e. traces. The second method will keep the same app_id, and thus allow you \n", + "\"pick up where you left off\", e.g. in the case of a crash, or if you wanted to start from \n", + "the last conversation with a user.\n", + "\n", + "Below we show how to do the first method. Then after that the second method, to show how\n", + "to pick up the prior conversation from where it left off. \n" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "c7f4dd64f73ed2d8", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:52:37.419848Z", + "start_time": "2024-03-26T20:52:37.393869Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded state from app_id:85747466-1524-4708-a283-8e7faa67b8ed, sequence_id:7::\n", + " {'__SEQUENCE_ID': 7,\n", + " 'chat_history': ['Human: Who is Stefan? Please answer in English.',\n", + " 'AI: Stefan is a person who used to work at IBM, worked at '\n", + " 'Stitch Fix, likes tacos, and likes to bake sourdough.',\n", + " 'Human: where does Elijah work?',\n", + " 'AI: Elijah works at TwoSigma.',\n", + " 'Human: does he also like tacos?',\n", + " 'AI: Based on the given context, we cannot determine if '\n", + " 'Elijah likes tacos, as it is only mentioned that he likes '\n", + " 'mango, enjoys biking, and worked at TwoSigma. There is no '\n", + " \"mention of Elijah's preference for tacos.\",\n", + " 'Human: where does Harrison work?',\n", + " 'AI: Harrison works at Kensho.'],\n", + " 'input_texts': ['harrison worked at kensho',\n", + " 'stefan worked at Stitch Fix',\n", + " 'stefan likes tacos',\n", + " 'elijah worked at TwoSigma',\n", + " 'elijah likes mango',\n", + " 'stefan used to work at IBM',\n", + " 'elijah likes to go biking',\n", + " 'stefan likes to bake sourdough'],\n", + " 'question': 'where does Harrison work?'}\n" + ] + } + ], + "source": [ + "# set up for rewinding to a prior state -- loading it in as initial state\n", + "prior_app_id = app_id\n", + "last_sequence_id = app.sequence_id\n", + "rewind_to_sequence_id = last_sequence_id - 2\n", + "new_app_id = str(uuid.uuid4())\n", + "\n", + "project_name = \"demo:conversational-rag\"\n", + "# we use the tracking client here to get the state of the application at a prior sequence_id\n", + "tracker = LocalTrackingClient(project=project_name)\n", + "persisted_state = tracker.load(partition_key=\"sample_user\", \n", + " app_id=prior_app_id, \n", + " sequence_id=rewind_to_sequence_id)\n", + "state_values = persisted_state['state'].get_all()\n", + "print(f\"Loaded state from app_id:{prior_app_id}, \"\n", + " f\"sequence_id:{rewind_to_sequence_id}::\\n \"\n", + " f\"{pprint.pformat(state_values)}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5ee618e3-15c0-403b-bc96-3a2faaea457e", + "metadata": {}, + "outputs": [], + "source": [ + "other_app = (\n", + " ApplicationBuilder()\n", + " # add the actions\n", + " .with_actions(\n", + " # bind the vector store to the AI conversational step\n", + " ai_converse=ai_converse.bind(vector_store=vector_store),\n", + " human_converse=human_converse,\n", + " terminal=burr.core.Result(\"chat_history\"),\n", + " )\n", + " # set the transitions between actions\n", + " .with_transitions(\n", + " (\"ai_converse\", \"human_converse\", default),\n", + " (\"human_converse\", \"terminal\", 
expr(\"'exit' in question\")),\n", + " (\"human_converse\", \"ai_converse\", default),\n", + " )\n", + " # add identifiers that will help track the application\n", + " .with_identifiers(app_id=new_app_id, partition_key=\"sample_user\")\n", + " # set state to prior state\n", + " .with_state(**persisted_state[\"state\"].get_all())\n", + " # say where we want to start\n", + " .with_entrypoint(\"human_converse\")\n", + " # add a hook to print the steps -- optional but shows that Burr is pluggable\n", + " .with_hooks(PrintStepHook())\n", + " # add tracking -- this will show up in the BURR UI.\n", + " .with_tracker(tracker)\n", + " # build the application\n", + " .build()\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "34140c5864b940dc", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T20:54:24.035153Z", + "start_time": "2024-03-26T20:53:19.237522Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running RAG with loaded state:\n", + " {'__SEQUENCE_ID': 7,\n", + " 'chat_history': ['Human: Who is Stefan? Please answer in English.',\n", + " 'AI: Stefan is a person who used to work at IBM, worked at '\n", + " 'Stitch Fix, likes tacos, and likes to bake sourdough.',\n", + " 'Human: where does Elijah work?',\n", + " 'AI: Elijah works at TwoSigma.',\n", + " 'Human: does he also like tacos?',\n", + " 'AI: Based on the given context, we cannot determine if '\n", + " 'Elijah likes tacos, as it is only mentioned that he likes '\n", + " 'mango, enjoys biking, and worked at TwoSigma. There is no '\n", + " \"mention of Elijah's preference for tacos.\",\n", + " 'Human: where does Harrison work?',\n", + " 'AI: Harrison works at Kensho.'],\n", + " 'input_texts': ['harrison worked at kensho',\n", + " 'stefan worked at Stitch Fix',\n", + " 'stefan likes tacos',\n", + " 'elijah worked at TwoSigma',\n", + " 'elijah likes mango',\n", + " 'stefan used to work at IBM',\n", + " 'elijah likes to go biking',\n", + " 'stefan likes to bake sourdough'],\n", + " 'question': 'where does Harrison work?'}\n", + "⏳Processing input from user...\n", + "🎙💬 does Harrison like pizza? \n", + "\n", + "🤔 AI is thinking...\n", + "🤖💬 Based on the context given, we do not have any information about whether Harrison likes pizza or not. \n", + "⏳Processing input from user...\n", + "🎙💬 I am going to a mexican restaurant. Who should I take with me? \n", + "\n", + "🤔 AI is thinking...\n", + "🤖💬 You should take Stefan with you to the Mexican restaurant. \n", + "⏳Processing input from user...\n", + "🎙💬 exit \n", + "\n", + "{'chat_history': ['Human: Who is Stefan? Please answer in English.',\n", + " 'AI: Stefan is a person who used to work at IBM, worked at '\n", + " 'Stitch Fix, likes tacos, and likes to bake sourdough.',\n", + " 'Human: where does Elijah work?',\n", + " 'AI: Elijah works at TwoSigma.',\n", + " 'Human: does he also like tacos?',\n", + " 'AI: Based on the given context, we cannot determine if '\n", + " 'Elijah likes tacos, as it is only mentioned that he likes '\n", + " 'mango, enjoys biking, and worked at TwoSigma. There is no '\n", + " \"mention of Elijah's preference for tacos.\",\n", + " 'Human: where does Harrison work?',\n", + " 'AI: Harrison works at Kensho.',\n", + " 'Human: does Harrison like pizza?',\n", + " 'AI: Based on the context given, we do not have any '\n", + " 'information about whether Harrison likes pizza or not.',\n", + " 'Human: I am going to a mexican restaurant. 
Who should I '\n", + " 'take with me?',\n", + " 'AI: You should take Stefan with you to the Mexican '\n", + " 'restaurant.',\n", + " 'Human: exit']}\n" + ] + } + ], + "source": [ + "# We can now change test, debug, etc. from this prior state.\n", + "print(f\"Running RAG with loaded state:\\n {pprint.pformat(state_values)}\")\n", + "while True:\n", + " user_question = input(\"Ask something (or type exit to quit): \")\n", + " previous_action, result, state = other_app.run(\n", + " halt_before=[\"human_converse\"],\n", + " halt_after=[\"terminal\"],\n", + " inputs={\"user_question\": user_question},\n", + " )\n", + " if previous_action and previous_action.name == \"terminal\":\n", + " # reached the end\n", + " pprint.pprint(result)\n", + " break\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "id": "fc62a033644c7b80", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T21:04:56.748649Z", + "start_time": "2024-03-26T21:04:56.742019Z" + }, + "collapsed": false + }, + "outputs": [], + "source": [ + "# Now let's show how to use the ApplicationBuilder.initialize_from() method to pick up where we left off.\n", + "# This is useful if you want to continue a conversation with a user, or if you had a crash, etc.\n", + "\n", + "# set up for rewinding to a prior state -- loading it in as initial state\n", + "prior_app_id = app_id\n", + "new_app_id = str(uuid.uuid4())\n", + "\n", + "project_name = \"demo:conversational-rag\"\n", + "# we use the tracking client here to get the state of the application at a prior sequence_id\n", + "tracker = LocalTrackingClient(project=project_name)\n", + "pick_up_where_we_left_off_app = (\n", + " ApplicationBuilder()\n", + " # add the actions\n", + " .with_actions(\n", + " # bind the vector store to the AI conversational step\n", + " ai_converse=ai_converse.bind(vector_store=vector_store),\n", + " human_converse=human_converse,\n", + " terminal=burr.core.Result(\"chat_history\"),\n", + " )\n", + " # set the transitions between actions\n", + " .with_transitions(\n", + " (\"ai_converse\", \"human_converse\", default),\n", + " (\"human_converse\", \"terminal\", expr(\"'exit' in question\")),\n", + " (\"human_converse\", \"ai_converse\", default),\n", + " )\n", + " # add identifiers that will help track the application\n", + " .with_identifiers(app_id=prior_app_id, partition_key=\"sample_user\")\n", + " .initialize_from(\n", + " initializer=tracker,\n", + " resume_at_next_action=False, # we want to always start at human_converse; our entrypoint\n", + " default_entrypoint=\"human_converse\",\n", + " default_state=initial_state, # set some default state incase we can't find the prior state\n", + " )\n", + " # add a hook to print the steps -- optional but shows that Burr is pluggable\n", + " .with_hooks(PrintStepHook())\n", + " # add tracking -- this will show up in the BURR UI.\n", + " .with_tracker(tracker)\n", + " # build the application\n", + " .build()\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "id": "b6d23d6d6a6643d0", + "metadata": { + "ExecuteTime": { + "end_time": "2024-03-26T21:05:41.246005Z", + "start_time": "2024-03-26T21:05:02.855430Z" + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running RAG with loaded state:\n", + " {'__PRIOR_STEP': 'terminal',\n", + " '__SEQUENCE_ID': 9,\n", + " 'chat_history': ['Human: Who is Stefan? 
Please answer in English.',\n", + " 'AI: Stefan is a person who used to work at IBM, worked at '\n", + " 'Stitch Fix, likes tacos, and likes to bake sourdough.',\n", + " 'Human: where does Elijah work?',\n", + " 'AI: Elijah works at TwoSigma.',\n", + " 'Human: does he also like tacos?',\n", + " 'AI: Based on the given context, we cannot determine if '\n", + " 'Elijah likes tacos, as it is only mentioned that he likes '\n", + " 'mango, enjoys biking, and worked at TwoSigma. There is no '\n", + " \"mention of Elijah's preference for tacos.\",\n", + " 'Human: where does Harrison work?',\n", + " 'AI: Harrison works at Kensho.',\n", + " 'Human: exit'],\n", + " 'input_texts': ['harrison worked at kensho',\n", + " 'stefan worked at Stitch Fix',\n", + " 'stefan likes tacos',\n", + " 'elijah worked at TwoSigma',\n", + " 'elijah likes mango',\n", + " 'stefan used to work at IBM',\n", + " 'elijah likes to go biking',\n", + " 'stefan likes to bake sourdough'],\n", + " 'question': 'exit'}\n", + "⏳Processing input from user...\n", + "🎙💬 who would most likely enjoy a fruit salad? \n", + "\n", + "🤔 AI is thinking...\n", + "🤖💬 Elijah would most likely enjoy a fruit salad since he is known to like mango. \n", + "⏳Processing input from user...\n", + "🎙💬 who would be the most helpful in terms of financial advice? \n", + "\n", + "🤔 AI is thinking...\n", + "🤖💬 Elijah would be the most helpful person for financial advice since he worked at TwoSigma, a financial services company. \n", + "⏳Processing input from user...\n", + "🎙💬 exit \n", + "\n", + "{'chat_history': ['Human: Who is Stefan? Please answer in English.',\n", + " 'AI: Stefan is a person who used to work at IBM, worked at '\n", + " 'Stitch Fix, likes tacos, and likes to bake sourdough.',\n", + " 'Human: where does Elijah work?',\n", + " 'AI: Elijah works at TwoSigma.',\n", + " 'Human: does he also like tacos?',\n", + " 'AI: Based on the given context, we cannot determine if '\n", + " 'Elijah likes tacos, as it is only mentioned that he likes '\n", + " 'mango, enjoys biking, and worked at TwoSigma. 
There is no '\n", + " \"mention of Elijah's preference for tacos.\",\n", + " 'Human: where does Harrison work?',\n", + " 'AI: Harrison works at Kensho.',\n", + " 'Human: exit',\n", + " 'Human: who would most likely enjoy a fruit salad?',\n", + " 'AI: Elijah would most likely enjoy a fruit salad since he '\n", + " 'is known to like mango.',\n", + " 'Human: who would be the most helpful in terms of financial '\n", + " 'advice?',\n", + " 'AI: Elijah would be the most helpful person for financial '\n", + " 'advice since he worked at TwoSigma, a financial services '\n", + " 'company.',\n", + " 'Human: exit']}\n" + ] + } + ], + "source": [ + "print(f\"Running RAG with loaded state:\\n {pprint.pformat(app.state.get_all())}\")\n", + "while True:\n", + " user_question = input(\"Ask something (or type exit to quit): \")\n", + " previous_action, result, state = pick_up_where_we_left_off_app.run(\n", + " halt_before=[\"human_converse\"],\n", + " halt_after=[\"terminal\"],\n", + " inputs={\"user_question\": user_question},\n", + " )\n", + " if previous_action and previous_action.name == \"terminal\":\n", + " # reached the end\n", + " pprint.pprint(result)\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe401e84026db9bb", + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.4" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/counter/notebook.ipynb b/examples/counter/notebook.ipynb new file mode 100644 index 00000000..1ecdbc4a --- /dev/null +++ b/examples/counter/notebook.ipynb @@ -0,0 +1,193 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 26, + "id": "initial_id", + "metadata": { + "collapsed": true, + "ExecuteTime": { + "end_time": "2024-03-27T00:11:36.093767Z", + "start_time": "2024-03-27T00:11:36.088301Z" + } + }, + "outputs": [], + "source": [ + "from typing import List, Optional, Tuple\n", + "\n", + "import burr.core\n", + "from burr.core import Result, State, default, expr\n", + "from burr.core.action import action\n", + "from burr.core.persistence import SQLLitePersister\n", + "from burr.lifecycle import LifecycleAdapter\n" + ] + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "@action(reads=[\"counter\"], writes=[\"counter\"])\n", + "def counter(state: State) -> Tuple[dict, State]:\n", + " result = {\"counter\": state[\"counter\"] + 1}\n", + " print(f\"counted to {result['counter']}\")\n", + " return result, state.update(**result)\n", + "\n", + "\n", + "# some configuration\n", + "count_up_to: int = 10\n", + "partition_key: str = \"demo-user\"\n", + "app_id: Optional[str] = \"some-unique-id\"\n", + "storage_dir: Optional[str] = \".\"\n", + "hooks: Optional[List[LifecycleAdapter]] = None\n", + "# set up a persister -- if you rerun this script, it will resume from where it left off.\n", + "# change the app_id to start from scratch\n", + "persister = SQLLitePersister(\"demos.db\", \"counter\")\n", + "persister.initialize()" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:11:36.548223Z", + "start_time": "2024-03-27T00:11:36.539358Z" + } + }, + "id": 
"64c67b15363653e9", + "execution_count": 27 + }, + { + "cell_type": "code", + "outputs": [ + { + "data": { + "image/svg+xml": "\n\n\n\n\n\n\n\n\ncounter\n\ncounter\n\n\n\ncounter->counter\n\n\ncounter < 10\n\n\n\nresult\n\nresult\n\n\n\ncounter->result\n\n\n\n\n\n", + "text/plain": "" + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "app = (\n", + " burr.core.ApplicationBuilder()\n", + " .with_actions(counter=counter, result=Result(\"counter\"))\n", + " .with_transitions(\n", + " (\"counter\", \"counter\", expr(f\"counter < {count_up_to}\")),\n", + " (\"counter\", \"result\", default),\n", + " )\n", + " .with_identifiers(partition_key=partition_key, app_id=app_id)\n", + " .initialize_from(\n", + " persister,\n", + " resume_at_next_action=True,\n", + " default_state={\"counter\": 0},\n", + " default_entrypoint=\"counter\",\n", + " )\n", + " .with_state_persister(persister)\n", + " .with_tracker(project=\"demo:counter\", params={\"storage_dir\": storage_dir})\n", + " .with_hooks(*hooks if hooks else [])\n", + " .build()\n", + ")\n", + "app.visualize(include_conditions=True)" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:11:37.753610Z", + "start_time": "2024-03-27T00:11:37.490638Z" + } + }, + "id": "f43d7208a78b363e", + "execution_count": 28 + }, + { + "cell_type": "code", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "counted to 1\n", + "counted to 2\n", + "counted to 3\n", + "counted to 4\n", + "counted to 5\n", + "counted to 6\n", + "counted to 7\n", + "counted to 8\n", + "counted to 9\n", + "counted to 10\n", + "10\n" + ] + } + ], + "source": [ + "action_obj, result, state = app.run(halt_after=[\"result\"])\n", + "print(state[\"counter\"])" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:11:38.483852Z", + "start_time": "2024-03-27T00:11:38.463109Z" + } + }, + "id": "a63802fa10f73ca8", + "execution_count": 29 + }, + { + "cell_type": "code", + "outputs": [ + { + "data": { + "text/plain": "{'counter': 10, '__SEQUENCE_ID': 10, '__PRIOR_STEP': 'result'}" + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "app.state.get_all()" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:11:41.692154Z", + "start_time": "2024-03-27T00:11:41.687049Z" + } + }, + "id": "42ba7f71fd68f013", + "execution_count": 30 + }, + { + "cell_type": "code", + "outputs": [], + "source": [], + "metadata": { + "collapsed": false + }, + "id": "e6b6f52d6ebe9eb2" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/cowsay/notebook.ipynb b/examples/cowsay/notebook.ipynb new file mode 100644 index 00000000..3f4e8b26 --- /dev/null +++ b/examples/cowsay/notebook.ipynb @@ -0,0 +1,313 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "initial_id", + "metadata": { + "collapsed": true, + "ExecuteTime": { + "end_time": "2024-03-27T00:12:50.561804Z", + "start_time": "2024-03-27T00:12:50.531039Z" + } + }, + "outputs": [], + "source": [ + "import random\n", 
+ "import time\n", + "from typing import Tuple\n", + "\n", + "import cowsay\n", + "\n", + "from burr.core import Action, Application, ApplicationBuilder, State, default, expr\n", + "from burr.core.action import action\n", + "from burr.lifecycle import PostRunStepHook" + ] + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "# create some hooks!\n", + "class PrintWhatTheCowSaid(PostRunStepHook):\n", + " def post_run_step(self, *, state: \"State\", action: \"Action\", **future_kwargs):\n", + " if action.name != \"cow_should_say\" and state[\"cow_said\"] is not None:\n", + " print(state[\"cow_said\"])\n", + "\n", + "\n", + "class CowCantSpeakFast(PostRunStepHook):\n", + " def __init__(self, sleep_time: float):\n", + " super(PostRunStepHook, self).__init__()\n", + " self.sleep_time = sleep_time\n", + "\n", + " def post_run_step(self, *, state: \"State\", action: \"Action\", **future_kwargs):\n", + " if action.name != \"cow_should_say\": # no need to print if we're not saying anything\n", + " time.sleep(self.sleep_time)" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:13:03.529875Z", + "start_time": "2024-03-27T00:13:03.524449Z" + } + }, + "id": "9f5b9bfc78851a59", + "execution_count": 3 + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "# instantiate actions\n", + "@action(reads=[], writes=[\"cow_said\"])\n", + "def cow_said(state: State, say_what: list[str]) -> Tuple[dict, State]:\n", + " said = random.choice(say_what)\n", + " result = {\"cow_said\": cowsay.get_output_string(\"cow\", said) if say_what is not None else None}\n", + " return result, state.update(**result)\n", + "\n", + "\n", + "@action(reads=[], writes=[\"cow_should_speak\"])\n", + "def cow_should_speak(state: State) -> Tuple[dict, State]:\n", + " result = {\"cow_should_speak\": random.randint(0, 3) == 0}\n", + " return result, state.update(**result)\n" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:13:12.756137Z", + "start_time": "2024-03-27T00:13:12.750764Z" + } + }, + "id": "5b29e9de2ca536b9", + "execution_count": 4 + }, + { + "cell_type": "code", + "outputs": [], + "source": [ + "# build application\n", + "hooks = [\n", + " PrintWhatTheCowSaid(),\n", + " CowCantSpeakFast(sleep_time=2.0),\n", + "]\n", + " \n", + "app = (ApplicationBuilder()\n", + " .with_state(cow_said=None)\n", + " .with_actions(\n", + " say_nothing=cow_said.bind(say_what=None),\n", + " say_hello=cow_said.bind(\n", + " say_what=[\"Hello world!\", \"What's up?\", \"Are you Aaron Burr, sir?\"]\n", + " ),\n", + " cow_should_speak=cow_should_speak,\n", + " )\n", + " .with_transitions(\n", + " (\"cow_should_speak\", \"say_hello\", expr(\"cow_should_speak\")),\n", + " (\"say_hello\", \"cow_should_speak\", default),\n", + " (\"cow_should_speak\", \"say_nothing\", expr(\"not cow_should_speak\")),\n", + " (\"say_nothing\", \"cow_should_speak\", default),\n", + " )\n", + " .with_entrypoint(\"cow_should_speak\")\n", + " .with_hooks(*hooks)\n", + " .build()\n", + ")" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:14:10.836543Z", + "start_time": "2024-03-27T00:14:10.829258Z" + } + }, + "id": "271b868317fbbdf4", + "execution_count": 5 + }, + { + "cell_type": "code", + "outputs": [ + { + "data": { + "image/svg+xml": 
"\n\n\n\n\n\n\n\n\nsay_nothing\n\nsay_nothing\n\n\n\ncow_should_speak\n\ncow_should_speak\n\n\n\nsay_nothing->cow_should_speak\n\n\n\n\n\nsay_hello\n\nsay_hello\n\n\n\nsay_hello->cow_should_speak\n\n\n\n\n\ncow_should_speak->say_nothing\n\n\nnot cow_should_speak\n\n\n\ncow_should_speak->say_hello\n\n\ncow_should_speak\n\n\n\n", + "text/plain": "" + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "app.visualize(include_conditions=True)" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:14:29.829383Z", + "start_time": "2024-03-27T00:14:29.511355Z" + } + }, + "id": "794bc0abda9e48ad", + "execution_count": 6 + }, + { + "cell_type": "code", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " ____________\n", + "| Hello world! |\n", + " ============\n", + " \\\n", + " \\\n", + " ^__^\n", + " (oo)\\_______\n", + " (__)\\ )\\/\\\n", + " ||----w |\n", + " || ||\n", + " ____________\n", + "| Hello world! |\n", + " ============\n", + " \\\n", + " \\\n", + " ^__^\n", + " (oo)\\_______\n", + " (__)\\ )\\/\\\n", + " ||----w |\n", + " || ||\n", + " ____________\n", + "| Hello world! |\n", + " ============\n", + " \\\n", + " \\\n", + " ^__^\n", + " (oo)\\_______\n", + " (__)\\ )\\/\\\n", + " ||----w |\n", + " || ||\n", + " ____________\n", + "| Hello world! |\n", + " ============\n", + " \\\n", + " \\\n", + " ^__^\n", + " (oo)\\_______\n", + " (__)\\ )\\/\\\n", + " ||----w |\n", + " || ||\n" + ] + } + ], + "source": [ + "# run things!\n", + "for i in range(0, 4):\n", + " # step through 4 times\n", + " a_obj, result, state = app.step()" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:16:40.234406Z", + "start_time": "2024-03-27T00:16:32.211734Z" + } + }, + "id": "2facfb753233cd7d", + "execution_count": 11 + }, + { + "cell_type": "code", + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " ________________________\n", + "| Are you Aaron Burr, sir? |\n", + " ========================\n", + " \\\n", + " \\\n", + " ^__^\n", + " (oo)\\_______\n", + " (__)\\ )\\/\\\n", + " ||----w |\n", + " || ||\n", + " ________________________\n", + "| Are you Aaron Burr, sir? |\n", + " ========================\n", + " \\\n", + " \\\n", + " ^__^\n", + " (oo)\\_______\n", + " (__)\\ )\\/\\\n", + " ||----w |\n", + " || ||\n", + " ____________\n", + "| Hello world! |\n", + " ============\n", + " \\\n", + " \\\n", + " ^__^\n", + " (oo)\\_______\n", + " (__)\\ )\\/\\\n", + " ||----w |\n", + " || ||\n", + " ____________\n", + "| Hello world! 
|\n", + " ============\n", + " \\\n", + " \\\n", + " ^__^\n", + " (oo)\\_______\n", + " (__)\\ )\\/\\\n", + " ||----w |\n", + " || ||\n" + ] + } + ], + "source": [ + "# Run some more\n", + "for i in range(0, 4):\n", + " # step through 4 times\n", + " a_obj, result, state = app.step()" + ], + "metadata": { + "collapsed": false, + "ExecuteTime": { + "end_time": "2024-03-27T00:17:18.640978Z", + "start_time": "2024-03-27T00:17:10.619878Z" + } + }, + "id": "fed65721e7cc457a", + "execution_count": 16 + }, + { + "cell_type": "code", + "outputs": [], + "source": [], + "metadata": { + "collapsed": false + }, + "id": "19095ae96e98fd83" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.6" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/examples/multi-agent-collaboration/README.md b/examples/multi-agent-collaboration/README.md new file mode 100644 index 00000000..e69de29b From 5ce8fb26dfedf7729b950202f9110ab7a35abdbf Mon Sep 17 00:00:00 2001 From: Stefan Krawczyk Date: Tue, 26 Mar 2024 17:35:29 -0700 Subject: [PATCH 2/2] Bumps version to 0.11.0 To push out minor API changes to builder. --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index dd4a75eb..3afbeae6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "burr" -version = "0.10.2" +version = "0.11.0" dependencies = [] # yes, there are none requires-python = ">=3.9" authors = [