Adds script for generating all demo data
elijahbenizzy committed Feb 27, 2024
1 parent e57210c commit c0421df
Showing 3 changed files with 168 additions and 1 deletion.
28 changes: 27 additions & 1 deletion burr/cli/__main__.py
@@ -1,6 +1,7 @@
import os
import shutil
import subprocess
import sys
import threading
import time
import webbrowser
@@ -91,13 +92,24 @@ def build_ui():
@click.option("--port", default=7241, help="Port to run the server on")
@click.option("--dev-mode", is_flag=True, help="Run the server in development mode")
@click.option("--no-open", is_flag=True, help="Run the server without opening it")
def run_server(port: int, dev_mode: bool, no_open: bool):
@click.option("--no-copy-demo_data", is_flag=True, help="Don't copy demo data over.")
def run_server(port: int, dev_mode: bool, no_open: bool, no_copy_demo_data: bool):
    # TODO: Implement server running logic here
    # Example: Start a web server, configure ports, etc.
    logger.info(f"Starting server on port {port}")
    cmd = f"uvicorn burr.tracking.server.run:app --port {port}"
    if dev_mode:
        cmd += " --reload"
    base_dir = os.path.expanduser("~/.burr")
    if not no_copy_demo_data:
        logger.info(f"Copying demo data over to {base_dir}...")
        git_root = _get_git_root()
        demo_data_path = f"{git_root}/burr/tracking/server/demo_data"
        for top_level in os.listdir(demo_data_path):
            if not os.path.exists(f"{base_dir}/{top_level}"):
                logger.info(f"Copying {top_level} over...")
                shutil.copytree(f"{demo_data_path}/{top_level}", f"{base_dir}/{top_level}")

    if not no_open:
        thread = threading.Thread(
            target=open_when_ready,
@@ -129,6 +141,20 @@ def build_and_publish(prod: bool, no_wipe_dist: bool):
    logger.info(f"Published to {repository}! 🎉")


@cli.command(help="generate demo data for the UI")
def generate_demo_data():
    git_root = _get_git_root()
    # We need to add the examples directory to the path so we have all the imports
    # The GPT-one relies on a local import
    sys.path.extend([git_root, f"{git_root}/examples/gpt"])
    from burr.cli.demo_data import generate_all

    with cd(git_root):
        logger.info("Removing old demo data")
        shutil.rmtree("burr/tracking/server/demo_data", ignore_errors=True)
        generate_all("burr/tracking/server/demo_data")
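
Editor's note: both run_server and generate_demo_data call _get_git_root, whose definition is not part of this diff. Purely as an illustrative sketch under that assumption (the real helper in burr/cli/__main__.py may be implemented differently), such a helper typically just asks git for the repository root:

import subprocess

def _get_git_root_sketch() -> str:
    """Hypothetical stand-in for _get_git_root: returns the repo root path."""
    # `git rev-parse --show-toplevel` prints the absolute path of the repository root
    return subprocess.check_output(
        ["git", "rev-parse", "--show-toplevel"], text=True
    ).strip()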


# quick trick to expose every subcommand as a variable
# will create a command called `cli_{command}` for every command we have
for key, command in cli.commands.items():
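
Editor's note: the body of the loop above is elided in this diff. The "quick trick" it refers to is re-exporting every click subcommand as a module-level callable named cli_<command>, which is what the console-script entries in pyproject.toml point at. A minimal, self-contained sketch of that pattern, illustrative only and not necessarily identical to Burr's implementation:

import click

@click.group()
def cli():
    pass

@cli.command()
def generate_demo_data():
    click.echo("generating demo data...")

# Re-export each subcommand as a module-level callable named cli_<command>,
# so a console_scripts entry point such as "pkg.cli:cli_generate_demo_data"
# can reference it directly.
for _name, _command in cli.commands.items():
    globals()[f"cli_{_name.replace('-', '_')}"] = _command

With a variable like cli_generate_demo_data exposed this way, the new entry point added to pyproject.toml below only needs to name it.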
140 changes: 140 additions & 0 deletions burr/cli/demo_data.py
@@ -0,0 +1,140 @@
import os
from typing import Any, Dict, Optional

from burr.core import Action
from burr.lifecycle import PostRunStepHook, PreRunStepHook
from examples.conversational_rag import application as conversational_rag_application
from examples.counter import application as counter_application
from examples.gpt import application as chatbot_application


class ProgressHook(
    PreRunStepHook,
    PostRunStepHook,
):
    def pre_run_step(self, *, action: "Action", inputs: Dict[str, Any], **future_kwargs: Any):
        print(f">>> Running action {action.name} with inputs {inputs}")

    def post_run_step(
        self,
        *,
        action: "Action",
        result: Optional[Dict[str, Any]],
        exception: Exception,
        **future_kwargs: Any,
    ):
        print(f">>> Action {action.name} completed")


def generate_chatbot_data(data_dir: str):
    working_conversations = {
        "chat-1-giraffe": [
            "Please draw a giraffe.",  # Answered by the image mode
            "Please write a function that queries the internet for the height of a giraffe",  # answered by the code mode
            "OK, just tell me, how tall is a giraffe?",  # answered by the question mode
            "Please build me a giraffe",  # Answered by nothing
            "If Aaron Burr were an animal, would he be a giraffe?",  # answered by the question mode
        ],
        "chat-2-geography": [
            "What is the capital of France?",  # answered by the question mode
            "Please draw a map of the world",  # answered by the image mode
            "Please write code to compute the circumference of the earth",  # answered by the code mode
            "Geography! Geography! Geography!",  # answered by nothing
        ],
        "chat-3-physics": [
            "Please draw a free-body diagram of a block on a ramp",  # answered by the image mode
            "Please write code to compute the force of gravity on the moon",  # answered by the code mode
            "What is the force of gravity on the moon?",  # answered by the question mode
            "Please build me a block on a ramp",  # answered by nothing
        ],
        "chat-4-philosophy": [
            "Please draw a picture of a philosopher",  # answered by the image mode
            "Please write code to compute the meaning of life (hint, it's 42)",  # answered by the code mode
            "What is the meaning of life?",  # answered by the question mode (ish)
        ],
        "chat-5-jokes": [
            "Please draw a picture of a good joke",  # answered by the image mode
            "Please write code for an interactive knock-knock joke",  # answered by the code mode
            "What is a good joke?",  # answered by the question mode
            "The chicken crossed the road because it was a free-range chicken",  # answered by nothing
        ],
    }
    broken_conversations = {"chat-6-demonstrate-errors": working_conversations["chat-1-giraffe"]}

    def _run_conversation(app_id, prompts):
        app = chatbot_application.application(
            use_hamilton=False,
            app_id=app_id,
            storage_dir=data_dir,
            hooks=[ProgressHook()],
        )
        for prompt in prompts:
            app.run(halt_after=["response"], inputs={"prompt": prompt})

    for app_id, prompts in sorted(working_conversations.items()):
        _run_conversation(app_id, prompts)
    old_api_key = os.environ.get("OPENAI_API_KEY")
    os.environ["OPENAI_API_KEY"] = "fake"
    for app_id, prompts in sorted(broken_conversations.items()):
        try:
            _run_conversation(app_id, prompts)
        except Exception as e:
            print(f"Got an exception: {e}")
    os.environ["OPENAI_API_KEY"] = old_api_key


def generate_counter_data(data_dir: str = "~/.burr"):
    counts = [1, 10, 100, 50, 42]
    for count in counts:
        app = counter_application.application(
            count_up_to=count,
            app_id=f"count-to-{count}",
            storage_dir=data_dir,
            hooks=[ProgressHook()],
        )
        app.run(halt_after=["result"])


def generate_rag_data(data_dir: str = "~/.burr"):
    conversations = {
        "rag-1-food": [
            "What is Elijah's favorite food?",
            "What is Stefan's favorite food?",
            "What is Aaron's favorite food?",  # unknown
            "exit",
        ],
        "rag-2-work-history": [
            "Where did Elijah work?",
            "Where did Stefan work?",
            "Where did Harrison work?",
            "Where did Jonathan work?",
            "Did Stefan and Harrison work together?",
            "exit",
        ],
        "rag-3-activities": [
            "What does Elijah like to do?",
            "What does Stefan like to do?",
            "exit",
        ],
        "rag-4-everything": [
            "What is Elijah's favorite food?",
            "Where did Elijah work?",
            "Where did Stefan work?",
            "What does Elijah like to do?",
            "What is Stefan's favorite food?",
            "Whose favorite food is better, Elijah's or Stefan's?",
            "exit",
        ],
    }
    for app_id, prompts in sorted(conversations.items()):
        app = conversational_rag_application.application(
            app_id=app_id,
            storage_dir=data_dir,
            hooks=[ProgressHook()],
        )
        for prompt in prompts:
            app.run(halt_after=["ai_converse", "terminal"], inputs={"user_question": prompt})


def generate_all(data_dir: str):
    generate_chatbot_data(data_dir)
    generate_counter_data(data_dir)
    generate_rag_data(data_dir)
1 change: 1 addition & 0 deletions pyproject.toml
@@ -111,3 +111,4 @@ burr = "burr.cli.__main__:cli_run_server"
burr-admin-server = "burr.cli.__main__:cli_run_server"
burr-admin-publish = "burr.cli.__main__:cli_build_and_publish"
burr-admin-build-ui = "burr.cli.__main__:cli_build_ui"
burr-admin-generate-demo-data = "burr.cli.__main__:cli_generate_demo_data"
