From bbae49e32b75fd00a16e3814aed530118e270b33 Mon Sep 17 00:00:00 2001
From: Jason Liu
Date: Mon, 1 Apr 2024 11:30:12 -0400
Subject: [PATCH] init

---
 README.md        | 21 +++++++++++++++++++--
 requirements.txt |  4 ++++
 run.py           | 49 ++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 72 insertions(+), 2 deletions(-)
 create mode 100644 requirements.txt
 create mode 100644 run.py

diff --git a/README.md b/README.md
index 8daeefc..d577b38 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,19 @@
-# cloud
-Annoucing Instructor Cloud
+# ☁️ Instructor Cloud ☁️
+
+Introducing Instructor Cloud
+
+With Instructor Cloud, you can:
+
+- Extract models from any text with blazing speed 🚀
+- Stream extracted data in real-time
+- Rely on the power of GPT-4* to do your job for you! 🤖
+
+*GPT-4 not included. Accuracy not guaranteed. Use at your own risk.
+
+## Running FastAPI
+
+To run the FastAPI server, use the following command:
+
+```sh
+uvicorn run:app --reload
+```
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..dd3ac13
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,4 @@
+instructor
+fastapi
+pydantic
+uvicorn
\ No newline at end of file
diff --git a/run.py b/run.py
new file mode 100644
index 0000000..c40c750
--- /dev/null
+++ b/run.py
@@ -0,0 +1,49 @@
+from typing import List
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
+
+
+import instructor
+import openai
+
+app = FastAPI()
+client = instructor.from_openai(openai.OpenAI(), model="gpt-4-turbo-preview")
+
+
+class Property(BaseModel):
+    name: str
+    value: str
+
+
+class User(BaseModel):
+    name: str
+    age: int
+    properties: List[Property]
+
+
+@app.post("/v1/extract_user", response_model=User)
+def extract_user(text: str):
+    user = client.chat.completions.create(
+        messages=[
+            {"role": "user", "content": f"Extract user from `{text}`"},
+        ],
+        response_model=User,
+    )
+    return user
+
+
+@app.post("/v1/extract_user_stream")
+def extract_user_stream(text: str):
+    user_stream = client.chat.completions.create_partial(
+        messages=[
+            {"role": "user", "content": f"Extract user from `{text}`"},
+        ],
+        response_model=User,
+    )
+
+    def stream():
+        for partial_user in user_stream:
+            yield f"data: {partial_user.model_dump_json()}\n\n"
+
+    return StreamingResponse(stream(), media_type="text/event-stream")
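
For anyone smoke-testing the patch, here is a minimal client sketch for the one-shot `/v1/extract_user` endpoint. It assumes the server is running via `uvicorn run:app --reload` on uvicorn's default `127.0.0.1:8000`, that `OPENAI_API_KEY` is set in the server's environment, and that `requests` is installed (it is not in requirements.txt); the input text is purely illustrative. Because the handler declares a bare `text: str`, FastAPI reads it as a query parameter, so it goes in `params=` rather than the request body.

```python
import requests

# Assumed local server address (uvicorn's defaults); adjust as needed.
BASE = "http://127.0.0.1:8000"
# Hypothetical input text, purely for illustration.
TEXT = "Jason is 25 years old and enjoys writing Python"

# POST with `text` as a query parameter -- FastAPI treats a bare `str`
# handler argument as a query param, not a JSON body field.
resp = requests.post(f"{BASE}/v1/extract_user", params={"text": TEXT})
resp.raise_for_status()

# The body is the extracted User model serialized by FastAPI,
# e.g. {"name": "...", "age": ..., "properties": [...]}.
print(resp.json())
```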
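The streaming endpoint speaks Server-Sent Events: each `data:` line carries a JSON snapshot of the partially extracted `User`, growing as the model fills in fields. A sketch of consuming it, under the same assumptions (local server, illustrative input, `requests` as an extra dependency):

```python
import json

import requests

BASE = "http://127.0.0.1:8000"  # assumed default uvicorn host/port

with requests.post(
    f"{BASE}/v1/extract_user_stream",
    params={"text": "Jason is 25 years old"},  # illustrative input
    stream=True,  # keep the connection open and read the SSE stream lazily
) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines():
        # SSE frames look like `data: {...}` followed by a blank line;
        # iter_lines() yields bytes, so match and strip the byte prefix.
        if line.startswith(b"data: "):
            partial = json.loads(line[len(b"data: "):])
            print(partial)  # e.g. {"name": "Jason", "age": None, ...}
```

Since each event is a complete snapshot rather than a delta, the client can simply replace its last value with the newest frame, which keeps the consuming logic trivial.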