diff --git a/app/plugins/elevenlabs.py b/app/plugins/elevenlabs.py
index 09d7cce..6d999ac 100644
--- a/app/plugins/elevenlabs.py
+++ b/app/plugins/elevenlabs.py
@@ -2,6 +2,5 @@
 from elevenlabs import *
 
 ELEVENLABS_API_KEY = os.environ.get("ELEVENLABS_API_KEY")
-print(ELEVENLABS_API_KEY)
 set_api_key(ELEVENLABS_API_KEY)
 
diff --git a/app/routers/chat.py b/app/routers/chat.py
index 2bf5cdf..2f0ebb4 100644
--- a/app/routers/chat.py
+++ b/app/routers/chat.py
@@ -8,6 +8,7 @@
 
 characters = {}
 
+
 def get_character(character_id: str):
     if character_id not in characters:
         characters[character_id] = EdenCharacter(character_id)
@@ -43,7 +44,7 @@ async def test(request: ChatTestRequest):
 
         return response
     except Exception as e:
-        raise HTTPException(status_code=400, detail=str(e))
+        raise HTTPException(status_code=400, detail=str(e))
 
 
 class ChatRequest(BaseModel):
@@ -66,7 +67,7 @@ async def think(request: ChatRequest):
 
         return response
     except Exception as e:
-        raise HTTPException(status_code=400, detail=str(e))
+        raise HTTPException(status_code=400, detail=str(e))
 
 
 @router.post("/chat/speak")
@@ -81,4 +82,4 @@ async def speak(request: ChatRequest):
 
         return response
     except Exception as e:
-        raise HTTPException(status_code=400, detail=str(e))
+        raise HTTPException(status_code=400, detail=str(e))
diff --git a/app/routers/dags.py b/app/routers/dags.py
index 45b3774..556a973 100644
--- a/app/routers/dags.py
+++ b/app/routers/dags.py
@@ -1,6 +1,6 @@
 import os
 import uuid
-import elevenlabs
+import elevenlabs
 from io import BytesIO
 from botocore.exceptions import NoCredentialsError
 from typing import Optional, List
@@ -16,16 +16,14 @@
 class MonologueRequest(BaseModel):
     character_id: str
     prompt: str
-    model: str = "gpt-4-1106-preview"
+    model: str = "gpt-4-1106-preview"
     params: dict = {}
 
+
 @router.post("/dags/monologue")
 async def monologue_dag(request: MonologueRequest):
-    #result = await monologue(request)
-    result = {
-        "prompt": "lucy is a dog",
-        "voice": "Adam"
-    }
+    # result = await monologue(request)
+    result = {"prompt": "lucy is a dog", "voice": "Adam"}
     print(result)
 
     prompt = result["prompt"]
@@ -39,7 +37,7 @@ async def monologue_dag(request: MonologueRequest):
         "face_url": image,
         "speech_url": audio_url,
         "gfpgan": False,
-        "gfpgan_upscale": 1
+        "gfpgan_upscale": 1,
     }
 
     # output = run_replicate_task(w2l_config, model_name=CHARACTER_GENERATOR)
@@ -52,21 +50,18 @@ async def monologue_dag(request: MonologueRequest):
 
     output = replicate.wav2lip(config)
     print(output)
-
-
-
     output_url = s3.upload(output, "test.mp4")
     print(output_url)
-
 
 
 class DialogueRequest(BaseModel):
     character_ids: List[str]
     prompt: str
-    model: str = "gpt-4-1106-preview"
+    model: str = "gpt-4-1106-preview"
     params: dict = {}
 
+
 @router.post("/dags/dialogue")
 async def dialogue_dag(request: DialogueRequest):
-    print("DIA")
-    print(request)
\ No newline at end of file
+    print("DIA")
+    print(request)
diff --git a/app/routers/scenario.py b/app/routers/scenario.py
index 7deeb77..17aca63 100644
--- a/app/routers/scenario.py
+++ b/app/routers/scenario.py
@@ -12,9 +12,10 @@
 class MonologueRequest(BaseModel):
     character_id: str
     prompt: str
-    model: str = "gpt-4-1106-preview"
+    model: str = "gpt-4-1106-preview"
     params: dict = {}
 
+
 @router.post("/scenarios/monologue")
 async def monologue(request: MonologueRequest):
     try:
@@ -25,13 +26,12 @@ async def monologue(request: MonologueRequest):
         description = character_data.get("logosData").get("identity")
 
         system_message = monologue_template.substitute(
-            name=name,
-            description=description
+            name=name, description=description
         )
-
+
         llm = LLM(model=request.model, system_message=system_message, params=params)
         message = llm(request.prompt)
-
+
         result = {"message": message}
 
         return result
@@ -43,27 +43,32 @@
 class DialogueRequest(BaseModel):
     character_ids: List[str]
     prompt: str
-    model: str = "gpt-4-1106-preview"
+    model: str = "gpt-4-1106-preview"
     params: dict = {}
 
+
 @router.post("/scenarios/dialogue")
 async def dialogue(request: DialogueRequest):
     try:
         params = {"temperature": 1.0, "max_tokens": 1000, **request.params}
 
-        characters = [get_character_data(character_id) for character_id in request.character_ids]
-
-        llms = []
+        characters = [
+            get_character_data(character_id) for character_id in request.character_ids
+        ]
+
+        llms = []
         for c, character in enumerate(characters):
-            other_character = characters[(c+1)%2]
+            other_character = characters[(c + 1) % 2]
             system_message = dialogue_template.substitute(
                 name=character.get("name"),
                 description=character.get("logosData").get("identity"),
                 other_name=other_character.get("name"),
                 other_description=other_character.get("logosData").get("identity"),
-                prompt=request.prompt
+                prompt=request.prompt,
+            )
+            llms.append(
+                LLM(model=request.model, system_message=system_message, params=params)
             )
-            llms.append(LLM(model=request.model, system_message=system_message, params=params))
 
         message = "You are beginning the conversation. What is the first thing you say? Just the line. No quotes, no name markers."
 
@@ -78,12 +83,14 @@ async def dialogue(request: DialogueRequest):
             if not message:
                 raise Exception("No response from character")
 
-            conversation.append({"character": request.character_ids[m%2], "message": message})
+            conversation.append(
+                {"character": request.character_ids[m % 2], "message": message}
+            )
 
         result = {"conversation": conversation}
         print(result)
 
         return result
-
+
     except Exception as e:
         raise HTTPException(status_code=400, detail=str(e))
diff --git a/app/routers/story.py b/app/routers/story.py
index 4ec4adc..9f20e75 100644
--- a/app/routers/story.py
+++ b/app/routers/story.py
@@ -6,30 +6,47 @@
 from ..llm import LLM
 from ..utils import clean_text
 from ..prompt_templates.cinema import (
-    screenwriter_template,
-    director_template,
-    cinematographer_template
+    screenwriter_template,
+    director_template,
+    cinematographer_template,
 )
+
 router = APIRouter()
 
 
 class CinemaRequest(BaseModel):
     prompt: str
-    model: str = "gpt-4-1106-preview"
+    model: str = "gpt-4-1106-preview"
     params: dict = {}
 
+
 @router.post("/story/cinema")
 async def cinema(request: CinemaRequest):
     try:
         params = {"temperature": 1.0, "max_tokens": 1000, **request.params}
 
-        screenwriter_message = str(screenwriter_template)
+        screenwriter_message = str(screenwriter_template)
         director_message = str(director_template)
         cinematographer_message = str(cinematographer_template)
 
-        screenwriter = LLM(model=request.model, system_message=screenwriter_message, params=params, id="storyteller")
-        director = LLM(model=request.model, system_message=director_message, params=params, id="director")
-        cinematographer = LLM(model=request.model, system_message=cinematographer_message, params=params, id="cinematographer")
+        screenwriter = LLM(
+            model=request.model,
+            system_message=screenwriter_message,
+            params=params,
+            id="storyteller",
+        )
+        director = LLM(
+            model=request.model,
+            system_message=director_message,
+            params=params,
+            id="director",
+        )
+        cinematographer = LLM(
+            model=request.model,
+            system_message=cinematographer_message,
+            params=params,
+            id="cinematographer",
+        )
 
         story = screenwriter(request.prompt)
         stills = director(story)
@@ -46,12 +63,12 @@ async def cinema(request: CinemaRequest):
         raise HTTPException(status_code=400, detail=str(e))
 
 
-
 class ComicRequest(BaseModel):
     prompt: str
-    model: str = "gpt-4-1106-preview"
+    model: str = "gpt-4-1106-preview"
     params: dict = {}
 
+
 @router.post("/story/comic")
 async def comic(request: ComicRequest):
     try:
diff --git a/requirements-dev.lock b/requirements-dev.lock
index dfc5774..8719de5 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -9,12 +9,19 @@
 -e file:.
 annotated-types==0.6.0
 anyio==3.7.1
+asttokens==2.4.1
 black==23.11.0
+boto3==1.34.9
+botocore==1.34.9
 certifi==2023.11.17
 cfgv==3.4.0
+charset-normalizer==3.3.2
 click==8.1.7
+decorator==5.1.1
 distlib==0.3.7
 dnspython==2.4.2
+elevenlabs==0.2.27
+executing==2.0.1
 fastapi==0.104.1
 filelock==3.13.1
 fire==0.5.0
@@ -25,7 +32,11 @@ httpx==0.25.2
 identify==2.5.33
 idna==3.6
 iniconfig==2.0.0
+ipython==8.19.0
+jedi==0.19.1
+jmespath==1.0.1
 markdown-it-py==3.0.0
+matplotlib-inline==0.1.6
 mccabe==0.7.0
 mdurl==0.1.2
 mypy==1.7.1
@@ -33,10 +44,15 @@ mypy-extensions==1.0.0
 nodeenv==1.8.0
 orjson==3.9.10
 packaging==23.2
+parso==0.8.3
 pathspec==0.12.1
+pexpect==4.9.0
 platformdirs==4.1.0
 pluggy==1.3.0
 pre-commit==3.6.0
+prompt-toolkit==3.0.43
+ptyprocess==0.7.0
+pure-eval==0.2.2
 pycodestyle==2.11.1
 pydantic==2.5.2
 pydantic-core==2.14.5
@@ -47,13 +63,21 @@ pytest==7.4.3
 python-dateutil==2.8.2
 python-dotenv==1.0.0
 pyyaml==6.0.1
+replicate==0.22.0
+requests==2.31.0
 rich==13.7.0
+s3transfer==0.10.0
 six==1.16.0
 sniffio==1.3.0
+stack-data==0.6.3
 starlette==0.27.0
 termcolor==2.4.0
+traitlets==5.14.0
 typing-extensions==4.9.0
+urllib3==2.0.7
 uvicorn==0.24.0.post1
 virtualenv==20.25.0
+wcwidth==0.2.12
+websockets==12.0
 # The following packages are considered to be unsafe in a requirements file:
 setuptools==69.0.2
diff --git a/requirements.lock b/requirements.lock
index b81250c..98caa46 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -9,28 +9,53 @@
 -e file:.
 annotated-types==0.6.0
 anyio==3.7.1
+asttokens==2.4.1
+boto3==1.34.9
+botocore==1.34.9
 certifi==2023.11.17
+charset-normalizer==3.3.2
 click==8.1.7
+decorator==5.1.1
 dnspython==2.4.2
+elevenlabs==0.2.27
+executing==2.0.1
 fastapi==0.104.1
 fire==0.5.0
 h11==0.14.0
 httpcore==1.0.2
 httpx==0.25.2
 idna==3.6
+ipython==8.19.0
+jedi==0.19.1
+jmespath==1.0.1
 markdown-it-py==3.0.0
+matplotlib-inline==0.1.6
 mdurl==0.1.2
 orjson==3.9.10
+packaging==23.2
+parso==0.8.3
+pexpect==4.9.0
+prompt-toolkit==3.0.43
+ptyprocess==0.7.0
+pure-eval==0.2.2
 pydantic==2.5.2
 pydantic-core==2.14.5
 pygments==2.17.2
 pymongo==4.6.1
 python-dateutil==2.8.2
 python-dotenv==1.0.0
+replicate==0.22.0
+requests==2.31.0
 rich==13.7.0
+s3transfer==0.10.0
 six==1.16.0
 sniffio==1.3.0
+stack-data==0.6.3
 starlette==0.27.0
 termcolor==2.4.0
+traitlets==5.14.0
 typing-extensions==4.9.0
+urllib3==2.0.7
 uvicorn==0.24.0.post1
+wcwidth==0.2.12
+websockets==12.0