
Commit 45e740c
Merge pull request #5 from edenartlab/stage
Stage
genekogan authored Jan 12, 2024
2 parents 6f93df3 + 1796155 commit 45e740c
Showing 27 changed files with 1,204 additions and 174 deletions.
52 changes: 52 additions & 0 deletions .github/workflows/pipeline-stg.yaml
@@ -0,0 +1,52 @@
name: 'Build and Deploy - Logos Service (Stg)'

on:
  workflow_dispatch:
  push:
    branches: [stage]

  pull_request:
    branches: [stage]

env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
  ENV_STAGE: stage

jobs:
  build-and-push-to-ghcr:
    name: Build
    runs-on: ubuntu-20.04
    permissions:
      contents: read
      packages: write
    outputs:
      tag: ${{ steps.prep.outputs.tag }}
    steps:
      - uses: actions/checkout@v2

      - id: prep
        run: |
          TAG=$(echo $GITHUB_SHA | head -c7)
          IMAGE="${{ env.REGISTRY }}/edenartlab/logos-svc-stg"
          echo ::set-output name=tagged_image::${IMAGE}:${TAG}
          echo ::set-output name=tag::${TAG}
          echo ::set-output name=image::${IMAGE}
      - name: Log in to GHCR
        if: github.event_name != 'pull_request'
        uses: docker/login-action@v1
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Build and Push
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./Dockerfile
          push: true
          tags:
            ${{ steps.prep.outputs.tagged_image }},${{ steps.prep.outputs.image }}:latest
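
A maintenance note on the prep step (not part of this diff): the ::set-output workflow command is deprecated in GitHub Actions in favor of appending name=value lines to the file named by GITHUB_OUTPUT. A minimal Python sketch of the same tag derivation done that way, assuming it runs inside a job step where the runner provides GITHUB_SHA and GITHUB_OUTPUT:

import os

# Same derivation as the shell prep step: first 7 characters of the commit SHA.
sha = os.environ["GITHUB_SHA"]
tag = sha[:7]
image = "ghcr.io/edenartlab/logos-svc-stg"

# Current replacement for "echo ::set-output": append name=value pairs
# to the file referenced by GITHUB_OUTPUT.
with open(os.environ["GITHUB_OUTPUT"], "a") as out:
    out.write(f"tag={tag}\n")
    out.write(f"image={image}\n")
    out.write(f"tagged_image={image}:{tag}\n")
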
2 changes: 1 addition & 1 deletion .github/workflows/pipeline.yaml
@@ -11,7 +11,7 @@ on:
env:
  REGISTRY: ghcr.io
  IMAGE_NAME: ${{ github.repository }}
-  ENV_STAGE: stage
+  ENV_STAGE: prod

jobs:
  build-and-push-to-ghcr:

2 changes: 1 addition & 1 deletion Dockerfile
@@ -10,7 +10,7 @@ COPY README.md README.md
COPY requirements.lock requirements.txt

RUN apt-get update \
-    && apt-get install -y git \
+    && apt-get install -y git ffmpeg \
    && pip install -r requirements.txt

ENTRYPOINT ["uvicorn", "app.server:app", "--host", "0.0.0.0"]
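
The ffmpeg binary is added to the image, presumably for the service's audio/video handling; a small runtime sanity check, as a sketch:

import shutil
import subprocess

# Fail fast if the ffmpeg binary expected in the image is missing from PATH.
assert shutil.which("ffmpeg") is not None, "ffmpeg is not installed in this image"
version_line = subprocess.run(
    ["ffmpeg", "-version"], capture_output=True, text=True
).stdout.splitlines()[0]
print(version_line)
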
59 changes: 49 additions & 10 deletions app/character.py
@@ -7,6 +7,7 @@
from pydantic import Field, BaseModel, ValidationError

from .mongo import get_character_data
from .routers.tasks import summary, SummaryRequest
from .llm import LLM
from .llm.models import ChatMessage
from .prompt_templates.assistant import (
@@ -60,7 +61,7 @@ class CreatorInput(BaseModel):
    """
    Input to creator LLM containing a prompt, and optionally a list of attachments
    """
-    prompt: str = Field(description="Message to LLM")
+    message: str = Field(description="Message to LLM")
    attachments: Optional[List[str]] = Field(default_factory=list, description="List of file paths to attachments")
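
The input model's text field is renamed from prompt to message, and the rename is carried through _route_, _chat_, _qa_, and _create_ below. A minimal usage sketch of the updated model, using the field definitions from this hunk:

from typing import List, Optional
from pydantic import BaseModel, Field

class CreatorInput(BaseModel):
    """Input to creator LLM containing a message, and optionally a list of attachments"""
    message: str = Field(description="Message to LLM")
    attachments: Optional[List[str]] = Field(
        default_factory=list,
        description="List of file paths to attachments",
    )

# Callers now pass message= instead of prompt=.
inp = CreatorInput(message="make an image of a sunrise", attachments=["/tmp/ref.jpg"])
print(inp.message, inp.attachments)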


@@ -76,6 +77,8 @@ def __init__(
        creation_enabled=False,
        concept=None,
        smart_reply=False,
        image=None,
        voice=None,
    ):
        self.reply_params = {"temperature": 0.0, "max_tokens": 10}
        self.router_params = {"temperature": 0.0, "max_tokens": 10}
@@ -89,6 +92,8 @@ def __init__(
        self.qa = LLM(model="gpt-4-1106-preview", params=self.qa_params)
        self.chat = LLM(model="gpt-4-1106-preview", params=self.chat_params)

        self.knowledge_summary = ""

        self.update(
            name=name,
            identity=identity,
@@ -97,6 +102,8 @@ def update(
            creation_enabled=creation_enabled,
            concept=concept,
            smart_reply=smart_reply,
            image=image,
            voice=voice,
        )

    def update(
@@ -108,15 +115,27 @@ def update(
        creation_enabled=False,
        concept=None,
        smart_reply=False,
        image=None,
        voice=None,
    ):
        self.name = name
        self.identity = identity
        if knowledge_summary:
            self.knowledge_summary = knowledge_summary
        self.knowledge = knowledge
        self.creation_enabled = creation_enabled
        self.concept = concept
        self.smart_reply = smart_reply
        self.image = image
        self.voice = voice
        self.function_map = {"1": self._chat_}
        options = ["Regular conversation, chat, humor, or small talk"]

-        if knowledge_summary:
+        if knowledge:
            if not self.knowledge_summary.strip():
                self.knowledge_summary = summary(SummaryRequest(text=self.knowledge)).summary
            options.append("A question about or reference to your knowledge")
-            knowledge_summary = f"You have the following knowledge: {knowledge_summary}"
+            knowledge_summary = f"You have the following knowledge: {self.knowledge_summary}"
            self.function_map[str(len(options))] = self._qa_
        if creation_enabled:
            options.append("A request for an image or video creation")
@@ -151,6 +170,7 @@ def update(

        self.creator_prompt = creator_template.substitute(
            name=name,
            identity=identity,
        )

        self.router.update(system_message=self.router_prompt)
@@ -159,6 +179,21 @@ def update(
        self.chat.update(system_message=self.chat_prompt)
        self.reply.update(system_message=self.identity_prompt)

    def __str__(self):
        def truncate(s):
            return (s[:47] + '...') if len(s) > 50 else s
        return (
            f"Name: {truncate(self.name)}\n"
            f"Identity: {truncate(self.identity)}\n"
            f"Knowledge Summary: {truncate(str(self.knowledge_summary))}\n"
            f"Knowledge: {truncate(str(self.knowledge))}\n"
            f"Creation Enabled: {truncate(str(self.creation_enabled))}\n"
            f"Concept: {truncate(str(self.concept))}\n"
            f"Smart Reply: {truncate(str(self.smart_reply))}\n"
            f"Image: {truncate(str(self.image))}\n"
            f"Voice: {truncate(str(self.voice))}"
        )
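
The truncate helper inside the new __str__ caps each reported field at 50 characters; a quick illustration of its behavior:

def truncate(s):
    return (s[:47] + '...') if len(s) > 50 else s

print(truncate("short identity"))  # unchanged, under the 50-character limit
print(len(truncate("x" * 120)))    # 50: the first 47 characters plus '...'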

    def think(
        self,
        message,
@@ -187,7 +222,7 @@ def _route_(
        for msg in conversation:
            role = "Eden" if msg.role == "assistant" else "Me"
            router_prompt += f"{role}: {msg.content}\n"
-        router_prompt += f"Me: {message.prompt}\n"
+        router_prompt += f"Me: {message.message}\n"
        index = self.router(router_prompt, save_messages=False)
        match = re.match(r'-?\d+', index)
        if match:
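
Because the router's reply is free-form LLM text, _route_ extracts a leading integer defensively before it is used as a key. A standalone sketch of that parsing step:

import re

def parse_route_index(reply: str):
    """Return the leading integer in the router's reply, or None if absent."""
    match = re.match(r'-?\d+', reply)
    return match.group() if match else None

print(parse_route_index("2"))             # -> 2
print(parse_route_index("2. knowledge"))  # -> 2
print(parse_route_index("not sure"))      # -> None
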
@@ -201,8 +236,8 @@ def _chat_(
        message,
        session_id=None,
    ) -> dict:
-        response = self.chat(message.prompt, id=session_id, save_messages=False)
-        user_message = ChatMessage(role="user", content=message.prompt)
+        response = self.chat(message.message, id=session_id, save_messages=False)
+        user_message = ChatMessage(role="user", content=message.message)
        assistant_message = ChatMessage(role="assistant", content=response)
        output = {
            "message": response,
@@ -215,8 +250,8 @@ def _qa_(
        message,
        session_id=None
    ) -> dict:
-        response = self.qa(message.prompt, id=session_id, save_messages=False)
-        user_message = ChatMessage(role="user", content=message.prompt)
+        response = self.qa(message.message, id=session_id, save_messages=False)
+        user_message = ChatMessage(role="user", content=message.message)
        assistant_message = ChatMessage(role="assistant", content=response)
        output = {
            "message": response,
@@ -256,7 +291,7 @@ def _create_(
        if config:
            message_out += f"\nConfig: {config}"

-        message_in = message.prompt
+        message_in = message.message
        if message.attachments:
            message_in += f"\n\nAttachments: {message.attachments}"

@@ -329,7 +364,9 @@ def sync(self):
        knowledge = logos_data.get("knowledge")
        creation_enabled = True
        concept = logos_data.get("concept")
-        smart_reply = False
+        smart_reply = character_data.get("smartReply", False)
        image = character_data.get("image")
        voice = character_data.get("voice")

        self.update(
            name=name,
@@ -339,5 +376,7 @@
            creation_enabled=creation_enabled,
            concept=concept,
            smart_reply=smart_reply,
            image=image,
            voice=voice,
        )
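
sync() now reads the optional character fields with dict.get, so documents that lack smartReply, image, or voice simply fall back to defaults. A minimal sketch with a hypothetical character document:

# Hypothetical document, standing in for what get_character_data returns.
character_data = {"name": "Eve", "smartReply": True}

smart_reply = character_data.get("smartReply", False)  # True here, False when the key is absent
image = character_data.get("image")                    # None when the document has no image
voice = character_data.get("voice")                    # None when the document has no voice
print(smart_reply, image, voice)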
