feat: add api #52

Merged
merged 1 commit on Mar 4, 2024
5 changes: 5 additions & 0 deletions .flake8
@@ -0,0 +1,5 @@
[flake8]
max-line-length = 120
per-file-ignores =
# imported but unused
__init__.py: F401
1 change: 0 additions & 1 deletion README.md
@@ -33,7 +33,6 @@ services:
# - model=gpt-3.5-turbo # optional
# - rate_limit=10/minute # optional
# - user_rate_limit=600/hour # optional
# - log_dir=/logs/ # optional
- github_client_id=xxx
- github_client_secret=xxx
- jwt_secret=secret
12 changes: 6 additions & 6 deletions config.py
@@ -1,24 +1,23 @@
import logging
from typing import Optional

from pydantic import BaseSettings
from pydantic import BaseSettings, Field

logging.basicConfig(
format="%(asctime)s: %(levelname)s: %(name)s: %(message)s",
level=logging.INFO
)
_logger = logging.getLogger(__name__)


class Settings(BaseSettings):
api_key: str = "sk-xxx"
api_key: str = Field(default="sk-xxx", exclude=True)
api_base: str = "https://api.openai.com/v1"
model: str = "gpt-3.5-turbo"
rate_limit: str = "60/hour"
user_rate_limit: str = "600/hour"
log_dir: str = ""
github_client_id: str = ""
github_client_secret: str = ""
jwt_secret: str = "secret"
github_client_secret: str = Field(default="", exclude=True)
jwt_secret: str = Field(default="secret", exclude=True)
ad_client: str = ""
ad_slot: str = ""

@@ -27,3 +26,4 @@ class Config:


settings = Settings()
_logger.info(f"settings: {settings.json()}")
1 change: 0 additions & 1 deletion docker-compose.yaml
@@ -11,7 +11,6 @@ services:
# - model=gpt-3.5-turbo # optional
# - rate_limit=10/minute # optional
# - user_rate_limit=600/hour # optional
# - log_dir=/logs/ # optional
- github_client_id=xxx
- github_client_secret=xxx
- jwt_secret=secret
2 changes: 2 additions & 0 deletions main.py
@@ -14,6 +14,7 @@
from router.limiter import limiter, get_real_ipaddr
from router.date_router import router as date_router
from router.chatgpt_router import router as chatgpt_router
from router.divination_router import router as divination_router
from router.user_router import router as user_router


@@ -38,6 +39,7 @@

app.include_router(date_router)
app.include_router(chatgpt_router)
app.include_router(divination_router)
app.include_router(user_router)

if os.path.exists("dist"):
11 changes: 10 additions & 1 deletion models.py
@@ -1,5 +1,5 @@
from typing import Optional
from pydantic import BaseModel
from pydantic import BaseModel, Field


class SettingsInfo(BaseModel):
@@ -46,3 +46,12 @@ class DivinationBody(BaseModel):
new_name: Optional[NewName] = None
plum_flower: Optional[PlumFlower] = None
fate: Optional[Fate] = None


class BirthdayBody(BaseModel):
birthday: str = Field(example="2000-08-17 00:00:00")


class CommonResponse(BaseModel):
content: str
request_id: str
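For context, `BirthdayBody` and `CommonResponse` are the request and response payloads of the new birthday endpoints defined in router/divination_router.py below. A small illustrative round-trip (the values are made up):

```python
from models import BirthdayBody, CommonResponse

body = BirthdayBody(birthday="2000-08-17 00:00:00")        # request payload
resp = CommonResponse(content="...", request_id="3f2c0d")  # response payload
print(body.json())  # {"birthday": "2000-08-17 00:00:00"}
print(resp.json())  # {"content": "...", "request_id": "3f2c0d"}
```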
9 changes: 2 additions & 7 deletions router/chatgpt_router.py
@@ -4,7 +4,6 @@
import openai
import logging

from datetime import datetime
from fastapi import Depends, HTTPException, Request, status


@@ -15,7 +14,6 @@
from router.user import get_user
from .limiter import get_real_ipaddr, limiter
from .divination import DivinationFactory
from .file_logger import file_logger

openai.api_key = settings.api_key
openai.api_base = settings.api_base
@@ -26,9 +24,6 @@
"幫助", "現在", "開始", "开始", "start", "restart", "重新开始", "重新開始",
"遵守", "遵循", "遵从", "遵從"
]
_logger.info(
f"Loaded divination types: {list(DivinationFactory.divination_map.keys())}"
)


@limiter.limit(settings.rate_limit)
@@ -68,13 +63,13 @@ async def divination(
)
if any(w in divination_body.prompt.lower() for w in STOP_WORDS):
raise HTTPException(
status_code=403,
status_code=status.HTTP_403_FORBIDDEN,
detail="Prompt contains stop words"
)
divination_obj = DivinationFactory.get(divination_body.prompt_type)
if not divination_obj:
raise HTTPException(
status_code=400,
status_code=status.HTTP_400_BAD_REQUEST,
detail=f"No prompt type {divination_body.prompt_type} not supported"
)
prompt, system_prompt = divination_obj.build_prompt(divination_body)
7 changes: 7 additions & 0 deletions router/divination/__init__.py
@@ -7,3 +7,10 @@
from . import plum_flower
from . import fate
from .base import DivinationFactory

import logging

_logger = logging.getLogger("divination factory")
_logger.info(
f"Loaded divination types: {list(DivinationFactory.divination_map.keys())}"
)
5 changes: 4 additions & 1 deletion router/divination/birthday.py
@@ -13,8 +13,11 @@ class BirthdayFactory(DivinationFactory):
divination_type = "birthday"

def build_prompt(self, divination_body: DivinationBody) -> tuple[str, str]:
return self.internal_build_prompt(divination_body.birthday)

def internal_build_prompt(self, birthday: str) -> tuple[str, str]:
birthday = datetime.datetime.strptime(
divination_body.birthday, '%Y-%m-%d %H:%M:%S'
birthday, '%Y-%m-%d %H:%M:%S'
)
prompt = f"我的生日是{birthday.year}年{birthday.month}月{birthday.day}日{birthday.hour}时{birthday.minute}分{birthday.second}秒"
return prompt, BIRTHDAY_PROMPT
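The refactor above splits prompt construction out of `build_prompt` so the same logic can be driven by a bare date string. A hedged example of how the new router uses it (the output comment assumes the prompt template shown in the diff):

```python
from router.divination.birthday import BirthdayFactory

# The new endpoints only have a "YYYY-MM-DD HH:MM:SS" string, not a full
# DivinationBody, so they call internal_build_prompt directly.
prompt, system_prompt = BirthdayFactory().internal_build_prompt("2000-08-17 00:00:00")
print(prompt)  # 我的生日是2000年8月17日0时0分0秒
```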
138 changes: 138 additions & 0 deletions router/divination_router.py
@@ -0,0 +1,138 @@
import json
import uuid
import openai
import logging

from datetime import datetime
from fastapi import Depends, HTTPException, Request
from fastapi.responses import StreamingResponse
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials

from config import settings
from fastapi import APIRouter

from models import BirthdayBody, CommonResponse
from .limiter import get_real_ipaddr
from .divination.birthday import BirthdayFactory

openai.api_key = settings.api_key
openai.api_base = settings.api_base
router = APIRouter()
security = HTTPBearer()
_logger = logging.getLogger(__name__)


def get_token(
credentials: HTTPAuthorizationCredentials = Depends(security)
):
return credentials.credentials


@router.post("/api/streaming_divination/birthday", tags=["divination"])
async def birthday_divination_streaming(
request: Request,
birthday_body: BirthdayBody,
token: str = Depends(get_token)
) -> StreamingResponse:
_logger.info(
f"Request from {get_real_ipaddr(request)}, birthday_body={birthday_body}"
)
prompt, system_prompt = BirthdayFactory().internal_build_prompt(
birthday_body.birthday
)
return common_openai_streaming_call(token, prompt, system_prompt)


@router.post("/api/divination/birthday", tags=["divination"])
async def birthday_divination(
request: Request,
birthday_body: BirthdayBody,
token: str = Depends(get_token)
) -> CommonResponse:
_logger.info(
f"Request from {get_real_ipaddr(request)}, birthday_body={birthday_body}"
)
prompt, system_prompt = BirthdayFactory().internal_build_prompt(
birthday_body.birthday
)
return common_openai_call(request, token, prompt, system_prompt)


def common_openai_streaming_call(
token: str,
prompt: str,
system_prompt: str
) -> StreamingResponse:
def get_openai_generator():
try:
openai_stream = openai.ChatCompletion.create(
api_key=token,
model=settings.model,
max_tokens=1000,
temperature=0.9,
top_p=1,
stream=True,
messages=[
{
"role": "system",
"content": system_prompt
},
{"role": "user", "content": prompt}
]
)
except openai.error.OpenAIError as e:
raise HTTPException(
status_code=500,
detail=f"OpenAI error: {e}"
)
for event in openai_stream:
if "content" in event["choices"][0].delta:
current_response = event["choices"][0].delta.content
yield current_response

return StreamingResponse(get_openai_generator(), media_type='text/event-stream')


def common_openai_call(
request: Request,
token: str,
prompt: str,
system_prompt: str
) -> CommonResponse:
start_time = datetime.now()
request_id = uuid.uuid4()

try:
response = openai.ChatCompletion.create(
api_key=token,
model=settings.model,
max_tokens=1000,
temperature=0.9,
top_p=1,
messages=[
{
"role": "system",
"content": system_prompt
},
{"role": "user", "content": prompt}
]
)
except openai.error.OpenAIError as e:
raise HTTPException(
status_code=500,
detail=f"OpenAI error: {e}"
)

res = response['choices'][0]['message']['content']
latency = datetime.now() - start_time
_logger.info(
f"Request {request_id}:"
f"Request from {get_real_ipaddr(request)}, "
f"latency_seconds={latency.total_seconds()}, "
f"prompt={prompt},"
f"res={json.dumps(res, ensure_ascii=False)}"
)
return CommonResponse(
content=res,
request_id=request_id.hex
)
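For a sense of the new API surface, here is a hedged usage sketch of the non-streaming endpoint (host and port are placeholders; per the handler above, the bearer token is passed to `openai.ChatCompletion.create` as the API key, so it is expected to be an OpenAI key):

```python
import requests

resp = requests.post(
    "http://localhost:8000/api/divination/birthday",   # placeholder host/port
    headers={"Authorization": "Bearer sk-your-openai-key"},
    json={"birthday": "2000-08-17 00:00:00"},
)
print(resp.json())  # {"content": "...", "request_id": "..."}
```

The streaming variant at /api/streaming_divination/birthday takes the same body and returns a text/event-stream response instead.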
19 changes: 0 additions & 19 deletions router/file_logger.py

This file was deleted.

5 changes: 2 additions & 3 deletions router/user.py
@@ -3,7 +3,7 @@
from typing import Optional
import jwt

from fastapi import Depends, status, Request
from fastapi import Depends, status
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials

from config import settings
@@ -29,6 +29,5 @@ def get_user(
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail="Token expired")
return jwt_payload
except Exception as e:
_logger.exception(e)
except Exception:
return
22 changes: 0 additions & 22 deletions vercel.json

This file was deleted.
