-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path llm_api.py
49 lines (42 loc) · 1.68 KB
/
llm_api.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import json
from aiohttp import *
import data_manager
from models import *
from data_manager import *
import config
async def send_to_llm(prompt: GenerationRequest) -> str | None:
    """POST a generation request to the configured text API and return its text.

    Args:
        prompt: the generation request; its ``__dict__`` is sent as the JSON body.

    Returns:
        The first generated text on success, or ``None`` on any failure
        (non-200 status, unparseable JSON, or a transport error).
    """
    # Generation can be slow; allow up to 10 minutes per request.
    timeout = ClientTimeout(total=600)
    connector = TCPConnector(limit_per_host=10)
    async with ClientSession(timeout=timeout, connector=connector) as session:
        try:
            async with session.post(
                config.text_api["address"] + config.text_api["generation"],
                headers=config.text_api["headers"],
                json=prompt.__dict__
            ) as response:
                # Guard clause: anything but 200 is a failed request.
                if response.status != 200:
                    print(f"HTTP request failed with status: {response.status}")
                    return None
                try:
                    # aiohttp raises ContentTypeError (not JSONDecodeError) when
                    # the server replies with a non-JSON content type, so catch both.
                    json_response = await response.json()
                except (json.decoder.JSONDecodeError, ContentTypeError) as e:
                    print(f"JSON decode error: {e}")
                    return None
                return read_results_from_json(json_response)
        except Exception as e:
            # Boundary catch-all: report any transport/config error and degrade to None.
            print(f"An error occurred: {e}")
            return None
def read_results_from_json(json_data) -> str:
    """Extract the generated text from a decoded API response payload.

    Args:
        json_data: decoded JSON dict; expected to contain a non-empty
            ``'results'`` list of dicts, each with a ``'text'`` key.

    Returns:
        The ``'text'`` of the first result.

    Raises:
        KeyError: if ``'results'`` or ``'text'`` is missing.
        IndexError: if the ``'results'`` list is empty.
    """
    # The previous version materialized a full Response/Result object tree
    # only to read one field; extract the first result's text directly.
    return json_data['results'][0]['text']