-
Notifications
You must be signed in to change notification settings - Fork 0
/
predict.py
126 lines (99 loc) · 3.59 KB
/
predict.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
from cog import BasePredictor, Input, Path
import requests
import time
import json
import base64
from PIL import Image
from io import BytesIO
class Prodia:
    """Minimal HTTP client for the Prodia image-generation REST API."""

    def __init__(self, api_key, base=None):
        # Default to the public v1 endpoint unless an override is supplied.
        self.base = base or "https://api.prodia.com/v1"
        self.headers = {"X-Prodia-Key": api_key}

    def generate(self, params):
        """Submit a text-to-image job; returns the parsed JSON job record."""
        return self._post(f"{self.base}/job", params).json()

    def transform(self, params):
        """Submit an image-to-image (transform) job."""
        return self._post(f"{self.base}/transform", params).json()

    def controlnet(self, params):
        """Submit a ControlNet-guided job."""
        return self._post(f"{self.base}/controlnet", params).json()

    def get_job(self, job_id):
        """Fetch the current state of a previously submitted job."""
        return self._get(f"{self.base}/job/{job_id}").json()

    def wait(self, job):
        """Poll a job every 250 ms until it reaches a terminal state."""
        current = job
        while current['status'] not in ['succeeded', 'failed']:
            time.sleep(0.25)
            current = self.get_job(job['job'])
        return current

    def list_models(self):
        """Return the list of models available to this API key."""
        return self._get(f"{self.base}/models/list").json()

    def _post(self, url, params):
        # Merge the auth header with the JSON content type for POSTs.
        merged_headers = dict(self.headers)
        merged_headers["Content-Type"] = "application/json"
        response = requests.post(url, headers=merged_headers, data=json.dumps(params))
        if response.status_code != 200:
            raise Exception(f"Bad Prodia Response: {response.status_code}")
        return response

    def _get(self, url):
        response = requests.get(url, headers=self.headers)
        if response.status_code != 200:
            raise Exception(f"Bad Prodia Response: {response.status_code}")
        return response
def image_to_base64(image_path):
    """Load an image from disk and return it as a base64-encoded PNG string."""
    buffer = BytesIO()
    # Re-encode through PIL so any readable input format becomes PNG bytes.
    with Image.open(image_path) as img:
        img.save(buffer, format="PNG")
    # b64encode yields bytes; decode to plain str so it can go in a JSON payload.
    return base64.b64encode(buffer.getvalue()).decode('utf-8')
class Predictor(BasePredictor):
    """Cog predictor that proxies text-to-image generation to the Prodia API."""

    def setup(self) -> None:
        """Load the model into memory to make running multiple predictions efficient"""
        # No local weights: all heavy lifting happens on Prodia's servers.

    def predict(
        self,
        api_key: str = Input(
            description="Prodia API Key"
        ),
        prompt: str = Input(
            description="Prompt", default="puppies in a cloud, 4k"
        ),
        model: str = Input(
            description="Model", default="v1-5-pruned-emaonly.safetensors [d7049739]"
        ),
        negative_prompt: str = Input(
            description="Negative Prompt", default="badly drawn"
        ),
        steps: int = Input(
            description="Steps", default=25
        ),
        cfg_scale: int = Input(
            description="CFG Scale", default=7
        ),
        sampler: str = Input(
            description="Sampler", default="DPM++ 2M Karras"
        )
    ) -> Path:
        """Run a single prediction on the model.

        Submits a generation job to Prodia, waits for completion, downloads
        the resulting image, and returns the local file as a Path (the type
        the signature promises — Cog uploads Path outputs for the caller).

        Raises:
            Exception: if the Prodia job fails or the image download errors.
        """
        prodia_client = Prodia(api_key=api_key)
        result = prodia_client.generate({
            "model": model,
            "prompt": prompt,
            "negative_prompt": negative_prompt,
            "steps": steps,
            "cfg_scale": cfg_scale,
            "sampler": sampler
        })
        # Block until the job reaches a terminal state ('succeeded'/'failed').
        job = prodia_client.wait(result)
        # Surface failures explicitly instead of KeyError-ing on 'imageUrl'.
        if job.get("status") != "succeeded" or "imageUrl" not in job:
            raise Exception(f"Prodia job did not succeed: {job}")
        # Bug fix: the original returned the raw URL string, violating the
        # declared `-> Path` return type. Download the image locally instead.
        response = requests.get(job["imageUrl"])
        if response.status_code != 200:
            raise Exception(f"Bad Prodia Response: {response.status_code}")
        output_path = "/tmp/output.png"
        with open(output_path, "wb") as f:
            f.write(response.content)
        return Path(output_path)