local openai = {}
-- Optional filter according to OpenAI usage policies:
--* https://platform.openai.com/docs/usage-policies/usage-policies
openai.isFilter = true
openai.flags = {}
openai.isFlagged = false
--[[
    ?MODEL GUIDE (read more at https://beta.openai.com/docs/models)
    * gpt-3.5-turbo:
        Turbo is the same model family that powers ChatGPT.
        It is optimized for conversational chat input and output, and performs comparably to the Davinci model family on completions.
        Any use case that works well in ChatGPT should perform well with the Turbo model family in the API.
    * davinci:
        Most capable GPT-3 model. Can do any task the other models can do, often with higher quality.
        [Good at: Complex intent, cause and effect, summarization for audience]
    * curie:
        Very capable, but faster and lower cost than Davinci.
        [Good at: Language translation, complex classification, text sentiment, summarization]
    * babbage:
        Capable of straightforward tasks, very fast, and lower cost.
        [Good at: Moderate classification, semantic search classification]
    * ada:
        Capable of very simple tasks, usually the fastest model in the GPT-3 series, and lowest cost.
        [Good at: Parsing text, simple classification, address correction, keywords]
]]
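-- Illustrative sketch (not part of the library): how the guide above maps onto
-- openai.complete, defined further down. The model names, prompts, and values are
-- placeholders, not recommendations.
--   openai.complete("davinci", "Write a haiku about turtles", 0.7, 64)
--   openai.complete("ada", "Keywords for: 'fast, cheap classification'", 0, 16)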
-- Authenticate the API key, with error handling
local function authenticate(path)
    --! Testing for a .env.env misunderstanding
    local isEnv = fs.exists(path .. ".env.env")
    if isEnv then error("The template.env file was renamed incorrectly\nRename the file .env.env to just .env") end
    --! Testing for .env
    isEnv = fs.exists(path .. ".env")
    if not isEnv then error("No .env found") end

    -- Accessing the private key in the local .env file
    local apiEnv = fs.open(path .. ".env", "r")
    local apiAuth = apiEnv.readAll()
    -- Finished with the file; close it before any of the checks below can raise an error
    apiEnv.close()

    -- Ensuring the key contains no common string errors
    apiAuth = string.gsub(apiAuth, "\n", "")
    apiAuth = string.gsub(apiAuth, " +", "")

    --! Testing for leftover template text
    local isTemplate = string.find(apiAuth, "PRIVATE%-API%-KEY%-HERE%-then%-rename%-to%-.env")
    if isTemplate then error("Template text left in .env") end
    --! Testing for the "sk-" prefix
    local isKey = string.find(apiAuth:sub(1, 3), "sk%-")
    if not isKey then error("Incorrect API key (no 'sk-' prefix)") end
    --! Testing length
    if #apiAuth ~= 51 then error("Incorrect API key (too many or too few chars)") end

    --! Testing HTTP
    local request = http.get("https://example.tweaked.cc")
    if not request then
        error("HTTP failed! Please follow steps at...\n\n => https://tweaked.cc/guide/local_ips.html <=\n\nHyperlink available in openai-lua, at line 51")
    end
    -- => HTTP is working!
    request.close()

    -- Return the error-checked API authentication key
    return apiAuth
end
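-- A minimal sketch of the .env file that authenticate() expects: a single line holding
-- only the secret key (placeholder shown; the 51-character length matches the check above):
--   sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX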
-- Tests the input against the OpenAI usage policies (moderation endpoint)
function openai.filter(input, key)
    key = key or authenticate("/DavinCC/lib/openai-lua/")
    -- The request body is built by plain concatenation, so the input is assumed to
    -- contain no unescaped double quotes or other characters that would break the JSON
    local test = http.post("https://api.openai.com/v1/moderations",
        '{"input": "' .. input .. '"}',
        { ["Content-Type"] = "application/json", ["Authorization"] = "Bearer " .. key })
    -- Error handling on empty response
    if test then
        return test.readAll()
    else
        return false
    end
end
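-- Usage sketch (assumes a valid .env is already in place; the return value is the raw
-- JSON string from the moderation endpoint, or false on an empty response):
--   local raw = openai.filter("some player chat message")
--   if raw then print(raw) end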
-- Checks for flagging
function openai.check(input, key)
    -- Check for filter option
    if openai.isFilter then
        local test = openai.filter(input, key)
        -- Check filter result
        if test then
            openai.flags = textutils.unserialiseJSON(test).results[1]
            openai.isFlagged = openai.flags.flagged
        end
    end
end
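-- Usage sketch: check() returns nothing and instead fills openai.flags and
-- openai.isFlagged, so callers read the module fields afterwards (the input is a placeholder):
--   openai.check("some player chat message")
--   if openai.isFlagged then print(textutils.serialise(openai.flags)) end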
-- Request a completion from OpenAI, using the provided model, prompt, temperature, and maximum tokens
-- For the chat models (gpt-3.5-turbo, gpt-4, gpt-4-32k), `prompt` must already be a serialised
-- JSON array of messages; for all other models it is a plain prompt string
function openai.complete(model, prompt, temp, tokens)
    -- Retrieving the private API key
    local cmplKey = authenticate("/DavinCC/lib/openai-lua/")
    if not cmplKey then error("Error retrieving cmpl API key, reason not found :(") end

    -- Check flagging status
    openai.check(prompt, cmplKey)
    if openai.isFlagged then
        return false
    end

    -- Posting to OpenAI using the private key
    local cmplPost
    if model == "gpt-3.5-turbo" or model == "gpt-4" or model == "gpt-4-32k" then
        -- Specialised post for the chat format
        cmplPost = http.post("https://api.openai.com/v1/chat/completions",
            '{"model": "' .. model .. '", "messages": ' .. prompt ..
            ', "temperature": ' .. temp .. ', "max_tokens": ' .. tokens .. '}',
            { ["Content-Type"] = "application/json", ["Authorization"] = "Bearer " .. cmplKey })
    else
        -- General post format for all other completions
        cmplPost = http.post("https://api.openai.com/v1/completions",
            '{"model": "' .. model .. '", "prompt": "' .. prompt ..
            '", "temperature": ' .. temp .. ', "max_tokens": ' .. tokens .. '}',
            { ["Content-Type"] = "application/json", ["Authorization"] = "Bearer " .. cmplKey })
    end

    -- Error handling on empty response
    if cmplPost then
        return cmplPost.readAll()
    else
        return false
    end
end
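-- Usage sketches (illustrative values only; both calls return the raw JSON response
-- string, or false when the prompt was flagged or the request came back empty):
--   Chat models take a pre-serialised JSON messages array:
--     local chat = openai.complete("gpt-3.5-turbo", '[{"role": "user", "content": "Hello there"}]', 0.7, 128)
--   Other completion models take a plain prompt string:
--     local cmpl = openai.complete("davinci", "Say hello", 0.7, 64)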
-- Request image generation from OpenAI, using provided prompt, number, and size
function openai.generate(prompt, number, size)
    -- Retrieving private API key
    local genKey = authenticate("/DALL-CC/lib/openai-lua/")
    if not genKey then error("Error retrieving gen API key, reason not found :(") end

    -- Check flagging status
    openai.check(prompt, genKey)
    if openai.isFlagged then
        return false
    end

    -- Posting to OpenAI using the private key
    local genPost = http.post("https://api.openai.com/v1/images/generations",
        '{"prompt": "' .. prompt .. '", "n": ' .. number .. ', "size": "' .. size .. '"}',
        { ["Content-Type"] = "application/json", ["Authorization"] = "Bearer " .. genKey })

    -- Error handling on empty response
    if genPost then
        return genPost.readAll()
    else
        return false
    end
end
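-- Usage sketch (the prompt is a placeholder; the images endpoint accepts sizes such as
-- "256x256", "512x512", and "1024x1024"):
--   local images = openai.generate("a turtle made of redstone", 1, "256x256")
--   if images then print(images) end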
return openai