diff --git a/src/pages/api/diffData.ts b/src/pages/api/diffData.ts
new file mode 100644
index 0000000..53b0961
--- /dev/null
+++ b/src/pages/api/diffData.ts
@@ -0,0 +1,31 @@
+import type { NextApiRequest, NextApiResponse } from 'next';
+import OpenAI from 'openai';
+import { LLMRequest, LLMResponse } from '../../types';
+
+const openai = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY,
+  baseURL: 'https://oai.hconeai.com/v1',
+  defaultHeaders: {
+    'Helicone-Cache-Enabled': 'true',
+    'Helicone-Auth': `Bearer ${process.env.HELICONE_API_KEY}`,
+  },
+});
+
+export default async function handler(
+  req: NextApiRequest,
+  res: NextApiResponse
+) {
+  const auth = req.headers.authorization;
+  if (!auth || auth !== `Bearer ${process.env.NEXT_PUBLIC_LLM_API_KEY}`) {
+    res.status(401).json({ error: 'Unauthorized' });
+    return;
+  }
+  const llmRequest: LLMRequest = req.body;
+  console.log(`LLM middleware: got request: ${JSON.stringify(llmRequest)}`);
+  const completion = await openai.chat.completions.create(
+    llmRequest.completion_create
+  );
+  console.log(`LLM middleware: got completion: ${JSON.stringify(completion)}`);
+  const response: LLMResponse = { completion };
+  res.status(200).json(response);
+}
diff --git a/src/pages/api/otherData.ts b/src/pages/api/otherData.ts
new file mode 100644
index 0000000..b12eaae
--- /dev/null
+++ b/src/pages/api/otherData.ts
@@ -0,0 +1,31 @@
+import { NextApiRequest, NextApiResponse } from 'next';
+import OpenAI from 'openai';
+import { LLMRequest, LLMResponse } from '../../types';
+
+const openai = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY,
+  baseURL: 'https://oai.hconeai.com/v1',
+  defaultHeaders: {
+    'Helicone-Cache-Enabled': 'true',
+    'Helicone-Auth': `Bearer ${process.env.HELICONE_API_KEY}`,
+  },
+});
+
+export default async function handler(
+  req: NextApiRequest,
+  res: NextApiResponse
+) {
+  const auth = req.headers.authorization;
+  if (!auth || auth !== `Bearer ${process.env.NEXT_PUBLIC_LLM_API_KEY}`) {
+    res.status(401).json({ error: 'Unauthorized' });
+    return;
+  }
+  const llmRequest: LLMRequest = req.body;
+  console.log(`LLM middleware: got request: ${JSON.stringify(llmRequest)}`);
+  const completion = await openai.chat.completions.create(
+    llmRequest.completion_create
+  );
+  console.log(`LLM middleware: got completion: ${JSON.stringify(completion)}`);
+  const response: LLMResponse = { completion };
+  res.status(200).json(response);
+}
\ No newline at end of file
diff --git a/src/pages/forms/fill/[id].tsx b/src/pages/forms/fill/[id].tsx
index c763e35..eeaf96d 100644
--- a/src/pages/forms/fill/[id].tsx
+++ b/src/pages/forms/fill/[id].tsx
@@ -99,7 +99,7 @@ export function InnerChat(props: {
     setMessages(messagesToSend);
     setInputValue('');
     setIsWaiting(true);
-    const assistantResponse = await callLLM(PROMPT_FILL(form), messagesToSend);
+    const assistantResponse = await callLLM(PROMPT_FILL(form), messagesToSend, '/api/diffData');
     if (assistantResponse instanceof Error) {
       setError(assistantResponse);
       return;
diff --git a/src/utils.ts b/src/utils.ts
index 0b5d4c3..33ea3bc 100644
--- a/src/utils.ts
+++ b/src/utils.ts
@@ -12,7 +12,8 @@ import { Database, Json } from '../types/supabase';
 
 export const callLLM = async (
   systemPrompt: string,
-  messages: ChatMessage[]
+  messages: ChatMessage[],
+  endpoint: string
 ) => {
   const data: LLMRequest = {
     completion_create: {
@@ -21,7 +22,7 @@
       messages: [{ role: 'system', content: systemPrompt }, ...messages],
     },
   };
-  const response = await fetch('/api/llm', {
+  const response = await fetch(endpoint, {
     method: 'POST',
     body: JSON.stringify(data),
     headers: {
@@ -35,7 +36,6 @@
   const json: LLMResponse = await response.json();
   return json.completion.choices[0].message;
 };
-
 export async function getUserFromSupabase(
   session: Session | null,
   supabase: SupabaseClient,
@@ -136,4 +136,4 @@ export const removeStartAndEndQuotes = (str: string | null) => {
     return str;
   }
   return str.replace(/^"(.*)"$/, '$1');
-};
+};
\ No newline at end of file
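
Note: after this change, callLLM no longer hard-codes '/api/llm'; every call site must pass the target endpoint explicitly, as the updated [id].tsx hunk does. A minimal sketch of a call against the new signature (callLLM, ChatMessage, and PROMPT_FILL come from this repo; the message content and the choice of endpoint are illustrative):

// Sketch only: mirrors the updated call site in src/pages/forms/fill/[id].tsx.
const messagesToSend: ChatMessage[] = [
  { role: 'user', content: 'Hi, I would like to fill out the form.' }, // hypothetical user turn
];
// Route this request through the diffData handler; '/api/otherData' accepts
// the same LLMRequest body, since both handlers are identical.
const assistantResponse = await callLLM(
  PROMPT_FILL(form), // system prompt built from the form definition
  messagesToSend,
  '/api/diffData'    // new required endpoint argument
);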