diff --git a/app/src/pages/prompt/__tests__/fixtures.ts b/app/src/pages/prompt/__tests__/fixtures.ts
new file mode 100644
index 0000000000..435bc1ece4
--- /dev/null
+++ b/app/src/pages/prompt/__tests__/fixtures.ts
@@ -0,0 +1,66 @@
+import type { PromptCodeExportCard__main$data as PromptVersion } from "../__generated__/PromptCodeExportCard__main.graphql";
+
+export type FixturePromptVersion = Omit<PromptVersion, " $fragmentType">;
+
+export const BASE_MOCK_PROMPT_VERSION = {
+  modelProvider: "OPENAI",
+  modelName: "gpt-4",
+  templateType: "CHAT",
+  templateFormat: "MUSTACHE",
+  template: {
+    __typename: "PromptChatTemplate",
+    messages: [
+      {
+        role: "USER",
+        content: [{ __typename: "TextContentPart", text: { text: "Hello" } }],
+      },
+    ],
+  },
+  invocationParameters: {
+    temperature: 0.7,
+  },
+  tools: [],
+  responseFormat: null,
+} satisfies FixturePromptVersion;
+
+export const OPENAI_TOOL = {
+  type: "function",
+  function: {
+    name: "test",
+    description: "test function",
+    parameters: {
+      type: "object",
+      properties: {
+        foo: {
+          type: "string",
+        },
+      },
+      required: ["foo"],
+    },
+  },
+};
+
+export const ANTHROPIC_TOOL = {
+  name: "test",
+  description: "test function",
+  input: {
+    type: "object",
+    properties: {
+      foo: { type: "string" },
+    },
+  },
+};
+
+export const OPENAI_RESPONSE_FORMAT = {
+  type: "json_schema",
+  json_schema: {
+    name: "test_format",
+    description: "test format",
+    schema: {
+      type: "object",
+      properties: {
+        format: { type: "string" },
+      },
+    },
+  },
+};
diff --git a/app/src/pages/prompt/__tests__/promptCodeSnippets.anthropic.test.ts b/app/src/pages/prompt/__tests__/promptCodeSnippets.anthropic.test.ts
new file mode 100644
index 0000000000..78d865b175
--- /dev/null
+++ b/app/src/pages/prompt/__tests__/promptCodeSnippets.anthropic.test.ts
@@ -0,0 +1,439 @@
+import { mapPromptToSnippet } from "../promptCodeSnippets";
+
+import {
+  ANTHROPIC_TOOL,
+  BASE_MOCK_PROMPT_VERSION,
+  FixturePromptVersion,
+} from "./fixtures";
+
+describe("promptCodeSnippets", () => {
+  describe("anthropic", () => {
+    describe("typescript", () => {
+      it("should generate basic message template", () => {
+        const prompt = {
+          ...BASE_MOCK_PROMPT_VERSION,
+          modelProvider: "ANTHROPIC",
+          modelName: "claude-3-sonnet-latest",
+          template: {
+            __typename: "PromptChatTemplate",
+            messages: [
+              {
+                role: "USER",
+                content: [
+                  {
+                    __typename: "TextContentPart",
+                    text: { text: "Hello Claude" },
+                  },
+                ],
+              },
+            ],
+          },
+        } satisfies FixturePromptVersion;
+
+        const result = mapPromptToSnippet({
+          promptVersion: prompt,
+          language: "TypeScript",
+        });
+        expect(result).toMatchInlineSnapshot(`
+          "import Anthropic from "@anthropic-ai/sdk";
+
+          const client = new Anthropic();
+
+          const messages = [
+            {
+              role: "user",
+              content: [
+                {
+                  type: "text",
+                  text: "Hello Claude"
+                }
+              ]
+            }
+          ];
+          // ^ apply additional templating to messages if needed
+
+          const response = await client.messages.create({
+            model: "claude-3-sonnet-latest",
+            temperature: 0.7,
+            messages,
+          });
+
+          console.log(response.content);"
+        `);
+      });
+
+      it("should handle tool usage", () => {
+        const prompt = {
+          ...BASE_MOCK_PROMPT_VERSION,
+          invocationParameters: {
+            toolChoice: {
+              type: "auto",
+            },
+          },
+          tools: [{ definition: ANTHROPIC_TOOL }],
+          modelProvider: "ANTHROPIC",
+          modelName: "claude-3-sonnet-latest",
+          template: {
+            __typename: "PromptChatTemplate",
+            messages: [
+              {
+                role: "USER",
+                content: [
+                  {
+                    __typename: "TextContentPart",
+                    text: { text: "Use the test tool" },
+                  },
+                  {
+                    __typename: "ToolCallContentPart",
+                    toolCall: {
+                      toolCallId: "call_123",
"call_123", + toolCall: { + name: ANTHROPIC_TOOL.name, + arguments: JSON.stringify({ foo: "bar" }), + }, + }, + }, + ], + }, + { + role: "TOOL", + content: [ + { + __typename: "ToolResultContentPart", + toolResult: { + toolCallId: "call_123", + result: { bar: "baz" }, + }, + }, + ], + }, + ], + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "TypeScript", + }); + expect(result).toMatchInlineSnapshot(` + "import Anthropic from "@anthropic-ai/sdk"; + + const client = new Anthropic(); + + const messages = [ + { + role: "user", + content: [ + { + type: "text", + text: "Use the test tool" + }, + { + id: "call_123", + type: "tool_use", + name: "test", + input: { + foo: "bar" + } + } + ] + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "call_123", + content: "{\\n \\"bar\\": \\"baz\\"\\n}" + } + ] + } + ]; + // ^ apply additional templating to messages if needed + + const response = await client.messages.create({ + model: "claude-3-sonnet-latest", + toolChoice: { + type: "auto" + }, + messages, + tools: [ + { + name: "test", + description: "test function", + input: { + type: "object", + properties: { + foo: { + type: "string" + } + } + } + } + ], + }); + + console.log(response.content);" + `); + }); + + it("should include invocation parameters", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + modelProvider: "ANTHROPIC", + modelName: "claude-3-sonnet-latest", + invocationParameters: { + temperature: 0.7, + max_tokens: 1000, + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "TypeScript", + }); + expect(result).toMatchInlineSnapshot(` + "import Anthropic from "@anthropic-ai/sdk"; + + const client = new Anthropic(); + + const messages = [ + { + role: "user", + content: [ + { + type: "text", + text: "Hello" + } + ] + } + ]; + // ^ apply additional templating to messages if needed + + const response = await client.messages.create({ + model: "claude-3-sonnet-latest", + temperature: 0.7, + max_tokens: 1000, + messages, + }); + + console.log(response.content);" + `); + }); + }); + + describe("python", () => { + it("should generate basic message template", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + modelProvider: "ANTHROPIC", + modelName: "claude-3-sonnet-latest", + template: { + __typename: "PromptChatTemplate", + messages: [ + { + role: "USER", + content: [ + { + __typename: "TextContentPart", + text: { text: "Hello Claude" }, + }, + ], + }, + ], + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "Python", + }); + expect(result).toMatchInlineSnapshot(` + "from anthropic import Anthropic + + client = Anthropic() + + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "Hello Claude" + } + ] + } + ] + # ^ apply additional templating to messages if needed + + completion = client.messages.create( + model="claude-3-sonnet-latest", + temperature=0.7, + messages=messages, + ) + + print(completion.content)" + `); + }); + + it("should handle tool usage", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + invocationParameters: { + toolChoice: { + type: "auto", + }, + }, + tools: [{ definition: ANTHROPIC_TOOL }], + modelProvider: "ANTHROPIC", + modelName: "claude-3-sonnet-latest", + template: { + __typename: "PromptChatTemplate", + messages: [ + { + role: "USER", + content: [ + { + __typename: "TextContentPart", + text: { 
text: "Use the test tool" }, + }, + { + __typename: "ToolCallContentPart", + toolCall: { + toolCallId: "call_123", + toolCall: { + name: ANTHROPIC_TOOL.name, + arguments: JSON.stringify({ foo: "bar" }), + }, + }, + }, + ], + }, + { + role: "TOOL", + content: [ + { + __typename: "ToolResultContentPart", + toolResult: { + toolCallId: "call_123", + result: { bar: "baz" }, + }, + }, + ], + }, + ], + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "Python", + }); + expect(result).toMatchInlineSnapshot(` + "from anthropic import Anthropic + + client = Anthropic() + + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "Use the test tool" + }, + { + "id": "call_123", + "type": "tool_use", + "name": "test", + "input": { + "foo": "bar" + } + } + ] + }, + { + "role": "user", + "content": [ + { + "type": "tool_result", + "tool_use_id": "call_123", + "content": "{\\n \\"bar\\": \\"baz\\"\\n}" + } + ] + } + ] + # ^ apply additional templating to messages if needed + + completion = client.messages.create( + model="claude-3-sonnet-latest", + toolChoice={ + "type": "auto" + }, + messages=messages, + tools=[ + { + "name": "test", + "description": "test function", + "input": { + "type": "object", + "properties": { + "foo": { + "type": "string" + } + } + } + } + ], + ) + + print(completion.content)" + `); + }); + + it("should include invocation parameters", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + modelProvider: "ANTHROPIC", + modelName: "claude-3-sonnet-latest", + invocationParameters: { + temperature: 0.7, + max_tokens: 1000, + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "Python", + }); + expect(result).toMatchInlineSnapshot(` + "from anthropic import Anthropic + + client = Anthropic() + + messages=[ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "Hello" + } + ] + } + ] + # ^ apply additional templating to messages if needed + + completion = client.messages.create( + model="claude-3-sonnet-latest", + temperature=0.7, + max_tokens=1000, + messages=messages, + ) + + print(completion.content)" + `); + }); + }); + }); +}); diff --git a/app/src/pages/prompt/__tests__/promptCodeSnippets.openai.test.ts b/app/src/pages/prompt/__tests__/promptCodeSnippets.openai.test.ts new file mode 100644 index 0000000000..dd29f21843 --- /dev/null +++ b/app/src/pages/prompt/__tests__/promptCodeSnippets.openai.test.ts @@ -0,0 +1,492 @@ +import { mapPromptToSnippet } from "../promptCodeSnippets"; + +import { + BASE_MOCK_PROMPT_VERSION, + FixturePromptVersion, + OPENAI_RESPONSE_FORMAT, + OPENAI_TOOL, +} from "./fixtures"; + +describe("promptCodeSnippets", () => { + describe("openai", () => { + describe("typescript", () => { + it("should generate basic message template", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + template: { + __typename: "PromptChatTemplate", + messages: [ + { + role: "USER", + content: [ + { + __typename: "TextContentPart", + text: { text: "Hello OpenAI" }, + }, + ], + }, + ], + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "TypeScript", + }); + expect(result).toMatchInlineSnapshot(` + "import OpenAI from "openai"; + + const openai = new OpenAI(); + + const messages = [ + { + role: "user", + content: "Hello OpenAI" + } + ]; + // ^ apply additional templating to messages if needed + + const response = 
+          const response = openai.chat.completions.create({
+            model: "gpt-4",
+            temperature: 0.7,
+            messages,
+          });
+
+          response.then((completion) => console.log(completion.choices[0].message));"
+        `);
+      });
+
+      it("should handle tool usage", () => {
+        const prompt = {
+          ...BASE_MOCK_PROMPT_VERSION,
+          invocationParameters: {
+            toolChoice: "auto",
+          },
+          tools: [{ definition: OPENAI_TOOL }],
+          template: {
+            __typename: "PromptChatTemplate",
+            messages: [
+              {
+                role: "USER",
+                content: [
+                  {
+                    __typename: "TextContentPart",
+                    text: { text: "Use the test tool" },
+                  },
+                  {
+                    __typename: "ToolCallContentPart",
+                    toolCall: {
+                      toolCallId: "call_123",
+                      toolCall: {
+                        name: OPENAI_TOOL.function.name,
+                        arguments: JSON.stringify({ foo: "bar" }),
+                      },
+                    },
+                  },
+                ],
+              },
+              {
+                role: "TOOL",
+                content: [
+                  {
+                    __typename: "ToolResultContentPart",
+                    toolResult: {
+                      toolCallId: "call_123",
+                      result: { bar: "baz" },
+                    },
+                  },
+                ],
+              },
+            ],
+          },
+        } satisfies FixturePromptVersion;
+
+        const result = mapPromptToSnippet({
+          promptVersion: prompt,
+          language: "TypeScript",
+        });
+        expect(result).toMatchInlineSnapshot(`
+          "import OpenAI from "openai";
+
+          const openai = new OpenAI();
+
+          const messages = [
+            {
+              role: "user",
+              content: "Use the test tool",
+              tool_calls: [
+                {
+                  id: "call_123",
+                  type: "function",
+                  function: {
+                    name: "test",
+                    arguments: "{\\"foo\\":\\"bar\\"}"
+                  }
+                }
+              ]
+            },
+            {
+              role: "tool",
+              content: "{\\n  \\"bar\\": \\"baz\\"\\n}",
+              tool_call_id: "call_123"
+            }
+          ];
+          // ^ apply additional templating to messages if needed
+
+          const response = openai.chat.completions.create({
+            model: "gpt-4",
+            toolChoice: "auto",
+            messages,
+            tools: [
+              {
+                type: "function",
+                function: {
+                  name: "test",
+                  description: "test function",
+                  parameters: {
+                    type: "object",
+                    properties: {
+                      foo: {
+                        type: "string"
+                      }
+                    },
+                    required: [
+                      "foo"
+                    ]
+                  }
+                }
+              }
+            ],
+          });
+
+          response.then((completion) => console.log(completion.choices[0].message));"
+        `);
+      });
+
+      it("should include invocation parameters", () => {
+        const prompt = {
+          ...BASE_MOCK_PROMPT_VERSION,
+          invocationParameters: {
+            temperature: 0.7,
+            max_tokens: 1000,
+          },
+        } satisfies FixturePromptVersion;
+
+        const result = mapPromptToSnippet({
+          promptVersion: prompt,
+          language: "TypeScript",
+        });
+        expect(result).toMatchInlineSnapshot(`
+          "import OpenAI from "openai";
+
+          const openai = new OpenAI();
+
+          const messages = [
+            {
+              role: "user",
+              content: "Hello"
+            }
+          ];
+          // ^ apply additional templating to messages if needed
+
+          const response = openai.chat.completions.create({
+            model: "gpt-4",
+            temperature: 0.7,
+            max_tokens: 1000,
+            messages,
+          });
+
+          response.then((completion) => console.log(completion.choices[0].message));"
+        `);
+      });
+
+      it("should handle response format", () => {
+        const prompt = {
+          ...BASE_MOCK_PROMPT_VERSION,
+          responseFormat: { definition: OPENAI_RESPONSE_FORMAT },
+        } satisfies FixturePromptVersion;
+
+        const result = mapPromptToSnippet({
+          promptVersion: prompt,
+          language: "TypeScript",
+        });
+        expect(result).toMatchInlineSnapshot(`
+          "import OpenAI from "openai";
+
+          const openai = new OpenAI();
+
+          const messages = [
+            {
+              role: "user",
+              content: "Hello"
+            }
+          ];
+          // ^ apply additional templating to messages if needed
+
+          const response = openai.chat.completions.create({
+            model: "gpt-4",
+            temperature: 0.7,
+            messages,
+            response_format: {
+              type: "json_schema",
+              json_schema: {
+                name: "test_format",
+                description: "test format",
+                schema: {
+                  type: "object",
+                  properties: {
+                    format: {
+                      type: "string"
+                    }
+                  }
+ } + }, + }); + + response.then((completion) => console.log(completion.choices[0].message));" + `); + }); + }); + + describe("python", () => { + it("should generate basic message template", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + template: { + __typename: "PromptChatTemplate", + messages: [ + { + role: "USER", + content: [ + { + __typename: "TextContentPart", + text: { text: "Hello OpenAI" }, + }, + ], + }, + ], + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "Python", + }); + expect(result).toMatchInlineSnapshot(` + "from openai import OpenAI + + client = OpenAI() + + messages=[ + { + "role": "user", + "content": "Hello OpenAI" + } + ] + # ^ apply additional templating to messages if needed + + completion = client.chat.completions.create( + model="gpt-4", + temperature=0.7, + messages=messages, + ) + + print(completion.choices[0].message)" + `); + }); + + it("should handle tool usage", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + invocationParameters: { + toolChoice: "auto", + }, + tools: [{ definition: OPENAI_TOOL }], + template: { + __typename: "PromptChatTemplate", + messages: [ + { + role: "USER", + content: [ + { + __typename: "TextContentPart", + text: { text: "Use the test tool" }, + }, + { + __typename: "ToolCallContentPart", + toolCall: { + toolCallId: "call_123", + toolCall: { + name: OPENAI_TOOL.function.name, + arguments: JSON.stringify({ foo: "bar" }), + }, + }, + }, + ], + }, + { + role: "TOOL", + content: [ + { + __typename: "ToolResultContentPart", + toolResult: { + toolCallId: "call_123", + result: { bar: "baz" }, + }, + }, + ], + }, + ], + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "Python", + }); + expect(result).toMatchInlineSnapshot(` + "from openai import OpenAI + + client = OpenAI() + + messages=[ + { + "role": "user", + "content": "Use the test tool", + "tool_calls": [ + { + "id": "call_123", + "type": "function", + "function": { + "name": "test", + "arguments": "{\\"foo\\":\\"bar\\"}" + } + } + ] + }, + { + "role": "tool", + "content": "{\\n \\"bar\\": \\"baz\\"\\n}", + "tool_call_id": "call_123" + } + ] + # ^ apply additional templating to messages if needed + + completion = client.chat.completions.create( + model="gpt-4", + toolChoice="auto", + messages=messages, + tools=[ + { + "type": "function", + "function": { + "name": "test", + "description": "test function", + "parameters": { + "type": "object", + "properties": { + "foo": { + "type": "string" + } + }, + "required": [ + "foo" + ] + } + } + } + ], + ) + + print(completion.choices[0].message)" + `); + }); + + it("should include invocation parameters", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + invocationParameters: { + temperature: 0.7, + max_tokens: 1000, + }, + } satisfies FixturePromptVersion; + + const result = mapPromptToSnippet({ + promptVersion: prompt, + language: "Python", + }); + expect(result).toMatchInlineSnapshot(` + "from openai import OpenAI + + client = OpenAI() + + messages=[ + { + "role": "user", + "content": "Hello" + } + ] + # ^ apply additional templating to messages if needed + + completion = client.chat.completions.create( + model="gpt-4", + temperature=0.7, + max_tokens=1000, + messages=messages, + ) + + print(completion.choices[0].message)" + `); + }); + + it("should handle response format", () => { + const prompt = { + ...BASE_MOCK_PROMPT_VERSION, + responseFormat: { definition: 
+          responseFormat: { definition: OPENAI_RESPONSE_FORMAT },
+        } satisfies FixturePromptVersion;
+
+        const result = mapPromptToSnippet({
+          promptVersion: prompt,
+          language: "Python",
+        });
+        expect(result).toMatchInlineSnapshot(`
+          "from openai import OpenAI
+
+          client = OpenAI()
+
+          messages=[
+            {
+              "role": "user",
+              "content": "Hello"
+            }
+          ]
+          # ^ apply additional templating to messages if needed
+
+          completion = client.chat.completions.create(
+            model="gpt-4",
+            temperature=0.7,
+            messages=messages,
+            response_format={
+              "type": "json_schema",
+              "json_schema": {
+                "name": "test_format",
+                "description": "test format",
+                "schema": {
+                  "type": "object",
+                  "properties": {
+                    "format": {
+                      "type": "string"
+                    }
+                  }
+                }
+              }
+            },
+          )
+
+          print(completion.choices[0].message)"
+        `);
+      });
+    });
+  });
+});
diff --git a/app/src/pages/prompt/promptCodeSnippets.tsx b/app/src/pages/prompt/promptCodeSnippets.tsx
index 64cf19d5fa..afe502de2b 100644
--- a/app/src/pages/prompt/promptCodeSnippets.tsx
+++ b/app/src/pages/prompt/promptCodeSnippets.tsx
@@ -3,10 +3,10 @@ import { template } from "lodash";
 import { CodeLanguage } from "@phoenix/components/code";
 import {
   fromOpenAIMessage,
-  LlmProviderMessage,
   OpenAIMessage,
   promptMessageToOpenAI,
 } from "@phoenix/schemas/messageSchemas";
+import { isObject } from "@phoenix/typeUtils";
 
 import type { PromptCodeExportCard__main$data as PromptVersion } from "./__generated__/PromptCodeExportCard__main.graphql";
 
@@ -26,7 +26,7 @@ export type PromptToSnippetParams = ({
   | "tools"
 > & {
   template: {
-    messages: LlmProviderMessage[];
+    messages: unknown[];
   };
 }) => string;
 
@@ -72,6 +72,17 @@ const jsonFormatter = ({
   return fmt;
 };
 
+type LanguageConfig = {
+  assignmentOperator: string;
+  removeKeyQuotes: boolean;
+  stringQuote: string;
+  template: (params: {
+    tab: string;
+    args: string[];
+    messages: string;
+  }) => string;
+};
+
 const openaiTemplatePython = template(
   `
 from openai import OpenAI
@@ -106,13 +117,146 @@ response.then((completion) => console.log(completion.choices[0].message));
 `.trim()
 );
 
+const anthropicTemplatePython = template(
+  `
+from anthropic import Anthropic
+
+client = Anthropic()
+
+messages=<%= messages %>
+# ^ apply additional templating to messages if needed
+
+completion = client.messages.create(
+<% _.forEach(args, function(arg) { %><%= tab %><%= arg %>,
+<% }); %>)
+
+print(completion.content)
+`.trim()
+);
+
+const anthropicTemplateTypeScript = template(
+  `
+import Anthropic from "@anthropic-ai/sdk";
+
+const client = new Anthropic();
+
+const messages = <%= messages %>;
+// ^ apply additional templating to messages if needed
+
+const response = await client.messages.create({
+<% _.forEach(args, function(arg) { %><%= tab %><%= arg %>,
+<% }); %>});
+
+console.log(response.content);
+`.trim()
+);
+
+const languageConfigs: Record<string, Record<string, LanguageConfig>> = {
+  python: {
+    openai: {
+      assignmentOperator: "=",
+      removeKeyQuotes: false,
+      stringQuote: '"',
+      template: openaiTemplatePython,
+    },
+    anthropic: {
+      assignmentOperator: "=",
+      removeKeyQuotes: false,
+      stringQuote: '"',
+      template: anthropicTemplatePython,
+    },
+  },
+  typescript: {
+    openai: {
+      assignmentOperator: ": ",
+      removeKeyQuotes: true,
+      stringQuote: '"',
+      template: openaiTemplateTypeScript,
+    },
+    anthropic: {
+      assignmentOperator: ": ",
+      removeKeyQuotes: true,
+      stringQuote: '"',
+      template: anthropicTemplateTypeScript,
+    },
+  },
+};
+
+const preparePromptData = (
+  prompt: Parameters<PromptToSnippetParams>[0],
+  config: LanguageConfig
+) => {
+  if (!("messages" in prompt.template)) {
messages"); + } + + const args: string[] = []; + const { assignmentOperator, removeKeyQuotes, stringQuote } = config; + + if (prompt.modelName) { + args.push( + `model${assignmentOperator}${stringQuote}${prompt.modelName}${stringQuote}` + ); + } + + if (prompt.invocationParameters) { + const invocationArgs = Object.entries(prompt.invocationParameters).map( + ([key, value]) => + typeof value === "string" + ? `${key}${assignmentOperator}${stringQuote}${value}${stringQuote}` + : isObject(value) + ? `${key}${assignmentOperator}${jsonFormatter({ + json: value, + level: 1, + removeKeyQuotes, + })}` + : `${key}${assignmentOperator}${value}` + ); + args.push(...invocationArgs); + } + + let messages = ""; + if (prompt.template.messages.length > 0) { + const fmt = jsonFormatter({ + json: prompt.template.messages, + level: 0, + removeKeyQuotes, + }); + messages = fmt; + args.push(assignmentOperator === "=" ? "messages=messages" : "messages"); + } + + if (prompt.tools && prompt.tools.length > 0) { + const fmt = jsonFormatter({ + json: prompt.tools.map((tool) => tool.definition), + level: 1, + removeKeyQuotes, + }); + args.push(`tools${assignmentOperator}${fmt}`); + } + + if (prompt.responseFormat && "definition" in prompt.responseFormat) { + const fmt = jsonFormatter({ + json: prompt.responseFormat.definition, + level: 1, + removeKeyQuotes, + }); + args.push(`response_format${assignmentOperator}${fmt}`); + } + + return { args, messages }; +}; + /** - * Stringify the arguments of a message's tool calls + * Convert OpenAI messages to OpenAI SDK messages, for use in the native SDK * - * @param message the message to stringify - * @returns the message with stringified tool call arguments + * @todo The playground really needs to manage messages fully in Phoenix Prompt format, or, in + * native SDK format. This in-between format is a mess. + * + * @param message the message to convert + * @returns the converted message */ -const stringifyOpenAIToolCallArguments = (message: OpenAIMessage) => { +const convertOpenAIMessageToOpenAISDKMessage = (message: OpenAIMessage) => { if ("tool_calls" in message && message.tool_calls) { return { ...message, @@ -129,77 +273,33 @@ const stringifyOpenAIToolCallArguments = (message: OpenAIMessage) => { } }; -/** - * A map of languages to model providers to code snippets - * - * @todo when we implement more langs / providers, replace with a react-like DSL, for example something like the following: - * @example - * ```tsx - * code( - * { language, provider }, - * [ - * providerSetup(), - * messages({messages}), - * providerCompletion(null, [argument({messages}), argument({tools}), argument({response_format})]) - * ] - * ) - * ``` - * where each function takes a props object and optional children, and returns a string. - * - * That way, each component can manage how to emit its portion of the string based on language and model provider, - * accessible via context from the top level code component. - */ export const promptCodeSnippets: Record< string, Record > = { python: { openai: (prompt) => { - if (!("messages" in prompt.template)) { - throw new Error("Prompt template does not contain messages"); - } - // collect args to the provider completion fn call from the incoming template - const args: string[] = []; - if (prompt.modelName) { - args.push(`model="${prompt.modelName}"`); - } - if (prompt.invocationParameters) { - const invocationArgs = Object.entries(prompt.invocationParameters).map( - ([key, value]) => - typeof value === "string" ? 
`${key}="${value}"` : `${key}=${value}` - ); - args.push(...invocationArgs); - } - // messages are special, they are passed as a kwarg to the provider completion fn - // but defined in the template as a top level variable first - let messages = ""; - if (prompt.template.messages.length > 0) { - const fmt = jsonFormatter({ - json: prompt.template.messages.map((m) => - stringifyOpenAIToolCallArguments(m as OpenAIMessage) + const config = languageConfigs.python.openai; + const convertedPrompt = { + ...prompt, + template: { + ...prompt.template, + messages: prompt.template.messages.map((m) => + convertOpenAIMessageToOpenAISDKMessage(m as OpenAIMessage) ), - level: 0, - }); - messages = `${fmt}`; - args.push(`messages=messages`); - } - if (prompt.tools && prompt.tools.length > 0) { - const fmt = jsonFormatter({ - json: prompt.tools.map((tool) => tool.definition), - level: 1, - }); - args.push(`tools=${fmt}`); - } - if (prompt.responseFormat && "definition" in prompt.responseFormat) { - const fmt = jsonFormatter({ - json: prompt.responseFormat.definition, - level: 1, - }); - args.push(`response_format=${fmt}`); - } - - // now emit the template with the collected args and messages - return openaiTemplatePython({ + }, + }; + const { args, messages } = preparePromptData(convertedPrompt, config); + return config.template({ + tab: TAB, + args, + messages, + }); + }, + anthropic: (prompt) => { + const config = languageConfigs.python.anthropic; + const { args, messages } = preparePromptData(prompt, config); + return config.template({ tab: TAB, args, messages, @@ -208,56 +308,27 @@ export const promptCodeSnippets: Record< }, typescript: { openai: (prompt) => { - if (!("messages" in prompt.template)) { - throw new Error("Prompt template does not contain messages"); - } - // collect args to the provider completion fn call from the incoming template - const args: string[] = []; - if (prompt.modelName) { - args.push(`model: "${prompt.modelName}"`); - } - if (prompt.invocationParameters) { - const invocationArgs = Object.entries(prompt.invocationParameters).map( - ([key, value]) => - typeof value === "string" - ? 
`${key}: "${value}"` - : `${key}: ${value}` - ); - args.push(...invocationArgs); - } - // messages are special, they are passed as a kwarg to the provider completion fn - // but defined in the template as a top level variable first - let messages = ""; - if (prompt.template.messages.length > 0) { - const fmt = jsonFormatter({ - json: prompt.template.messages.map((m) => - stringifyOpenAIToolCallArguments(m as OpenAIMessage) + const config = languageConfigs.typescript.openai; + const convertedPrompt = { + ...prompt, + template: { + ...prompt.template, + messages: prompt.template.messages.map((m) => + convertOpenAIMessageToOpenAISDKMessage(m as OpenAIMessage) ), - level: 0, - removeKeyQuotes: true, - }); - messages = `${fmt}`; - args.push(`messages`); - } - if (prompt.tools && prompt.tools.length > 0) { - const fmt = jsonFormatter({ - json: prompt.tools.map((tool) => tool.definition), - level: 1, - removeKeyQuotes: true, - }); - args.push(`tools: ${fmt}`); - } - if (prompt.responseFormat && "definition" in prompt.responseFormat) { - const fmt = jsonFormatter({ - json: prompt.responseFormat.definition, - level: 1, - removeKeyQuotes: true, - }); - args.push(`response_format: ${fmt}`); - } - - // now emit the template with the collected args and messages - return openaiTemplateTypeScript({ + }, + }; + const { args, messages } = preparePromptData(convertedPrompt, config); + return config.template({ + tab: TAB, + args, + messages, + }); + }, + anthropic: (prompt) => { + const config = languageConfigs.typescript.anthropic; + const { args, messages } = preparePromptData(prompt, config); + return config.template({ tab: TAB, args, messages, @@ -270,7 +341,7 @@ export const mapPromptToSnippet = ({ promptVersion, language, }: { - promptVersion: PromptVersion; + promptVersion: Omit; language: CodeLanguage; }) => { const generator = diff --git a/app/src/schemas/messageSchemas.ts b/app/src/schemas/messageSchemas.ts index 7b680f5269..7e8d60feec 100644 --- a/app/src/schemas/messageSchemas.ts +++ b/app/src/schemas/messageSchemas.ts @@ -24,13 +24,20 @@ import { } from "./promptSchemas"; import { anthropicToolCallSchema, - anthropicToolCallToOpenAI, openAIToolCallSchema, openAIToolCallToAnthropic, } from "./toolCallSchemas"; type ModelProvider = keyof typeof ModelProviders; +/** + * This file contains the schemas for the different message format SDKs. + * + * It is not used for playground validation / transformations. + * + * It is likely not complete, and may drift from the actual provider SDKs. 
+ */
+
 /**
  * OpenAI Message Schemas
  */
@@ -70,12 +77,18 @@ export const anthropicMessageRoleSchema = z.enum(["user", "assistant", "tool"]);
 
 export type AnthropicMessageRole = z.infer<typeof anthropicMessageRoleSchema>;
 
-export const anthropicBlockSchema = z.object({
+/**
+ * Object that represents all possible Anthropic message block type schemas
+ *
+ * @todo use a discriminated union instead of including all properties as optional
+ */
+export const anthropicMessageBlockSchema = anthropicToolCallSchema.extend({
   type: z.string(),
   text: z.string().optional(),
   id: z.string().optional(),
+  tool_use_id: z.string().optional(),
   name: z.string().optional(),
-  input: z.record(jsonLiteralSchema).optional(),
+  input: z.record(z.unknown()).optional(),
   source: z
     .object({
       type: z.string(),
@@ -85,14 +98,10 @@
     .optional(),
 });
 
-export const anthropicMessageSchema = z
-  .object({
-    role: anthropicMessageRoleSchema,
-    content: z.union([z.string(), z.array(anthropicBlockSchema)]),
-    tool_calls: z.array(anthropicToolCallSchema).optional(),
-    tool_call_id: z.string().optional(),
-  })
-  .passthrough();
+export const anthropicMessageSchema = z.object({
+  role: anthropicMessageRoleSchema,
+  content: z.union([z.string(), z.array(anthropicMessageBlockSchema)]),
+});
 
 export type AnthropicMessage = z.infer<typeof anthropicMessageSchema>;
 
@@ -146,77 +155,38 @@ export const promptMessagesJSONSchema = zodToJsonSchema(promptMessagesSchema, {
   removeAdditionalStrategy: "passthrough",
 });
 
-/**
- * Conversion Functions
- *
- * These follow a hub-and-spoke model where OpenAI is the hub format.
- * All conversions between different formats go through OpenAI as an intermediate step.
- */
-
-/**
- * Spoke → Hub: Convert an Anthropic message to OpenAI format
- */
-export const anthropicMessageToOpenAI = anthropicMessageSchema.transform(
-  (anthropic): OpenAIMessage => {
-    const base: OpenAIMessage = {
-      role: anthropic.role as OpenAIMessageRole,
-      content: Array.isArray(anthropic.content)
-        ? anthropic.content
-            .filter((block) => block.type === "text" && block.text)
-            .map((block) => block.text!)
-            .join("\n")
-        : anthropic.content,
-    };
-
-    if (anthropic.tool_calls) {
-      return {
-        ...base,
-        tool_calls: anthropic.tool_calls.map((tc) =>
-          anthropicToolCallToOpenAI.parse(tc)
-        ),
-      };
-    }
-
-    if (anthropic.tool_call_id) {
-      return {
-        ...base,
-        tool_call_id: anthropic.tool_call_id,
-      };
-    }
-
-    return base;
-  }
-);
-
 /**
  * Hub → Spoke: Convert an OpenAI message to Anthropic format
  */
 export const openAIMessageToAnthropic = openAIMessageSchema.transform(
   (openai): AnthropicMessage => {
-    const base: AnthropicMessage = {
-      role:
-        openai.role === "system"
-          ? "user"
-          : (openai.role as AnthropicMessageRole),
+    const base = {
+      role: openai.role === "assistant" ? "assistant" : "user",
[{ type: "text", text: openai.content }] : [], - }; + } satisfies AnthropicMessage; if (openai.tool_calls) { return { ...base, - tool_calls: openai.tool_calls.map((tc) => - openAIToolCallToAnthropic.parse(tc) - ), + content: [ + ...base.content, + ...openai.tool_calls.map((tc) => openAIToolCallToAnthropic.parse(tc)), + ], }; } if (openai.tool_call_id) { return { ...base, - tool_call_id: openai.tool_call_id, + content: [ + { + type: "tool_result", + tool_use_id: openai.tool_call_id, + content: openai.content, + }, + ], }; } - return base; } ); @@ -357,8 +327,6 @@ export const openAIMessageToPrompt = openAIMessageSchema.transform( } ); -type MessageProvider = ModelProvider | "UNKNOWN"; - type MessageWithProvider = | { provider: Extract; @@ -400,29 +368,6 @@ export const detectMessageProvider = ( return { provider: "UNKNOWN", validatedMessage: null }; }; -/** - * Convert from any message format to OpenAI format if possible - */ -export const toOpenAIMessage = ( - message: LlmProviderMessage -): OpenAIMessage | null => { - const { provider, validatedMessage } = detectMessageProvider(message); - switch (provider as MessageProvider) { - case "AZURE_OPENAI": - case "OPENAI": - return validatedMessage as OpenAIMessage; - case "ANTHROPIC": - return anthropicMessageToOpenAI.parse(validatedMessage); - case "GEMINI": - // TODO: Add Gemini message support - return null; - case "UNKNOWN": - return null; - } - // This will never happen due to the exhaustive switch above - return assertUnreachable(provider as never); -}; - /** * Convert from OpenAI message format to any other format */ @@ -467,14 +412,6 @@ type ProviderToMessageMap = { GEMINI: JSONLiteral; }; -/** - * Convert an Anthropic message to Prompt format via OpenAI - */ -export const anthropicMessageToPrompt = anthropicMessageSchema.transform( - (anthropic): PromptMessage => - openAIMessageToPrompt.parse(anthropicMessageToOpenAI.parse(anthropic)) -); - /** * Convert a Prompt message to Anthropic format via OpenAI */