From 30e18180f24abd80891a37f1985f2dfa76f34169 Mon Sep 17 00:00:00 2001
From: "devin-ai-integration[bot]"
<158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Mon, 2 Dec 2024 23:26:05 +0000
Subject: [PATCH] docs: add span events documentation for chat completion
traces
---
api-reference/traces/POST-trace.mdx | 104 ++++++++++++++++++++++-
tracing/send_traces.mdx | 127 ++++++++++++++++++++++++----
2 files changed, 212 insertions(+), 19 deletions(-)
diff --git a/api-reference/traces/POST-trace.mdx b/api-reference/traces/POST-trace.mdx
index 9708262..64b243c 100644
--- a/api-reference/traces/POST-trace.mdx
+++ b/api-reference/traces/POST-trace.mdx
@@ -82,7 +82,45 @@ curl -X POST 'https://app.langtrace.ai/api/trace' \
"llm.response.choices": {
"stringValue": "[{\"role\":\"assistant\",\"content\":\"I'm doing well, thank you for asking!\"}]"
}
- }
+ },
+ "events": [
+ {
+ "name": "token_stream",
+ "timeUnixNano": "1701555556000000000",
+ "attributes": {
+ "llm.token.text": {
+ "stringValue": "I'm"
+ },
+ "llm.token.index": {
+ "intValue": 0
+ }
+ }
+ },
+ {
+ "name": "token_stream",
+ "timeUnixNano": "1701555556200000000",
+ "attributes": {
+ "llm.token.text": {
+ "stringValue": " doing"
+ },
+ "llm.token.index": {
+ "intValue": 1
+ }
+ }
+ },
+ {
+ "name": "function_call",
+ "timeUnixNano": "1701555556500000000",
+ "attributes": {
+ "llm.function.name": {
+ "stringValue": "get_current_weather"
+ },
+ "llm.function.arguments": {
+ "stringValue": "{\"location\":\"San Francisco\",\"unit\":\"celsius\"}"
+ }
+ }
+ }
+ ]
}]
}]
}]
@@ -122,7 +160,33 @@ payload = {
"llm.request.type": {"stringValue": "chat_completion"},
"llm.request.messages": {"stringValue": json.dumps([{"role": "user", "content": "Hello, how are you?"}])},
"llm.response.choices": {"stringValue": json.dumps([{"role": "assistant", "content": "I'm doing well, thank you for asking!"}])}
- }
+ },
+ "events": [
+ {
+ "name": "token_stream",
+ "timeUnixNano": "1701555556000000000",
+ "attributes": {
+ "llm.token.text": {"stringValue": "I'm"},
+ "llm.token.index": {"intValue": 0}
+ }
+ },
+ {
+ "name": "token_stream",
+ "timeUnixNano": "1701555556200000000",
+ "attributes": {
+ "llm.token.text": {"stringValue": " doing"},
+ "llm.token.index": {"intValue": 1}
+ }
+ },
+ {
+ "name": "function_call",
+ "timeUnixNano": "1701555556500000000",
+ "attributes": {
+ "llm.function.name": {"stringValue": "get_current_weather"},
+ "llm.function.arguments": {"stringValue": "{\"location\":\"San Francisco\",\"unit\":\"celsius\"}"}
+ }
+ }
+ ]
}]
}]
}]
@@ -133,6 +197,42 @@ print(response.json())
```
+### Span Events
+
+The trace endpoint supports capturing detailed events within spans for chat completions:
+
+<ParamField body="events" type="array">
+  Array of events that occurred during the span
+</ParamField>
+
+<ParamField body="events[].name" type="string">
+  Name of the event (e.g., "token_stream", "function_call")
+</ParamField>
+
+<ParamField body="events[].timeUnixNano" type="string">
+  Timestamp of the event in Unix nanoseconds
+</ParamField>
+
+<ParamField body="events[].attributes" type="object">
+  Attributes specific to the event type
+</ParamField>
+
+#### Event Types
+
+Two event types are emitted for chat completion spans; a sketch of how they can be constructed follows the lists below.
+
+**Token Stream Events**
+- `name`: `"token_stream"`
+- `attributes`:
+  - `llm.token.text`: Text content of the token
+  - `llm.token.index`: Position in the response sequence
+  - `llm.token.logprob`: Token log probability (optional)
+
+**Function Call Events**
+- `name`: `"function_call"`
+- `attributes`:
+  - `llm.function.name`: Name of the called function
+  - `llm.function.arguments`: JSON string of the function arguments
+  - `llm.function.response`: Function response (optional)
+
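+#### Example: Building Span Events
+
+As a minimal sketch, the `events` array described above can be assembled from plain dictionaries. The helper functions below are illustrative only, not part of the Langtrace SDK; `time.time_ns()` supplies the Unix-nanosecond timestamps that `timeUnixNano` expects.
+
+```python
+import json
+import time
+
+# Illustrative helpers: not part of the Langtrace SDK.
+def token_stream_event(text: str, index: int) -> dict:
+    # One "token_stream" event per streamed token.
+    return {
+        "name": "token_stream",
+        "timeUnixNano": str(time.time_ns()),
+        "attributes": {
+            "llm.token.text": {"stringValue": text},
+            "llm.token.index": {"intValue": index},
+        },
+    }
+
+def function_call_event(name: str, arguments: dict) -> dict:
+    # One "function_call" event; arguments are serialized to a JSON string.
+    return {
+        "name": "function_call",
+        "timeUnixNano": str(time.time_ns()),
+        "attributes": {
+            "llm.function.name": {"stringValue": name},
+            "llm.function.arguments": {"stringValue": json.dumps(arguments)},
+        },
+    }
+
+events = [
+    token_stream_event("I'm", 0),
+    token_stream_event(" doing", 1),
+    function_call_event("get_current_weather",
+                        {"location": "San Francisco", "unit": "celsius"}),
+]
+```
+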
### Response
diff --git a/tracing/send_traces.mdx b/tracing/send_traces.mdx
index cd50abf..31fcaa0 100644
--- a/tracing/send_traces.mdx
+++ b/tracing/send_traces.mdx
@@ -62,9 +62,9 @@ Once the SDK is initialized, Langtrace will automatically capture traces from su
### Example: Sending Traces from an OpenAI Application
-Here's an example of how to send traces from an LLM application using the OpenAI API. This example demonstrates how to use the Langtrace SDK to send traces from an OpenAI completion request.
+Here's an example of how to send traces from an LLM application using the OpenAI API. It demonstrates how to use the Langtrace SDK to trace an OpenAI chat completion request, including the span events that capture detailed response information such as streamed tokens and function calls.
-
+
Make sure to install the required packages with pip or npm before running the code.
```bash
@@ -81,7 +81,6 @@ npm install @langtrase/typescript-sdk openai
```python Python
from langtrace_python_sdk import langtrace, with_langtrace_root_span
-
from openai import OpenAI
langtrace.init(
@@ -89,17 +88,47 @@ langtrace.init(
@with_langtrace_root_span()
def example():
- client = OpenAI()
- response = client.chat.completions.create(
- model="gpt-3.5-turbo",
- messages=[
- {
- "role": "system",
- "content": "How many states of matter are there?"
- }
- ],
- )
- print(response.choices[0].message.content)
+ client = OpenAI()
+ # Enable stream to capture token-by-token events
+ response = client.chat.completions.create(
+ model="gpt-3.5-turbo",
+ messages=[
+ {
+ "role": "system",
+ "content": "You are a helpful assistant that can also check the weather."
+ },
+ {
+ "role": "user",
+ "content": "What's the weather like in San Francisco?"
+ }
+ ],
+ stream=True, # Enable streaming for token events
+ functions=[{ # Define functions to demonstrate function calling
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA"
+ },
+ "unit": {
+ "type": "string",
+ "enum": ["celsius", "fahrenheit"]
+ }
+ },
+ "required": ["location"]
+ }
+ }]
+ )
+
+ # Process streamed response
+ for chunk in response:
+ if chunk.choices[0].delta.content:
+ print(chunk.choices[0].delta.content, end="")
+ if chunk.choices[0].delta.function_call:
+ print(f"\nFunction Call: {chunk.choices[0].delta.function_call}")
example()
```
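+
+When streaming with function calling enabled, the `function_call` deltas arrive as fragments, so in practice the arguments need to be accumulated across chunks before they can be parsed. Below is a minimal sketch of that accumulation, assuming a streamed `response` like the one created above (the variable names are illustrative):
+
+```python
+import json
+
+content_parts = []
+function_name = None
+argument_parts = []
+
+for chunk in response:
+    if not chunk.choices:  # some chunks (e.g., trailing usage chunks) carry no choices
+        continue
+    delta = chunk.choices[0].delta
+    if delta.content:
+        content_parts.append(delta.content)
+    if delta.function_call:
+        # The function name typically arrives once; the arguments stream in as JSON fragments.
+        if delta.function_call.name:
+            function_name = delta.function_call.name
+        if delta.function_call.arguments:
+            argument_parts.append(delta.function_call.arguments)
+
+print("".join(content_parts))
+if function_name:
+    arguments = json.loads("".join(argument_parts))
+    print(f"Function: {function_name}, arguments: {arguments}")
+```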
@@ -107,22 +136,86 @@ example()
```javascript Javascript
import * as Langtrace from "@langtrase/typescript-sdk";
import OpenAI from "openai";
+
Langtrace.init({
api_key: "",
});
+
const openai = new OpenAI();
async function example() {
- const completion = await openai.chat.completions.create({
+ const stream = await openai.chat.completions.create({
model: "gpt-3.5-turbo",
messages: [
{
role: "system",
- content: "How many states of matter are there?",
+ content: "You are a helpful assistant that can also check the weather.",
+ },
+ {
+ role: "user",
+ content: "What's the weather like in San Francisco?",
},
],
+ stream: true, // Enable streaming for token events
+ functions: [{ // Define functions to demonstrate function calling
+ name: "get_current_weather",
+ description: "Get the current weather in a given location",
+ parameters: {
+ type: "object",
+ properties: {
+ location: {
+ type: "string",
+ description: "The city and state, e.g. San Francisco, CA",
+ },
+ unit: {
+ type: "string",
+ enum: ["celsius", "fahrenheit"],
+ },
+ },
+ required: ["location"],
+ },
+ }],
});
- console.log(completion.choices[0]);
+
+ // Process streamed response
+ for await (const chunk of stream) {
+ if (chunk.choices[0].delta.content) {
+ process.stdout.write(chunk.choices[0].delta.content);
+ }
+ if (chunk.choices[0].delta.function_call) {
+ console.log("\nFunction Call:", chunk.choices[0].delta.function_call);
+ }
+ }
+}
+
+example().then(() => {
+ console.log("\nDone");
+});
+ type: "object",
+ properties: {
+ location: {
+ type: "string",
+ description: "The city and state, e.g. San Francisco, CA"
+ },
+ unit: {
+ type: "string",
+ enum: ["celsius", "fahrenheit"]
+ }
+ },
+ required: ["location"]
+ }
+ }]
+ });
+
+ // Process streamed response
+ for (const chunk of completion) {
+ if (chunk.choices[0].delta.content) {
+ process.stdout.write(chunk.choices[0].delta.content);
+ }
+ if (chunk.choices[0].delta.function_call) {
+ console.log(`\nFunction Call: ${chunk.choices[0].delta.function_call}`);
+ }
+ }
}
example().then(() => {