From 74b245002cdda6abe5579a0f788dc607ae666e4d Mon Sep 17 00:00:00 2001
From: Dan Farrelly
Date: Fri, 13 Dec 2024 10:45:32 -0500
Subject: [PATCH] Link to external AgentKit docs. Add redirect. (#1029)
---
app/ai/page.tsx | 11 +-
next.config.mjs | 2 +
...ai-orchestration-with-agentkit-step-ai.mdx | 4 +-
.../ai-agent-network-state-routing.mdx | 279 ------------------
pages/docs/agent-kit/ai-agents-tools.mdx | 230 ---------------
pages/docs/agent-kit/overview.mdx | 100 -------
.../steps-workflows/step-ai-orchestration.mdx | 14 +-
shared/Docs/Navigation.tsx | 9 +-
shared/Docs/navigationStructure.ts | 47 +--
9 files changed, 31 insertions(+), 665 deletions(-)
delete mode 100644 pages/docs/agent-kit/ai-agent-network-state-routing.mdx
delete mode 100644 pages/docs/agent-kit/ai-agents-tools.mdx
delete mode 100644 pages/docs/agent-kit/overview.mdx
diff --git a/app/ai/page.tsx b/app/ai/page.tsx
index 8616bf7f6..50c120885 100644
--- a/app/ai/page.tsx
+++ b/app/ai/page.tsx
@@ -170,7 +170,7 @@ export default function Page() {
text="Define complex AI workflows in code, including agentic orchestration, and let the AgentKit handle the heavy lifting of managing dependencies, retries, and failures with ease."
ctas={[
{
- href: `/docs/agent-kit/overview?ref=${ref}`,
+ href: `https://agentkit.inngest.com/?ref=${ref}`,
text: "Get started",
},
]}
@@ -325,7 +325,7 @@ export default inngest.createFunction(
title: "AgentKit",
description:
"Learn how to use AgentKit to build, test and deploy reliable AI workflows.",
- url: `/docs/agent-kit/overview?ref=${ref}`,
+ url: `https://agentkit.inngest.com/?ref=${ref}`,
},
{
type: "blog",
@@ -386,10 +386,13 @@ function Hero() {
backend complexity.
-
diff --git a/next.config.mjs b/next.config.mjs
index 416b807ce..5bea6d834 100644
--- a/next.config.mjs
+++ b/next.config.mjs
@@ -72,6 +72,8 @@ const permanentRedirects = [
"/docs/features/middleware/encryption-middleware?guide=python",
],
["/blog/nextjs-openai-o1", "/blog/agentic-workflow-example"],
+
+ ["/docs/agent-kit/overview", "https://agentkit.inngest.com"],
];
async function redirects() {
diff --git a/pages/blog/_posts/ai-orchestration-with-agentkit-step-ai.mdx b/pages/blog/_posts/ai-orchestration-with-agentkit-step-ai.mdx
index b9d0a8a5d..aa9141269 100644
--- a/pages/blog/_posts/ai-orchestration-with-agentkit-step-ai.mdx
+++ b/pages/blog/_posts/ai-orchestration-with-agentkit-step-ai.mdx
@@ -13,7 +13,7 @@ Building AI applications that actually work in production is surprisingly comple
Today, we're excited to announce two new AI-native features:
-- [AgentKit](/docs/agent-kit/overview), the fastest way to develop, test, and ship complex AI workflows to production, in early access
+- [AgentKit](https://agentkit.inngest.com), the fastest way to develop, test, and ship complex AI workflows to production, in early access
- A new set of core APIs in our platform: [step.ai](/docs/features/inngest-functions/steps-workflows/step-ai-orchestration), which brings the same world-class developer experience from our existing product to AI-native applications.
Just as Inngest manages your background jobs, webhooks, and scheduled tasks with built-in retries and state management, [step.ai](/docs/features/inngest-functions/steps-workflows/step-ai-orchestration) now handles your AI model calls with the same production-ready infrastructure — with built in observability and support for agentic workflows out of the box.
@@ -49,7 +49,7 @@ export default inngest.createFunction(
);
```
-You can read the docs here, and [see the code on GitHub](https://github.com/inngest/agent-kit). We cannot wait to see how you work with AgentKit. We're iterating daily, and to speak with us or give feedback join us in our #ai discord
+You can read the docs here, and [see the code on GitHub](https://github.com/inngest/agent-kit). We cannot wait to see how you work with AgentKit. We're iterating daily, and to speak with us or give feedback join us in our #ai discord
## New step APIs - now available for early access: `step.ai.wrap` and `step.ai.infer`
diff --git a/pages/docs/agent-kit/ai-agent-network-state-routing.mdx b/pages/docs/agent-kit/ai-agent-network-state-routing.mdx
deleted file mode 100644
index aea79bfab..000000000
--- a/pages/docs/agent-kit/ai-agent-network-state-routing.mdx
+++ /dev/null
@@ -1,279 +0,0 @@
-import { Callout, GuideSelector, GuideSection, CodeGroup, VersionBadge } from "src/shared/Docs/mdx";
-
-# Networks, state, and routing
-
-
- Use Networks to create complex workflows with one or more agents.
-
-
-Networks are a simple class that turns agents into powerful stateful workflows. A network contains three components:
-
-- The agents that the network can access
-- The network’s state, including past messages and a key value store (read more below)
-- The network’s router, which chooses whether to quit or the next agent to run in the loop (read more below)
-
-It also has an optional default model, which will be used when your Agents have no model provided, and a setting to cap the number of model calls via `maxIter`.
-
-Here's a simple example:
-
-```tsx
-import { createNetwork, agenticOpenai as openai } from "@inngest/agent-kit";
-
-// Create a network with two agents.
-const network = createNetwork({
- agents: [searchAgent, summaryAgent],
- defaultModel: openai({ model: "gpt-4o", step }), // Optional: used for routing and agents if they have no model
- maxIter: 10, // Optional: max number of agent calls
-});
-
-// Run the network with a user prompt
-await network.run("What happened in the 2024 Super Bowl?");
-```
-
-Similar to agents, you call `.run` on a network with some user input. The network then runs a core loop to call one or more agents to find a suitable answer.
-
-## How networks work
-
-Networks are designed to be powerful while being easy to understand:
-
-1. You create a network with a list of available agents. Each agent can use a different model and inference provider.
-2. You give the network a user prompt by calling `.run`
-3. The network runs its core loop, and:
- - Calls the router, which picks the first agent to run
- - Runs the agent with your input. This also runs the agent’s lifecycles, and any tools selected.
- - Stores the result in the network's state
- - Re-calls the router with the new state, which either quits or runs another agent
-
-
-
-## Network state
-
-
- Network state passes context between different Agents in a series of calls. It stores ouputs from prior calls, and has a KV store for additional data from tools or custom code.
-
-
-Network state is the memory of your agent workflow. It stores two things:
-
-1. **History Management**: It keeps track of all agent interactions, including prompts, responses, and tool calls
-2. **Key-Value Storage**: It provides a simple key-value store for sharing data and facts between different agent calls
-
-Without this state, each agent call would be isolated and unable to build upon previous interactions to solve complex tasks. The history and key value stores are used automatically by the network to give each agent context about what happened before. We’ll dive into both.
-
-### History
-
-The history system maintains a chronological record of all agent interactions in your network. Each interaction is stored as an `InferenceResult`, which includes:
-
-- `agent`: The agent that created this result
-- `input`: The original input
-- `system`: System instructions
-- `prompt`: The complete prompt
-- `output`: Agent output
-- `toolCalls`: Tool calls and their results
-- `raw`: Raw API responses
-
-### Using state in agent prompts
-
-The network state keeps track of every agent interaction, building a chronological list of messages in memory. When the network runs, it calls the `onStart` lifecycle hook with the given agent, network, user input, system, and history from memory:
-
-```jsx
-const agent = createAgent({
- name: "Code writer",
- lifecycles: {
- onStart: ({
- agent,
- network, // has the entire state
- input,
-
- system, // The system prompt for the agent
- history, // An array of messages
- }) => {
- // Return the system prompt (the first message), and any history added to the
- // model's conversation.
- return { system, history };
- }
- },
-});
-```
-
-This lifecycle hook can be used to modify the system prompt and history used within each agent.
-
-### The state key-value store
-
-The KV store is a simple way to store information between agent calls. Think of it like a shared memory that all your agents and tools can access. Here's how to use it:
-
-```tsx
-// Store a value
-network.state.kv.set("user-name", "Alice");
-
-// Get a value
-const name = network.state.kv.get("user-name");
-
-// Delete a value
-network.state.kv.delete("user-name");
-
-// Check if a value exists
-const exists = network.state.kv.has("user-name");
-```
-
-Common uses for the KV store include:
-
-- Storing intermediate results that other agents might need within lifecycles
-- Storing user preferences or context
-- Passing data between tools and agents
-
-Here’s a tool described using a Zod schema which stores files created by a coding agent the network’s key-value state:
-
-```ts
-const writeFiles = createTypedTool({
- name: "write_files",
- description: "Write code with the given filenames",
- parameters: z.object({
- files: z.array(z.object({
- filename: z.string(),
- content: z.string(),
- })),
- }),
- handler: (output, { network }) => {
- // files is the output from the model's response in the format above.
- // Here, we store OpenAI's generated files in the response.
- const files = network?.state.kv.get("files") || {};
- for (const file of output.files) {
- files[file.filename] = file.content;
- }
- network?.state.kv.set("files", files);
- }
-})
-```
-
-Remember: The KV store persists for the entire network run, but is cleared when you create a new network.
-
-This combination of history and key-value storage makes networks powerful for creating complex, stateful agent workflows. Agents can build on each other's work, store and retrieve information, and make decisions based on what's happened before.
-
-
-
-## Network routing
-
-
- Network routers decide what Agent to call next based off of the current network state.
-
-
-A router is a function that gets called after each agent runs, which decides whether to:
-
-1. Stop the network (by returning `undefined`)
-2. Call another agent (by returning an `Agent`)
-
-The routing function gets access to everything it needs to make this decision:
-
-```ts
-interface RouterArgs {
- network: Network; // The entire network, including the state and history
- stack: Agent[]; // Future agents to be called
- callCount: number; // Number of times we've called agents
- lastResult?: InferenceResult; // The last agent's response
-}
-```
-
-### Code-based routers (supervised routing)
-
-The simplest way to route is to write code that makes decisions. Here's an example that routes between a classifier and a writer:
-
-```ts
-const network = createNetwork({
- agents: [classifier, writer],
- router: ({ lastResult, callCount }) => {
- // First call: use the classifier
- if (callCount === 0) {
- return classifier;
- }
- // Second call: if it's a question, use the writer
- if (callCount === 1 && lastResult?.output === "question") {
- return writer;
- }
- // Otherwise, we're done!
- return undefined;
- },
-});
-```
-
-Code-based routing is great when you want deterministic, predictable behavior. It's also the fastest option since there's no LLM calls involved.
-
-### Agent routers (autonomous routing)
-
-Sometimes you want your network to be more dynamic. Agent-based routing uses an LLM to decide what to do next. The network comes with a built-in routing agent that you can use:
-
-```tsx
-import { Network, agenticOpenai as openai } from "@inngest/agent-kit";
-
-const network = createNetwork({
- agents: [classifier, writer],
- defaultModel: model,
- router: ({ lastResult, callCount }) => {
- return defaultAgenticRouter;
- },
-});
-```
-
-The routing agent looks at:
-
-- The original input
-- What agents are available
-- The conversation history
-- Each agent's description
-
-It then decides whether to call another agent or stop. This is great for building autonomous workflows where you're not sure what steps are needed up front. Note that the default agentic router is a starting point. In production apps it’s likely that you define your own agentic router props specifically for the network’s use case.
-
-### Hybrid code and agent routers (semi-supervised routing)
-
-And, of course, you can mix code and agent-based routing. Here's an example that uses code for the first step, then lets an agent take over:
-
-```tsx
-const network = createNetwork({
- agents: [classifier, writer],
- router: ({ lastResult, callCount }) => {
- // Always start with the classifier
- if (callCount === 0) {
- return classifier;
- }
- // Then let the routing agent take over
- return defaultAgenticRouter;
- },
-});
-
-```
-
-This gives you the best of both worlds:
-
-- Predictable first steps when you know what needs to happen
-- Flexibility when the path forward isn't clear
-
-### Using state in routing
-
-The router is the brain of your network - it decides which agent to call next. You can use state to make smart routing decisions:
-
-```tsx
-const router = ({ network, lastResult }): Agent | undefined => {
- // Check if we've solved the problem
- const solution = network.state.kv.get("solution");
- if (solution) {
- // We're done - return undefined to stop the network
- return undefined;
- }
-
- // Check the last result to decide what to do next
- if (lastResult?.output[0].content.includes("need more context")) {
- return contextAgent;
- }
-
- return mathAgent;
-};
-```
-
-### Tips for routing
-
-- Start simple with code-based routing
-- Use agent-based routing when you need flexibility
-- Remember that routers can access the network's state
-- You can return agents that weren't in the original network
-- The router runs after each agent call
-
-That's it! Routing is what makes networks powerful - it lets you build workflows that can be as simple or complex as you need.
diff --git a/pages/docs/agent-kit/ai-agents-tools.mdx b/pages/docs/agent-kit/ai-agents-tools.mdx
deleted file mode 100644
index 74bec8ae8..000000000
--- a/pages/docs/agent-kit/ai-agents-tools.mdx
+++ /dev/null
@@ -1,230 +0,0 @@
-import { Callout, GuideSelector, GuideSection, CodeGroup, VersionBadge } from "src/shared/Docs/mdx";
-
-# Agents and Agent Tools
-
-
- You can think of an Agent as a wrapper over a single model, with instructions and tools. Calling `.run` on an Agent
- passes the system prompt, tools, and input to the Agent's model.
-
-
-Agents are the core of AgentKit. An Agent is used to call a single model with a system prompt and, optionally, set of tools. The an agent’s additional properties like a name, description, and lifecycle hooks make calls more powerful and composable. An Agent has the following structure:
-
-- `name`: the name of the agent shown in tracing
-- `description`: an optional description for the agent, used for LLM-based routing to help the network pick which agent to run next
-- `system`: the system prompt, as a string or function. Functions let you change prompts based off of state and memory
-- `tools`: a set of tools that this agent has access to
-
-
-## Understanding how an agent makes calls
-
-Here’s a simple agent, which makes a single model call using system prompts and user input:
-
-```jsx
-import { Agent, agenticOpenai as openai, createAgent } from "@inngest/agent-kit";
-
-const agent = createAgent({
- name: "Code writer",
- system: "You are an expert TypeScript programmer. Given a set of asks, you think step-by-step to plan clean, " +
- "idiomatic TypeScript code, with comments and tests as necessary." +
- "Do not respond with anything else other than the following XML tags:" +
- "- If you would like to write code, add all code within the following tags (replace $filename and $contents appropriately):" +
- " $contents",
- model: openai("gpt-4o-mini"),
-});
-
-```
-
-You can run an agent individually. This creates a new inference request with its system prompt as the first message, and the input as the user message:
-
-```jsx
-// Run the agent:
-const { output } = await agent.run("Write a function that trims a string");
-
-// This is similar to:
-// const chatCompletion = await step.ai.infer("Code writer", {
-// model: openai("gpt-4o-mini"),
-// body: {
-// messages: [
-// { role: "system", content: "You are an expert..." },
-// { role: "user", content: "Write a function that trims a string" }
-// ],
-// },
-// });
-```
-
-Under the hood, the agent will call your model provider using an Inngest step. This gives you all of the benefits of Inngest: reliability, durability, automatic retries, and observability.
-
-## How agents work
-
-Agents themselves are relatively simple. When you call `run` on an agent, there are several steps that happen:
-
-1. The prompt is created from the system, input, and any *network state* (if the agent is part of a network)
- * The agent’s `onStart` lifecycle is called, if defined. This lets you modify the agent’s prompts before inference
-2. An inference is made as an Inngest step — with retries and durability built in. The inference result is parsed into an `InferenceResult` class, containing standardized messages, any tool call responses, and the raw API response in the format of your provider
- * The agent’s `onResponse` lifecycle is called with the result. This lets you modify and manage the result prior to calling tools
-4. If any tools were specified in the response, those tools are automatically called. The outputs are added to the result
- * The agent’s `onFinish` lifecycle is called with the new result. This lets you inspect the output of tool use
-5. The result is returned to the caller
-
-## Agent system prompts
-
-You can define an agent's system prompt as a string or as an async callback which can inspect network state and return custom instructions. This is useful in an agentic workflow: multiple models are called in a loop, impacting network state that can adjust prompts.
-
-Here's an example:
-
-```ts
-import { Agent, Network, agenticOpenai as openai, createAgent } from "@inngest/agent-kit";
-
-const systemPrompt =
- "You are an expert TypeScript programmer. Given a set of asks, think step-by-step to plan clean, " +
- "idiomatic TypeScript code, with comments and tests as necessary."
-
-const agent = createAgent({
- name: "Code writer",
-
- // description helps LLM routers choose the right agents to run.
- description: "An expert TypeScript programmer which can write and debug code",
-
- // system defines a system prompt. This function is called by the network each time
- // the agent runs, and allows you to customize the instructions based off of past state.
- system: async ({ network }) => {
- // Inspect the network state to see if we have any existing code saved as files.
- const files: Record | undefined = network?.state.kv.get("files")
- if (files === undefined) {
- return systemPrompt;
- }
-
- // There are files present in the network's state, so add them to the promp to help
- // provide previous context automatically.
- let prompt = systemPrompt + "The following code already exists:"
- for (const [name, contents] of Object.entries(record)) {
- prompt += `$contents`
- }
- return prompt;
- },
-});
-
-```
-
-
-## Agent tools
-
-
- Tools are a vital part of agents, and have two core uses:
-
- * Tools are exceptional at turn unstructured inputs into structured responses
- * Tools can call arbitrary code, allowing models to interact with other systems
-
-
-When you create an agent you can specify any number of tools that the agent can use. Tools follow the standard formats that OpenAI and Anthropic provide: a name and a description, plus typed parameters.
-
-In AgentKit, you also define a `handler` function which is called when the tool is invoked. Because AgentKit runs in the backend (on your own infrastructure) these handlers can run almost any code you define.
-
-
-### Complex agents with tools
-
-A more complex agent used in a network defines a description, lifecycle hooks, tools, and a dynamic set of instructions based off of network state:
-
-```ts
-import { Agent, Network, agenticOpenai as openai, createAgent } from "@inngest/agent-kit";
-
-const systemPrompt =
- "You are an expert TypeScript programmer. Given a set of asks, think step-by-step to plan clean, " +
- "idiomatic TypeScript code, with comments and tests as necessary."
-
-const agent = createAgent({
- name: "Code writer",
-
- // description helps LLM routers choose the right agents to run.
- description: "An expert TypeScript programmer which can write and debug code",
-
- // system defines a system prompt. This function is called by the network each time
- // the agent runs, and allows you to customize the instructions based off of past state.
- system: systemPrompt;
-
- // tools are provided to the model and are automatically called.
- tools: [
- // This tool forces the model to generate file content as structured data.
- // createTypedTool is a utility that forces typescript to strictly type the handler.
- createTypedTool({
- name: "write_files",
- description: "Write code with the given filenames",
- parameters: z.object({
- files: z.array(z.object({
- filename: z.string(),
- content: z.string(),
- })),
- }),
- handler: (output, { network }) => {
- // files is the output from the model's response in the format above.
- // Here, we store OpenAI's generated files in the response.
- const files = network?.state.kv.get("files") || {};
- for (const file of output.files) {
- files[file.filename] = file.content;
- }
- network?.state.kv.set("files", files);
- },
- }),
- ],
-});
-
-```
-
-Calling `.run` on this agent will pass the tools into your provider, allowing the model to select whether to run the `write_files` tool as a result. Tools are automatically called on your behalf.
-
-If the agent is part of a network, the agent’s inference calls are automatically added to the network’s state as memory, and the network’s state is used to adjust the prompt at any call. This is one way of building a complex network of agents, which learns as the network solves the problem.
-
-
- Networks manage shared state between a sequence of Agent calls, and allow you to manage Agent calls over time as
- state changes.
-
-
-### Step functions in tools
-
-AgentKit also exposes Inngest’s `step` tooling directly within tools. This lets you build complex step functions as tools, including human-in-the-loop tasks via `step.waitForEvent` or invoking other Inngest functions with `step.invoke`:
-
-```jsx
-createTypedTool({
- name: "request_refund_approval",
- description: "Request refund approval",
- parameters: z.array(z.object({
- refund_id: z.string(),
- }).required(),
- handler: async (output, { network, agent, step }) => {
- await step.run("request approval in slack", async () => {
- // XXX: Send a message in slack which has an "approve/reject" button.
- });
- // wait 1 hour for the approval
- const approval = await step.waitForEvent("wait for approval event", {
- event: "api/refund.approved",
- if: `async.data.refund_id == "${output.refund_id}"`,
- timeout: "1h",
- });
- if (approval === null) {
- // This was _not_ approved.
- return { approved: false };
- }
- return { approved: true };
- }
-})
-```
-
-This example shows how Inngest’s orchestration allows for long running, stateful agentic workflows. For more information on Inngest’s step tooling, read the documentation here.
-
-### Handler typing and reference
-
-AgentKit exposes a `createTypedTool` utility that forces the `output` parameter in a handler to be typed according to your parameter’s Zod schema:
-
-```jsx
-createTypedTool({
- name: "list_charges",
- description: "Returns all of a user's charges",
- parameters: z.array(z.object({
- id: z.string(),
- amount: z.number(),
- })),
- handler: async (output, { network, agent, step }) => {
- // output is strongly typed to match the parameter type.
- }
-})
-```
diff --git a/pages/docs/agent-kit/overview.mdx b/pages/docs/agent-kit/overview.mdx
deleted file mode 100644
index 8bdc91869..000000000
--- a/pages/docs/agent-kit/overview.mdx
+++ /dev/null
@@ -1,100 +0,0 @@
-import { Callout, GuideSelector, GuideSection, CodeGroup, VersionBadge } from "src/shared/Docs/mdx";
-
-# AgentKit overview
-
-
- This page introduces the APIs and concepts to AgentKit. AgentKit is in early access, and is improving
- on a daily basis.
-
-
-The AgentKit SDK lets developers build, test, and deploy reliable AI applications at scale — from single model calls to multi-agent workflows that use tools. Using the SDK lets you focus on AI code instead of technical details like orchestration, state, or infrastructure.
-
-Here’s how you can generate a single inference call using the SDK:
-
-```ts
-import { Agent, agenticOpenai as openai } from "@inngest/agent-kit";
-
-export default inngest.createFunction(
- { id: "summarizer" },
- { event: "api/post.created" },
- async ({ event, step }) => {
-
- const writer = new Agent({
- name: "writer",
- system: "You are an expert writer. You write readable, concise, simple content.",
- model: openai({ model: "gpt-4o", step }),
- });
-
- const { output } = await writer.run(
- "Describe the ideas behind the given input into clear topics, and explain any insight: " +
- `${event.data.input}`
- );
-
- },
-);
-```
-
-And here’s how you can create a network of agents, each of which has different tools and instructions, to complete complex tasks:
-
-```ts
-import { Network, agenticOpenai as openai } from "@inngest/agent-kit";
-import { navigator, classifier, summarizer } from "./src/agents";
-
-export default inngest.createFunction(
- { id: "summarizer" },
- { event: "api/summary.requested" },
- async ({ event, step }) => {
-
- // Create a network of agents with separate tasks and instructions
- // to solve // a specific task.
- const network = new Network({
- agents: [navigator, classifier, summarizer],
- defaultModel: openai({ model: "gpt-4o", step }),
- })
-
- const input = `Classify then summarize the latest 10 blog posts
- on https://www.deeplearning.ai/blog/`
-
- const result = await network.run(input, ({ network }) => {
- // Use an agent which figures out the specific agent to call
- // based off of the network's history.
- return defaultRoutingAgent;
- });
-
- },
-);
-```
-
-
-
-## Concepts
-
-It’s helpful to familiarize yourself with several concepts in order to effectively use AgentKit:
-
-### Agents
-
-An Agent is used to call a single model with a system prompt and a set of tools. When an agent runs, it calls the model passing in the prompt, user input, and any tools. Depending on the response, the agent will automatically call tools and return a standardized output. Agents can be run individually or combined into a Network of Agents which can work together to achieve more complex goals.
-
-[Learn more about agents](/docs/agent-kit/ai-agents-tools)
-
-### Networks
-
-A network is a group of agents which can work together using shared state to solve complex tasks. Networks iteratively call individual agents and their tools until the task is complete, using a router to determine the best next step. This lets you solve tasks in ways that may be hard with a single LLM request.
-
-[Learn more about networks](/docs/agent-kit/ai-agent-network-state-routing)
-
-### Network state
-
-In a network, there’s typically more than one inference call. The network stores state, which includes the *memory* of all past inference calls and a key-value store for *facts, thoughts, and observations* returned in each call. State allows you to transfer reasoning from one agent to another during *routing*, and allows you to complete complex tasks.
-
-[Learn more about network state](/docs/agent-kit/ai-agent-network-state-routing#network-state)
-
-### Network Routers
-
-A network calls different agents, many times, in a loop. The router helps determine which agent should be called next, based off of the current network state, the input, and the available agents. Examples of routers are:
-
-- Callback code which inspects state and returns agents (supervised networks)
-- Another agent which inspects state, other available agents in the network, then returns another agent it recommends next (fully autonomous networks)
-- Or a mixture of code and routing agents (semi-autonomous networks)
-
-[Learn more about network state](/docs/agent-kit/ai-agent-network-state-routing#network-routers)
diff --git a/pages/docs/features/inngest-functions/steps-workflows/step-ai-orchestration.mdx b/pages/docs/features/inngest-functions/steps-workflows/step-ai-orchestration.mdx
index 8a4db1e3f..8f0134b62 100644
--- a/pages/docs/features/inngest-functions/steps-workflows/step-ai-orchestration.mdx
+++ b/pages/docs/features/inngest-functions/steps-workflows/step-ai-orchestration.mdx
@@ -4,15 +4,15 @@ import { Callout, GuideSelector, GuideSection, CodeGroup, VersionBadge } from "s
You can build complex AI workflows and call model providers as steps using two step methods, `step.ai.infer()` and `step.ai.wrap()`, or our AgentKit SDK. They work with any model provider, and all offer full AI observability:
-- [AgentKit](/docs/agent-kit/overview) allows you to easily create single model calls or agentic workflows. Read the AgentKit docs here
+- [AgentKit](https://agentkit.inngest.com) allows you to easily create single model calls or agentic workflows. Read the AgentKit docs here
- `step.ai.wrap()` wraps other AI SDKs (OpenAI, Anthropic, and Vercel AI SDK) as a step, augmenting the observability of your Inngest Functions with information such as prompts and tokens used.
- `step.ai.infer()` offloads the inference request to Inngest's infrastructure, pausing your function execution until the request finishes. This can be a significant cost saver if you deploy to serverless functions
### Benefits
-Using [AgentKit](/docs/agent-kit/overview) and `step.ai` allows you to:
+Using [AgentKit](https://agentkit.inngest.com) and `step.ai` allows you to:
-- Automatically monitor AI usage in production to ensure quality output
+- Automatically monitor AI usage in production to ensure quality output
- Easily iterate and test prompts in the dev server
- Track requests and responses from foundational inference providers
- Track how inference calls work together in multi-step or agentic workflows
@@ -26,7 +26,7 @@ Using [AgentKit](/docs/agent-kit/overview) and `step.ai` allows you to:
**In TypeScript, we strongly recommend using AgentKit, our AI SDK which adds multiple AI capabilities to Inngest.** AgentKit allows you to call single-shot inference APIs with a simple self-documenting class, and also allows you to create semi or fully autonomous agent workflows using a network of agents.
- [AgentKit GitHub repo](https://github.com/inngest/agent-kit)
- - [AgentKit docs](//docs/agent-kit/overview)
+ - [AgentKit docs](https://agentkit.inngest.com)
## AgentKit: AI and agent orchestration
@@ -60,7 +60,7 @@ export default inngest.createFunction(
```
-[Read the full AgentKit docs here](/docs/agent-kit/overview) and [see the code on GitHub](https://github.com/inngest/agent-kit).
+[Read the full AgentKit docs here](https://agentkit.inngest.com) and [see the code on GitHub](https://github.com/inngest/agent-kit).
## Step tools: `step.ai`
@@ -116,7 +116,7 @@ export default inngest.createFunction(
{ id: "summarize-contents" },
{ event: "app/ticket.created" },
async ({ event, step }) => {
-
+
// This calls `generateText` with the given arguments, adding AI observability,
// metrics, datasets, and monitoring to your calls.
const { text } = await step.ai.wrap("using-vercel-ai", generateText, {
@@ -135,7 +135,7 @@ export default inngest.createFunction(
{ id: "summarize-contents" },
{ event: "app/ticket.created" },
async ({ event, step }) => {
-
+
// This calls `generateText` with the given arguments, adding AI observability,
// metrics, datasets, and monitoring to your calls.
const result = await step.ai.wrap("using-anthropic", anthropic.messages.create, {
diff --git a/shared/Docs/Navigation.tsx b/shared/Docs/Navigation.tsx
index 47a2965db..53d686be8 100644
--- a/shared/Docs/Navigation.tsx
+++ b/shared/Docs/Navigation.tsx
@@ -11,6 +11,7 @@ import Link from "next/link";
import { useRouter } from "next/router";
import clsx from "clsx";
import { AnimatePresence, m, motion, useIsPresent } from "framer-motion";
+import { RiExternalLinkLine } from "@remixicon/react";
// import { Button } from "./Button";
import { Button } from "../Button";
@@ -116,11 +117,13 @@ function NavLink({
target?: string;
children: React.ReactNode;
}) {
+ const isExternal = target === "_blank" || href.match(/^https?:\/\//);
+ const linkTarget = target ?? (href.match(/^https?:\/\//) ? "_blank" : null);
return (
{!isAnchorLink && }
-
{children}
{tag && (
{tag}
)}
+ {isExternal && }
);
}
@@ -343,7 +346,7 @@ function NavigationGroup({
>
{group.title}
- {nestingLevel > 0 && }
+ {nestingLevel > 0 && }
diff --git a/shared/Docs/navigationStructure.ts b/shared/Docs/navigationStructure.ts
index 57ed568e5..94f1127eb 100644
--- a/shared/Docs/navigationStructure.ts
+++ b/shared/Docs/navigationStructure.ts
@@ -33,7 +33,7 @@ export type NavSection = NavLink & {
}[];
};
-const sectionReference: NavGroup[] = [
+const sectionReference: (NavGroup | NavLink)[] = [
{
title: "Workflow Kit",
links: [
@@ -319,21 +319,11 @@ const sectionReference: NavGroup[] = [
},
{
title: "Go SDK",
- links: [
- {
- title: "Documentation",
- href: "https://pkg.go.dev/github.com/inngest/inngestgo",
- },
- ],
+ href: "https://pkg.go.dev/github.com/inngest/inngestgo",
},
{
title: "REST API",
- links: [
- {
- title: "Documentation",
- href: "https://api-docs.inngest.com/docs/inngest-api/1j9i5603g5768-introduction",
- },
- ],
+ href: "https://api-docs.inngest.com/docs/inngest-api/1j9i5603g5768-introduction",
},
{
title: "System events",
@@ -816,35 +806,12 @@ const sectionHome: (NavGroup | NavLink)[] = [
],
},
{
- title: "AgentKit",
+ title: "AI",
links: [
{
- title: "AgentKit Foundations",
- links: [
- {
- title: "Overview",
- href: "/docs/agent-kit/overview",
- },
- /*
- {
- title: "Getting started",
- href: "/docs/agent-kit/getting-started",
- },
- */
- ],
- },
- {
- title: "Build with AgentKit",
- links: [
- {
- title: "Agents and Tools",
- href: "/docs/agent-kit/ai-agents-tools",
- },
- {
- title: "Networks, state, and routing",
- href: "/docs/agent-kit/ai-agent-network-state-routing",
- },
- ],
+ title: "AgentKit",
+ href: "https://agentkit.inngest.com",
+ target: "_blank",
},
],
},