From d3439dc8d29b41ab55624c7a0e870cb1e4522ba2 Mon Sep 17 00:00:00 2001 From: Matt Gregerson Date: Wed, 10 Apr 2024 15:19:52 -0400 Subject: [PATCH] Updated to match current design on Mintlify. Ensured all links are pointing to the correct page and not breaking. --- .../prompt-management/fetching-prompts.mdx | 4 +- .../prompt-management/prompt-registry.mdx | 4 +- .../prompt-management/quickstart.mdx | 10 +- fern/pages/openllmetry/integrations/axiom.mdx | 2 +- .../integrations/azure-insights.mdx | 2 +- .../openllmetry/integrations/datadog.mdx | 2 +- .../openllmetry/integrations/dynatrace.mdx | 14 +- .../openllmetry/integrations/grafana.mdx | 22 +-- .../openllmetry/integrations/honeycomb.mdx | 2 +- .../openllmetry/integrations/hyperdx.mdx | 2 +- .../openllmetry/integrations/instana.mdx | 10 +- .../openllmetry/integrations/new-relic.mdx | 2 +- .../integrations/opentelemetry-collector.mdx | 2 +- .../openllmetry/integrations/overview.mdx | 2 +- .../pages/openllmetry/integrations/sentry.mdx | 2 +- .../openllmetry/integrations/service-now.mdx | 2 +- .../pages/openllmetry/integrations/signoz.mdx | 2 +- .../pages/openllmetry/integrations/splunk.mdx | 16 +- .../openllmetry/integrations/traceloop.mdx | 2 +- .../prompts-completions-embeddings.mdx | 2 +- fern/pages/openllmetry/privacy/telemetry.mdx | 2 +- fern/pages/openllmetry/quickstart/go.mdx | 15 +- fern/pages/openllmetry/quickstart/next.mdx | 142 +++++++------- fern/pages/openllmetry/quickstart/node.mdx | 18 +- fern/pages/openllmetry/quickstart/python.mdx | 14 +- fern/pages/openllmetry/quickstart/ruby.mdx | 12 +- .../quickstart/sdk-initialization.mdx | 7 +- .../tracing/workflows-tasks-agents-tools.mdx | 182 +++++++++--------- 28 files changed, 241 insertions(+), 257 deletions(-) diff --git a/fern/pages/documentation/prompt-management/fetching-prompts.mdx b/fern/pages/documentation/prompt-management/fetching-prompts.mdx index 91beae8..2e99d4b 100644 --- a/fern/pages/documentation/prompt-management/fetching-prompts.mdx +++ b/fern/pages/documentation/prompt-management/fetching-prompts.mdx @@ -12,7 +12,7 @@ To disable polling all together, set the `TRACELOOP_SYNC_ENABLED` environment va Make sure you’ve configured the SDK with the right environment and API Key. See the [SDK documentation](/docs/openllmetry/integrations/traceloop) for more information. - + The SDK uses smart caching mechanisms to provide zero latency for fetching prompts. @@ -67,6 +67,6 @@ Then, you can retrieve it with in your code using `get_prompt`: - + The returned variable `prompt_args` is compatible with the API used by the foundation models SDKs (OpenAI, Anthropic, etc.) which means you should directly plug in the response to the appropriate API call. \ No newline at end of file diff --git a/fern/pages/documentation/prompt-management/prompt-registry.mdx b/fern/pages/documentation/prompt-management/prompt-registry.mdx index 649a05b..c880037 100644 --- a/fern/pages/documentation/prompt-management/prompt-registry.mdx +++ b/fern/pages/documentation/prompt-management/prompt-registry.mdx @@ -16,7 +16,7 @@ The prompt configuration is composed of two parts: - The model configuration (`temperature`, `top_p`, etc.) -Your prompt template can include variables. Variables are defined according to the syntax of the parser specified. For example, if using `jinjia2` the syntax will be `{{ variable_name }}`. You can then pass variable values to the SDK when calling `get_prompt`. See the example on the [SDK Usage](/fetching-prompts) section. +Your prompt template can include variables. 
Variables are defined according to the syntax of the parser specified. For example, if using `jinjia2` the syntax will be `{{ variable_name }}`. You can then pass variable values to the SDK when calling `get_prompt`. See the example on the [SDK Usage](/docs/documentation/prompt-management/fetching-prompts) section. Initially, prompts are created in `Draft Mode`. In this mode, you can make changes to the prompt and configuration. You can also test your prompt in the playground (see below). @@ -53,7 +53,7 @@ Here, you can see all recent prompt versions, and which environments they are de As a safeguard, you cannot deploy a prompt to the `Staging` environment before first deploying it to `Development`. Similarly, you cannot deploy to `Production` without first deploying to `Staging`. -To fetch prompts from a specific environment, you must supply that environment’s API key to the Traceloop SDK. See the [SDK Configuration](/docs/openllmetry/integrations/traceloop) for details +To fetch prompts from a specific environment, you must supply that environment’s API key to the Traceloop SDK. See the [SDK Configuration](/openllmetry/integrations/traceloop) for details ## Prompt Versions diff --git a/fern/pages/documentation/prompt-management/quickstart.mdx b/fern/pages/documentation/prompt-management/quickstart.mdx index 3254ca6..a9c1716 100644 --- a/fern/pages/documentation/prompt-management/quickstart.mdx +++ b/fern/pages/documentation/prompt-management/quickstart.mdx @@ -4,7 +4,7 @@ You can use Traceloop to manage your prompts and model configurations. That way you can easily experiment with different prompts, and rollout changes gradually and safely. - + Make sure you’ve created an API key and set it as an environment variable `TRACELOOP_API_KEY` before you start. Check out the SDK’s [getting started guide](/docs/openllmetry/quick-start/python) for more information. @@ -17,9 +17,9 @@ Click **New Prompt** to create a new prompt. Give it a name, which will be used Set the system and/or user prompt. You can use variables in your prompt by following the [Jinja format](https://jinja.palletsprojects.com/en/3.1.x/templates/) of `{{ variable_name }}`. The values of these variables will be passed in when you retrieve the prompt in your code. -For more information see the [Registry Documentation](/prompt-registry). +For more information see the [Registry Documentation](/docs/documentation/prompt-management/prompt-registry). - + This screen is also a prompt playground. Give the prompt a try by clicking **Test** at the bottom. @@ -90,9 +90,9 @@ Retrieve your prompt by using the `get_prompt` function. For example, if you’v - + The returned variable `prompt_args` is compatible with the API used by the foundation models SDKs (OpenAI, Anthropic, etc.) which means you can directly plug in the response to the appropriate API call. -For more information see the [SDK Usage Documentation](/fetching-prompts). +For more information see the [SDK Usage Documentation](/docs/documentation/prompt-management/fetching-prompts). 
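As a rough sketch (assuming the Python SDK and the `joke_generator` prompt from the example above — see the [SDK Usage Documentation](/docs/documentation/prompt-management/fetching-prompts) for the exact `get_prompt` signature), the returned arguments can be unpacked straight into the provider call:

```python
from openai import OpenAI
from traceloop.sdk.prompts import get_prompt

client = OpenAI()

# Fetch the rendered prompt and model configuration for the given variables.
# The key and variable name match the joke_generator example above.
prompt_args = get_prompt(key="joke_generator", variables={"persona": "pirate"})

# prompt_args carries the messages and model settings, so it can be
# unpacked directly into the chat completions call.
completion = client.chat.completions.create(**prompt_args)
print(completion.choices[0].message.content)
```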
\ No newline at end of file diff --git a/fern/pages/openllmetry/integrations/axiom.mdx b/fern/pages/openllmetry/integrations/axiom.mdx index ba52d83..61f7a34 100644 --- a/fern/pages/openllmetry/integrations/axiom.mdx +++ b/fern/pages/openllmetry/integrations/axiom.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with Axiom and OpenLLMetry +title: LLM Observability with Axiom and OpenLLMetry --- diff --git a/fern/pages/openllmetry/integrations/azure-insights.mdx b/fern/pages/openllmetry/integrations/azure-insights.mdx index 61407f0..48b7e76 100644 --- a/fern/pages/openllmetry/integrations/azure-insights.mdx +++ b/fern/pages/openllmetry/integrations/azure-insights.mdx @@ -1,5 +1,5 @@ --- -excerpt: Azure Application Insights +title: Azure Application Insights --- Traceloop supports sending traces to Azure Application Insights via standard OpenTelemetry integrations. diff --git a/fern/pages/openllmetry/integrations/datadog.mdx b/fern/pages/openllmetry/integrations/datadog.mdx index fc92e25..ad4d2b5 100644 --- a/fern/pages/openllmetry/integrations/datadog.mdx +++ b/fern/pages/openllmetry/integrations/datadog.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with Datadog and OpenLLMetry +title: LLM Observability with Datadog and OpenLLMetry --- With datadog, there are 2 options - you can either export directly to a Datadog Agent in your cluster, or through an OpenTelemetry Collector (which requires that you deploy one in your cluster). diff --git a/fern/pages/openllmetry/integrations/dynatrace.mdx b/fern/pages/openllmetry/integrations/dynatrace.mdx index 12f5eb4..94f7c9d 100644 --- a/fern/pages/openllmetry/integrations/dynatrace.mdx +++ b/fern/pages/openllmetry/integrations/dynatrace.mdx @@ -1,27 +1,27 @@ --- -excerpt: LLM Observability with Dynatrace and OpenLLMetry +title: LLM Observability with Dynatrace and OpenLLMetry --- ![integrations-dynatrace](https://fern-image-hosting.s3.amazonaws.com/traceloop/integrations-dynatrace.png) -Analyze all collected LLM traces within Dynatrace by using the native OpenTelemetry ingest endpoint of your Dynatrace environment. +- Analyze all collected LLM traces within Dynatrace by using the native OpenTelemetry ingest endpoint of your Dynatrace environment. -Go to your Dynatrace environment and create a new access token under **Manage Access Tokens**. +- Go to your Dynatrace environment and create a new access token under **Manage Access Tokens**. -The access token needs the following permission scopes that allow the ingest of OpenTelemetry spans, metrics and logs (`openTelemetryTrace.ingest`, `metrics.ingest`, `logs.ingest`). +- The access token needs the following permission scopes that allow the ingest of OpenTelemetry spans, metrics and logs (`openTelemetryTrace.ingest`, `metrics.ingest`, `logs.ingest`). -Set `TRACELOOP_BASE_URL` environment variable to the URL of your Dynatrace OpenTelemetry ingest endpoint. +- Set `TRACELOOP_BASE_URL` environment variable to the URL of your Dynatrace OpenTelemetry ingest endpoint. ``` TRACELOOP_BASE_URL=https://.live.dynatrace.com\api\v2\otlp ``` -Set the `TRACELOOP_HEADERS` environment variable to include your previously created access token +- Set the `TRACELOOP_HEADERS` environment variable to include your previously created access token ``` TRACELOOP_HEADERS=Authorization=Api-Token%20 ``` -You're all set! All the exported spans along with their span attributes will show up within the Dynatrace trace view. \ No newline at end of file +- You're all set! 
All the exported spans along with their span attributes will show up within the Dynatrace trace view. \ No newline at end of file diff --git a/fern/pages/openllmetry/integrations/grafana.mdx b/fern/pages/openllmetry/integrations/grafana.mdx index 2202ca6..0dca010 100644 --- a/fern/pages/openllmetry/integrations/grafana.mdx +++ b/fern/pages/openllmetry/integrations/grafana.mdx @@ -1,24 +1,8 @@ --- -excerpt: LLM Observability with Grafana and OpenLLMetry +title: LLM Observability with Grafana and OpenLLMetry --- -## Access Grafana Cloud Account for Tempo Integration - -Go to the Grafana Cloud account page under `https://grafana.com/orgs/`, and click on **Send Traces** under Tempo - -### Retrieve URL from Grafana Data Source Settings - -In **Grafana Data Source settings**, note the **URL** value - -### Generate API Key for Tempo Integration - -Click **Generate now** to generate an API key and copy it - -### Record Stack ID for Integration Configuration - -Note also the **Stack ID** value. - -You can find it in the URL `https://grafana.com/orgs//stacks/`. +First, go to the Grafana Cloud account page under `https://grafana.com/orgs/`, and click on **Send Traces** under Tempo. In **Grafana Data Source settings**, note the **URL** value. Click **Generate now** to generate an API key and copy it. Note also the **Stack ID** value. You can find it in the URL `https://grafana.com/orgs//stacks/`. ## With Grafana Agent @@ -41,7 +25,7 @@ traces: grpc: ``` - + Note the endpoint. The URL you need to use is without `https` and the trailing `/`. So `https://tempo-us-central1.grafana.net/tempo` should be used as `tempo-us-central1.grafana.net:443`. diff --git a/fern/pages/openllmetry/integrations/honeycomb.mdx b/fern/pages/openllmetry/integrations/honeycomb.mdx index b9a7933..750b623 100644 --- a/fern/pages/openllmetry/integrations/honeycomb.mdx +++ b/fern/pages/openllmetry/integrations/honeycomb.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with Honeycomb and OpenLLMetry +title: LLM Observability with Honeycomb and OpenLLMetry --- diff --git a/fern/pages/openllmetry/integrations/hyperdx.mdx b/fern/pages/openllmetry/integrations/hyperdx.mdx index f4fa595..3291a6b 100644 --- a/fern/pages/openllmetry/integrations/hyperdx.mdx +++ b/fern/pages/openllmetry/integrations/hyperdx.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with HyperDX and OpenLLMetry +title: LLM Observability with HyperDX and OpenLLMetry --- diff --git a/fern/pages/openllmetry/integrations/instana.mdx b/fern/pages/openllmetry/integrations/instana.mdx index 3b12f32..f894c86 100644 --- a/fern/pages/openllmetry/integrations/instana.mdx +++ b/fern/pages/openllmetry/integrations/instana.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with Instana and OpenLLMetry +title: LLM Observability with Instana and OpenLLMetry --- @@ -8,8 +8,6 @@ excerpt: LLM Observability with Instana and OpenLLMetry With Instana, you can export directly to an Instana Agent in your cluster. The Instana Agent will report back the tracing and metrics to the Instana Backend and display them on the Instana UI. -## Edit the agent config file - After an Instana OS agent is installed, edit the agent config file `configuration.yaml` under the `/opt/instana/agent/etc/instana folder`. 
```bash @@ -17,7 +15,7 @@ cd /opt/instana/agent/etc/instana vi configuration.yaml ``` -## Add the following to the file +Add the following to the file: ```yaml com.instana.plugin.opentelemetry: @@ -26,7 +24,7 @@ com.instana.plugin.opentelemetry: enabled: true ``` -## Restart the Instana agent +Restart the Instana agent: ``` systemctl restart instana-agent.service @@ -34,8 +32,6 @@ systemctl restart instana-agent.service The Instana agent should be ready for OpenTelemetry data at `port 4317`. -## Set your TRACELOOP_BASE_URL variable - Finally, set this env var, and you’re done! ``` diff --git a/fern/pages/openllmetry/integrations/new-relic.mdx b/fern/pages/openllmetry/integrations/new-relic.mdx index 6c054f9..d04077a 100644 --- a/fern/pages/openllmetry/integrations/new-relic.mdx +++ b/fern/pages/openllmetry/integrations/new-relic.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM observability with New Relic and OpenLLMetry +title: LLM observability with New Relic and OpenLLMetry --- diff --git a/fern/pages/openllmetry/integrations/opentelemetry-collector.mdx b/fern/pages/openllmetry/integrations/opentelemetry-collector.mdx index 065810a..10ce608 100644 --- a/fern/pages/openllmetry/integrations/opentelemetry-collector.mdx +++ b/fern/pages/openllmetry/integrations/opentelemetry-collector.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM observability with OpenTelemetry Collector +title: LLM observability with OpenTelemetry Collector --- Since Traceloop is emitting standard OTLP HTTP (standard OpenTelemetry protocol), you can use any OpenTelemetry Collector, which gives you the flexibility to then connect to any backend you want. First, [deploy an OpenTelemetry Collector](https://opentelemetry.io/docs/kubernetes/operator/automatic/#create-an-opentelemetry-collector-optional) in your cluster. 
Then, point the output of the Traceloop SDK to the collector by setting: diff --git a/fern/pages/openllmetry/integrations/overview.mdx b/fern/pages/openllmetry/integrations/overview.mdx index 0ace824..b400b4d 100644 --- a/fern/pages/openllmetry/integrations/overview.mdx +++ b/fern/pages/openllmetry/integrations/overview.mdx @@ -17,7 +17,7 @@ Since Traceloop SDK is using OpenTelemetry under the hood, you can see everythin - + diff --git a/fern/pages/openllmetry/integrations/sentry.mdx b/fern/pages/openllmetry/integrations/sentry.mdx index c7eaff7..d32aefa 100644 --- a/fern/pages/openllmetry/integrations/sentry.mdx +++ b/fern/pages/openllmetry/integrations/sentry.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM observability with Sentry +title: LLM Observability with Sentry and OpenLLMetry --- ## Install Sentry SDK with OpenTelemetry support diff --git a/fern/pages/openllmetry/integrations/service-now.mdx b/fern/pages/openllmetry/integrations/service-now.mdx index 3c01cd5..c4f73bf 100644 --- a/fern/pages/openllmetry/integrations/service-now.mdx +++ b/fern/pages/openllmetry/integrations/service-now.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with Service Now Cloud Observability and OpenLLMetry +title: LLM Observability with Service Now Cloud Observability and OpenLLMetry --- diff --git a/fern/pages/openllmetry/integrations/signoz.mdx b/fern/pages/openllmetry/integrations/signoz.mdx index 6acdea1..6043a54 100644 --- a/fern/pages/openllmetry/integrations/signoz.mdx +++ b/fern/pages/openllmetry/integrations/signoz.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with SigNoz and OpenLLMetry +title: LLM Observability with SigNoz and OpenLLMetry --- diff --git a/fern/pages/openllmetry/integrations/splunk.mdx b/fern/pages/openllmetry/integrations/splunk.mdx index b0c0763..21903d6 100644 --- a/fern/pages/openllmetry/integrations/splunk.mdx +++ b/fern/pages/openllmetry/integrations/splunk.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with Splunk and OpenLLMetr +excerpt: LLM Observability with Splunk and OpenLLMetry --- @@ -8,8 +8,6 @@ excerpt: LLM Observability with Splunk and OpenLLMetr Collecting and analyzing LLM traces in [Splunk Observability Cloud](https://www.splunk.com/en_us/products/observability.html) can be achieved by configuring the `TRACELOOP_BASE_URL` environment variable to point to the [Splunk OpenTelemetry Collector](https://github.com/signalfx/splunk-otel-collector/releases) OTLP endpoint. -## Configure Collector for OTLP Reception - Have the Collector run in agent or gateway mode and ensure the OTLP receiver is configured, see [Get data into Splunk Observability Cloud](https://docs.splunk.com/observability/en/gdi/get-data-in/get-data-in.html). 
```yaml @@ -22,9 +20,7 @@ receivers: endpoint: "0.0.0.0:4318" ``` -## Set Up OTLP Exporter for Splunk Cloud - -Ensure the OTLP exporter is configured to send to Splunk Observability Cloud: +Secondly, ensure the OTLP exporter is configured to send to Splunk Observability Cloud: ```yaml exporters: @@ -36,9 +32,7 @@ exporters: num_consumers: 32 ``` -## Integrate OTLP in Traces Pipeline - -Make sure otlp is defined in the traces pipeline: +Thirdly, make sure otlp is defined in the traces pipeline: ```yaml pipelines: @@ -51,9 +45,7 @@ Make sure otlp is defined in the traces pipeline: exporters: [sapm] ``` -## Define Endpoint Environment Variable - -Define the `TRACELOOP_BASE_URL` environment variable to point to the Splunk OpenTelemetry Collector OTLP endpoint: +Finally, define the `TRACELOOP_BASE_URL` environment variable to point to the Splunk OpenTelemetry Collector OTLP endpoint: ``` TRACELOOP_BASE_URL=http://:4318 diff --git a/fern/pages/openllmetry/integrations/traceloop.mdx b/fern/pages/openllmetry/integrations/traceloop.mdx index 3c88ed1..7296d59 100644 --- a/fern/pages/openllmetry/integrations/traceloop.mdx +++ b/fern/pages/openllmetry/integrations/traceloop.mdx @@ -1,5 +1,5 @@ --- -excerpt: LLM Observability with Traceloop +title: LLM Observability with Traceloop --- diff --git a/fern/pages/openllmetry/privacy/prompts-completions-embeddings.mdx b/fern/pages/openllmetry/privacy/prompts-completions-embeddings.mdx index ad576cd..c395ad9 100644 --- a/fern/pages/openllmetry/privacy/prompts-completions-embeddings.mdx +++ b/fern/pages/openllmetry/privacy/prompts-completions-embeddings.mdx @@ -54,7 +54,7 @@ You can decide to selectively enable or disable prompt logging for specific user ### Using the Traceloop Platform -We have an API to enable content tracing for specific users, as defined by [association entities](/docs/openllmetry/tracing/associating-entities-with-traces). See the [Traceloop API documentation](/dashboard-api/endpoints) for more information. +We have an API to enable content tracing for specific users, as defined by [association entities](/docs/openllmetry/tracing/associating-entities-with-traces). See the [Traceloop API documentation](/docs/dashboard-api/endpoints) for more information. ### Without the Traceloop Platform diff --git a/fern/pages/openllmetry/privacy/telemetry.mdx b/fern/pages/openllmetry/privacy/telemetry.mdx index c183ee8..aaa5927 100644 --- a/fern/pages/openllmetry/privacy/telemetry.mdx +++ b/fern/pages/openllmetry/privacy/telemetry.mdx @@ -1,6 +1,6 @@ OpenLLMetry contains a telemetry feature that collects anonymous usage information. - + Not to be confused with OpenTelemetry. Telemetry refers to anonymous product usage statistics we collect. It is a completely different stream of data, and is not related to OpenTelemetry, traces, or instrumentations. diff --git a/fern/pages/openllmetry/quickstart/go.mdx b/fern/pages/openllmetry/quickstart/go.mdx index 2d696a2..1a2d3fc 100644 --- a/fern/pages/openllmetry/quickstart/go.mdx +++ b/fern/pages/openllmetry/quickstart/go.mdx @@ -2,8 +2,9 @@ excerpt: Install OpenLLMetry for Go by following these 3 easy steps and get instant monitoring. 
--- + -## Install the SDK +### Install the SDK Run the following command in your terminal: @@ -27,7 +28,7 @@ func main() { } ``` -## Log your prompts +### Log your prompts ![openllmetry-go](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-go.png) @@ -95,13 +96,13 @@ func call_llm() { ``` -## Configure Trace Exporting +### Configure Trace Exporting Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`. -For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview). +For Traceloop, read on. For other options, see [Exporting](/openllmetry/integrations/overview). -## Using Traceloop Cloud +

### Using Traceloop Cloud

Go to [Traceloop](https://app.traceloop.com/), and create a new account. Then, click on **Environments** on the left-hand navigation bar. Or go to directly to https://app.traceloop.com/settings/api-keys. Click **Generate API Key** to generate an API key for the development environment and click **Copy API Key** to copy it over. @@ -115,4 +116,6 @@ Make sure to copy it as it won’t be shown again. Set the copied Traceloop’s API key as an environment variable in your app named `TRACELOOP_API_KEY`. -You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard. \ No newline at end of file +You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard. + +
\ No newline at end of file diff --git a/fern/pages/openllmetry/quickstart/next.mdx b/fern/pages/openllmetry/quickstart/next.mdx index b3cac67..6caf493 100644 --- a/fern/pages/openllmetry/quickstart/next.mdx +++ b/fern/pages/openllmetry/quickstart/next.mdx @@ -4,7 +4,8 @@ excerpt: Install OpenLLMetry for Next.js by following these 3 easy steps and get You can check out our full working example with Next.js 13 [here](https://github.com/traceloop/openllmetry-nextjs-demo). -## Install the SDK + +### Install the SDK Run the following command in your terminal: @@ -26,8 +27,8 @@ Run the following command in your terminal: - -### With Pages Router + + Create a file named `instrumentation.ts` in the root of your project (i.e., outside of the `pages` or `app` directory) and add the following code: ```javascript @@ -78,74 +79,75 @@ Run the following command in your terminal: See official Next.js [OpenTelemetry docs](https://nextjs.org/docs/pages/building-your-application/optimizing/open-telemetry) for more information. + + + Install the following packages by running the following commands in your terminal: + + + + ```bash + npm install --save-dev node-loader + npm i supports-color@8.1.1 + ``` + + + ```bash + pnpm add -D node-loader + pnpm add supports-color@8.1.1 + ``` + + + ```bash + yarn add -D node-loader + yarn add supports-color@8.1.1 + ``` + + + + Edit your `next.config.js` file and add the following webpack configuration: -### With App Router + ```javascript + const nextConfig = { + webpack: (config, { isServer }) => { + config.module.rules.push({ + test: /\.node$/, + loader: "node-loader", + }); + if (isServer) { + config.ignoreWarnings = [{ module: /opentelemetry/ }]; + } + return config; + }, + }; + ``` -Install the following packages by running the following commands in your terminal: + On every app API route you want to instrument, add the following code at the top of the file: - - - ```bash - npm install --save-dev node-loader - npm i supports-color@8.1.1 - ``` - - - ```bash - pnpm add -D node-loader - pnpm add supports-color@8.1.1 - ``` - - - ```bash - yarn add -D node-loader - yarn add supports-color@8.1.1 - ``` - - - -Edit your `next.config.js` file and add the following webpack configuration: + ```javascript + import * as traceloop from "@traceloop/node-server-sdk"; + import OpenAI from "openai"; + // Make sure to import the entire module you want to instrument, like this: + // import * as LlamaIndex from "llamaindex"; -```javascript -const nextConfig = { -webpack: (config, { isServer }) => { - config.module.rules.push({ - test: /\.node$/, - loader: "node-loader", + traceloop.initialize({ + appName: "app", + disableBatch: true, + instrumentModules: { + openAI: OpenAI, + // Add any other modules you'd like to instrument here + // for example: + // llamaIndex: LlamaIndex, + }, }); - if (isServer) { - config.ignoreWarnings = [{ module: /opentelemetry/ }]; - } - return config; -}, -}; -``` - -On every app API route you want to instrument, add the following code at the top of the file: - -```javascript -import * as traceloop from "@traceloop/node-server-sdk"; -import OpenAI from "openai"; -// Make sure to import the entire module you want to instrument, like this: -// import * as LlamaIndex from "llamaindex"; - -traceloop.initialize({ - appName: "app", - disableBatch: true, - instrumentModules: { - openAI: OpenAI, - // Add any other modules you'd like to instrument here - // for example: - // llamaIndex: LlamaIndex, - }, -}); -``` + ``` - -See official Next.js [OpenTelemetry 
docs](https://nextjs.org/docs/pages/building-your-application/optimizing/open-telemetry) for more information. - + + See official Next.js [OpenTelemetry docs](https://nextjs.org/docs/pages/building-your-application/optimizing/open-telemetry) for more information. + + + -## Annotate your workflows +### Annotate your workflows ![openllmetry-next](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-next.png) @@ -157,7 +159,7 @@ We have a set of [methods and decorators](/docs/openllmetry/tracing/workflows-ta We also have compatible Typescript decorators for class methods which are more convenient. - + If you’re using an LLM framework like Haystack, Langchain or LlamaIndex - we’ll do that for you. No need to add any annotations to your code. @@ -183,15 +185,15 @@ If you’re using an LLM framework like Haystack, Langchain or LlamaIndex - we -For more information, see the [dedicated section in the docs](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools). +For more information, see the [dedicated section in the docs](/openllmetry/tracing/workflows-tasks-agents-and-tools). -## Configure Trace Exporting +### Configure Trace Exporting Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`. -For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview). +For Traceloop, read on. For other options, see [Exporting](/openllmetry/integrations/overview). -## Using Traceloop Cloud +

### Using Traceloop Cloud

Go to [Traceloop](https://app.traceloop.com/), and create a new account. Then, click on **Environments** on the left-hand navigation bar. Or go to directly to https://app.traceloop.com/settings/api-keys. Click **Generate API Key** to generate an API key for the development environment and click **Copy API Key** to copy it over. @@ -206,4 +208,4 @@ Make sure to copy it as it won’t be shown again. Set the copied Traceloop’s API key as an environment variable in your app named `TRACELOOP_API_KEY`. You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard. - +
\ No newline at end of file diff --git a/fern/pages/openllmetry/quickstart/node.mdx b/fern/pages/openllmetry/quickstart/node.mdx index 71f08b9..2695148 100644 --- a/fern/pages/openllmetry/quickstart/node.mdx +++ b/fern/pages/openllmetry/quickstart/node.mdx @@ -2,11 +2,12 @@ excerpt: Install OpenLLMetry for Node.js by following these 3 easy steps and get instant monitoring. --- - -If you’re on Next.js, follow the [Next.js guide](/next). + +If you’re on Next.js, follow the [Next.js guide](/docs/openllmetry/quick-start/next-js). -## Install the SDK + +### Install the SDK Run the following command in your terminal: @@ -45,7 +46,7 @@ If you’re running this locally, you may want to disable batch sending, so you traceloop.initialize({ disableBatch: true }); ``` -## Annotate your workflows +### Annotate your workflows ![openllmetry-node](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-node.png) @@ -83,13 +84,13 @@ If you’re using an LLM framework like Haystack, Langchain or LlamaIndex - we -For more information, see the [dedicated section in the docs](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools). +For more information, see the [dedicated section in the docs](/openllmetry/tracing/workflows-tasks-agents-and-tools). -## Configure Trace Exporting +### Configure Trace Exporting Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`. -For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview). +For Traceloop, read on. For other options, see [Exporting](/openllmetry/integrations/overview). ## Using Traceloop Cloud @@ -105,4 +106,5 @@ Make sure to copy it as it won’t be shown again. Set the copied Traceloop’s API key as an environment variable in your app named `TRACELOOP_API_KEY`. -You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard. \ No newline at end of file +You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard. + \ No newline at end of file diff --git a/fern/pages/openllmetry/quickstart/python.mdx b/fern/pages/openllmetry/quickstart/python.mdx index 6334c52..028c1fa 100644 --- a/fern/pages/openllmetry/quickstart/python.mdx +++ b/fern/pages/openllmetry/quickstart/python.mdx @@ -4,7 +4,8 @@ excerpt: Install OpenLLMetry for Python by following these 3 easy steps and get You can also check out our full working example of a RAG pipeline with Pinecone [here](https://github.com/traceloop/pinecone-demo). -## Install the SDK + +### Install the SDK Run the following command in your terminal: @@ -35,7 +36,7 @@ If you’re running this locally, you may want to disable batch sending, so you Traceloop.init(disable_batch=True) ``` -## Annotate your workflows +### Annotate your workflows ![openllmetry-python](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-python.png) @@ -45,7 +46,7 @@ If you have complex workflows or chains, you can annotate them to get a better u We have a set of [decorators](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools) to make this easier. Assume you have a function that renders a prompt and calls an LLM, simply add `@workflow` (or for asynchronous methods - `@aworkflow`). 
- + If you’re using an LLM framework like Haystack, Langchain or LlamaIndex - we’ll do that for you. No need to add any annotations to your code. @@ -60,13 +61,12 @@ def suggest_answers(question: str): For more information, see the [dedicated section in the docs](/docs/openllmetry/tracing/workflows-tasks-agents-and-tools). -## Configure trace exporting +### Configure trace exporting Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`. For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview). - ## Using Traceloop Cloud Go to [Traceloop](https://app.traceloop.com/), and create a new account. Then, click on **Environments** on the left-hand navigation bar. Or go to directly to https://app.traceloop.com/settings/api-keys. Click **Generate API Key** to generate an API key for the developement environment and click **Copy API Key** to copy it over. @@ -81,4 +81,6 @@ Make sure to copy it as it won’t be shown again. Set the copied Traceloop’s API key as an environment variable in your app named `TRACELOOP_API_KEY`. -You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard. \ No newline at end of file +You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard. + + \ No newline at end of file diff --git a/fern/pages/openllmetry/quickstart/ruby.mdx b/fern/pages/openllmetry/quickstart/ruby.mdx index 4da7849..1379aa9 100644 --- a/fern/pages/openllmetry/quickstart/ruby.mdx +++ b/fern/pages/openllmetry/quickstart/ruby.mdx @@ -2,11 +2,12 @@ excerpt: Install OpenLLMetry for Ruby by following these 3 easy steps and get instant monitoring. --- - + This is still in beta. Give us feedback at [dev@traceloop.com](mailto:dev@traceloop.com) -## Install the SDK + +### Install the SDK Run the following command in your terminal: @@ -35,7 +36,7 @@ traceloop = Traceloop::SDK::Traceloop.new If you’re using Rails, this needs to be in `config/initializers/traceloop.rb` -## Log your prompts +### Log your prompts ![openllmetry-ruby](https://fern-image-hosting.s3.amazonaws.com/traceloop/openllmetry-ruby.png) @@ -67,13 +68,13 @@ traceloop.llm_call() do |tracer| end ``` -## Configure Trace Exporting +### Configure Trace Exporting Lastly, you’ll need to configure where to export your traces. The 2 environment variables controlling this are `TRACELOOP_API_KEY` and `TRACELOOP_BASE_URL`. For Traceloop, read on. For other options, see [Exporting](/docs/openllmetry/integrations/overview). -## Using Traceloop Cloud +

### Using Traceloop Cloud

Go to [Traceloop](https://app.traceloop.com/), and create a new account. Then, click on **Environments** on the left-hand navigation bar. Or go to directly to https://app.traceloop.com/settings/api-keys. Click **Generate API Key** to generate an API key for the development environment and click **Copy API Key** to copy it over. @@ -88,3 +89,4 @@ Make sure to copy it as it won’t be shown again. Set the copied Traceloop’s API key as an environment variable in your app named `TRACELOOP_API_KEY`. You're all set! You’ll get instant visibility into everything that’s happening with your LLM. If you’re calling a vector DB, or any other external service or database, you’ll also see it in the Traceloop dashboard. + \ No newline at end of file diff --git a/fern/pages/openllmetry/quickstart/sdk-initialization.mdx b/fern/pages/openllmetry/quickstart/sdk-initialization.mdx index bef81c1..413813b 100644 --- a/fern/pages/openllmetry/quickstart/sdk-initialization.mdx +++ b/fern/pages/openllmetry/quickstart/sdk-initialization.mdx @@ -8,6 +8,8 @@ Most configuration options can be set via environment variables or via the SDK The SDK initialization options always take precedence over the environment variables.
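For instance, here is a minimal sketch of that precedence rule, assuming the Python SDK and its `app_name` option (the environment variable name used here is illustrative — see the sections below for the exact names):

```python
import os
from traceloop.sdk import Traceloop

# Suppose the environment sets one application name
# (variable name shown for illustration only)...
os.environ["TRACELOOP_APP_NAME"] = "name-from-env"

# ...the explicit initialization option still wins, so traces
# are reported under "name-from-init".
Traceloop.init(app_name="name-from-init")
```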
+See below for the list of options. + ## Application Name You can customize the application name that will be logged with the traces. This is useful to identify if you have multiple services with OpenLLMetry installed. @@ -237,7 +239,4 @@ from traceloop.sdk.instruments import Instruments Traceloop.init(instruments={Instruments.OPENAI, Instruments.PINECONE}) ``` - - - - + \ No newline at end of file diff --git a/fern/pages/openllmetry/tracing/workflows-tasks-agents-tools.mdx b/fern/pages/openllmetry/tracing/workflows-tasks-agents-tools.mdx index 2423342..c159995 100644 --- a/fern/pages/openllmetry/tracing/workflows-tasks-agents-tools.mdx +++ b/fern/pages/openllmetry/tracing/workflows-tasks-agents-tools.mdx @@ -16,7 +16,8 @@ If you’re using a framework like Langchain, Haystack or LlamaIndex - no need t Sometimes called a “chain”, intended for a multi-step process that can be traced as a single unit. -### Python + + Use it as `@workflow(name="my_workflow")` or `@task(name="my_task")`. @@ -56,57 +57,9 @@ def joke_workflow(): print(pirate_joke + "\n\n" + signature) ``` - -### Typescript/Javascript - -Use it as `withWorkflow("my_workflow", {}, () => ...)` or `withTask(name="my_task", () => ...)`. The function passed to `withWorkflow` or `withTask` will be part of the workflow or task and can be async or sync. - -```Typescript -import * as traceloop from "@traceloop/node-server-sdk"; - -async function create_joke() { - return await traceloop.withTask({ name: "joke_creation" }, async () => { - completion = await openai.chat.completions({ - model: "gpt-3.5-turbo", - messages: [ - { role: "user", content: "Tell me a joke about opentelemetry" }, - ], - }); - - return completion.choices[0].message.content; - }); -} - -async function generate_signature(joke: string) { - return await traceloop.withTask( - { name: "signature_generation" }, - async () => { - completion = await openai.completions.create({ - model: "davinci-002", - prompt: "add a signature to the joke:\n\n" + joke, - }); - - return completion.choices[0].text; - } - ); -} - -async function joke_workflow() { - return await traceloop.withWorkflow( - { name: "pirate_joke_generator" }, - async () => { - eng_joke = create_joke(); - pirate_joke = await translate_joke_to_pirate(eng_joke); - signature = await generate_signature(pirate_joke); - console.log(pirate_joke + "\n\n" + signature); - } - ); -} -``` - -### Typescript with decorators - - + + + This feature is only available in Typescript. Unless you’re on Nest.js, you’ll need to update your `tsconfig.json` to enable decorators. @@ -122,7 +75,7 @@ Update `tsconfig.json` to enable decorators: Use it in your code `@traceloop.workflow({ name: "my_workflow" })` for class methods only. We will consider every call to OpenAI as a distinct step (or task). You can even annotate the task with a name, using `@traceloop.task("my_task")`. - + The `name` argument is optional. If you don’t provide it, we will use the function name as the workflow or task name. @@ -161,11 +114,59 @@ class JokeCreation { } } ``` + + +Use it as `withWorkflow("my_workflow", {}, () => ...)` or `withTask(name="my_task", () => ...)`. The function passed to `withWorkflow` or `withTask` will be part of the workflow or task and can be async or sync. 
-## Agents and Tools +```Typescript +import * as traceloop from "@traceloop/node-server-sdk"; + +async function create_joke() { + return await traceloop.withTask({ name: "joke_creation" }, async () => { + completion = await openai.chat.completions({ + model: "gpt-3.5-turbo", + messages: [ + { role: "user", content: "Tell me a joke about opentelemetry" }, + ], + }); + + return completion.choices[0].message.content; + }); +} + +async function generate_signature(joke: string) { + return await traceloop.withTask( + { name: "signature_generation" }, + async () => { + completion = await openai.completions.create({ + model: "davinci-002", + prompt: "add a signature to the joke:\n\n" + joke, + }); + + return completion.choices[0].text; + } + ); +} + +async function joke_workflow() { + return await traceloop.withWorkflow( + { name: "pirate_joke_generator" }, + async () => { + eng_joke = create_joke(); + pirate_joke = await translate_joke_to_pirate(eng_joke); + signature = await generate_signature(pirate_joke); + console.log(pirate_joke + "\n\n" + signature); + } + ); +} +``` + + -### Python +## Agents and Tools + + Similarily, if you use autonomous agents, you can use the `@agent` decorator to trace them as a single unit. Each tool should be marked with `@tool`. ```python @@ -195,43 +196,11 @@ def history_jokes_tool(): return completion.choices[0].message.content ``` - -### Javascript/Typescript - -Similarily, if you use autonomous agents, you can use the `withAgent` to trace them as a single unit. Each tool should be in `withTool`. - -```typescript -import * as traceloop from "@traceloop/node-server-sdk"; - -async function translate_joke_to_pirate(joke: str) { - return await withAgent({name: "joke_translation" }, () => { - completion = await openai.chat.completions.create({ - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": f"Translate the below joke to pirate-like english:\n\n{joke}"}], - }); - - history_jokes_tool(); - - return completion.choices[0].message.content; - }); -} - -async function history_jokes_tool() { - return await withTool({ name: "history_jokes" }, () => { - completion = await openai.chat.completions.create({ - model="gpt-3.5-turbo", - messages=[{"role": "user", "content": f"get some history jokes"}], - }); - - return completion.choices[0].message.content; - }); -} -``` - -### Typescript with Decorators + + Similarily, if you use autonomous agents, you can use the `@agent` decorator to trace them as a single unit. Each tool should be marked with `@tool`. - + If you’re not on Nest.js, remember to set `experimentalDecorators` to `true` in your `tsconfig.json`. @@ -262,6 +231,39 @@ class Agent { } } ``` + + +Similarily, if you use autonomous agents, you can use the `withAgent` to trace them as a single unit. Each tool should be in `withTool`. 
+ +```typescript +import * as traceloop from "@traceloop/node-server-sdk"; + +async function translate_joke_to_pirate(joke: str) { + return await withAgent({name: "joke_translation" }, () => { + completion = await openai.chat.completions.create({ + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": f"Translate the below joke to pirate-like english:\n\n{joke}"}], + }); + + history_jokes_tool(); + + return completion.choices[0].message.content; + }); +} + +async function history_jokes_tool() { + return await withTool({ name: "history_jokes" }, () => { + completion = await openai.chat.completions.create({ + model="gpt-3.5-turbo", + messages=[{"role": "user", "content": f"get some history jokes"}], + }); + + return completion.choices[0].message.content; + }); +} +``` + + ## Async methods