diff --git a/.github/workflows/primary.yml b/.github/workflows/primary.yml
index b674ae321..f7ee7649c 100644
--- a/.github/workflows/primary.yml
+++ b/.github/workflows/primary.yml
@@ -10,6 +10,7 @@ on:
- ".github/workflows/primary.yml"
branches:
- canary
+ workflow_dispatch: {}
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index b58a9eaf5..bfde59405 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -11,9 +11,12 @@ concurrency:
cancel-in-progress: true
permissions:
- contents: read
+ contents: write
id-token: write
-
+env:
+ DEBUG: napi:*
+ APP_NAME: baml
+ MACOSX_DEPLOYMENT_TARGET: "10.13"
jobs:
build-wasm:
runs-on: ubuntu-latest
@@ -39,7 +42,7 @@ jobs:
uses: actions/setup-node@v3
with:
cache: "pnpm"
- node-version: 18
+ node-version: 20
cache-dependency-path: |
typescript/**/pnpm-lock.yaml
- name: Install Dependencies
@@ -63,7 +66,8 @@ jobs:
uses: actions/upload-artifact@v4
with:
name: baml-vscode.vsix
- path: typescript/vscode-ext/packages/baml-${{ steps.build.outputs.version }}.vsix
+ path: typescript/vscode-ext/packages/baml-extension-${{ steps.build.outputs.version }}.vsix
+ if-no-files-found: error
# Upload the artifact (helpful for debugging and manual downloads)
- name: Upload VSCode Extension Artifact
@@ -71,6 +75,7 @@ jobs:
with:
name: baml-out
path: typescript/vscode-ext/packages/vscode/out
+ if-no-files-found: error
# upload the lang server artifact
- name: Upload VSCode Lang Server Extension Artifact
@@ -78,12 +83,14 @@ jobs:
with:
name: language-server
path: typescript/vscode-ext/packages/language-server/out
+ if-no-files-found: error
- name: VSCode Playground Artifact
uses: actions/upload-artifact@v4
with:
name: vscode-playground
path: typescript/vscode-ext/packages/web-panel/dist
+ if-no-files-found: error
build-release:
strategy:
@@ -99,9 +106,10 @@ jobs:
# host: windows-latest
# node_build: pnpm build --target aarch64-pc-windows-msvc
- - target: aarch64-unknown-linux-gnu
- host: ubuntu-latest
- node_build: pnpm build --target aarch64-unknown-linux-gnu --use-napi-cross
+ # maturin doesn't support aarch64-linux-gnu
+ # - target: aarch64-unknown-linux-gnu
+ # host: ubuntu-latest
+ # node_build: pnpm build --target aarch64-unknown-linux-gnu --use-napi-cross
- target: x86_64-apple-darwin
host: macos-latest
@@ -135,7 +143,6 @@ jobs:
cache: pnpm
cache-dependency-path: |
engine/language_client_typescript/pnpm-lock.yaml
- typescript/**/pnpm-lock.yaml
# Install rust
- uses: dtolnay/rust-toolchain@stable
with:
@@ -206,8 +213,9 @@ jobs:
python-version: "3.8"
- uses: actions/download-artifact@v4
with:
- name: wheels-*
+ pattern: wheels-*
+ - run: mkdir dist && mv wheels-*/* dist
# authz is managed via OIDC configured at https://pypi.org/manage/project/baml-py/settings/publishing/
# it is pinned to this github actions filename, so renaming this file is not safe!!
- name: Publish package to PyPI
@@ -218,6 +226,8 @@ jobs:
environment: release
needs: [build-release, build-wasm]
runs-on: ubuntu-latest
+ env:
+ NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }}
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
@@ -245,7 +255,8 @@ jobs:
- uses: actions/download-artifact@v4
with:
- name: bindings-*
+ pattern: bindings-*
+ path: engine/language_client_typescript/artifacts
- name: create npm dirs
run: pnpm napi create-npm-dirs
@@ -259,68 +270,76 @@ jobs:
if: ${{ !startsWith(github.ref, 'refs/tags/test-release') }}
run: |
npm publish --access public
+ working-directory: engine/language_client_typescript
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
NPM_TOKEN: ${{ secrets.NPM_TOKEN }}
- publish-vscode:
- environment: release
- needs: [build-release, build-wasm]
- if: ${{ !startsWith(github.ref, 'refs/tags/test-release') }}
+ # publish-vscode:
+ # environment: release
+ # needs: [build-release, build-wasm]
+ # if: ${{ !startsWith(github.ref, 'refs/tags/test-release') }}
+ # runs-on: ubuntu-latest
+ # steps:
+ # - uses: actions/checkout@v4
+
+ # # Get all the artifacts
+ # - name: Get artifact
+ # uses: actions/download-artifact@v4
+ # with:
+ # name: baml-vscode.vsix
+ # path: typescript/vscode-ext/packages
+ # - name: Get artifact
+ # uses: actions/download-artifact@v4
+ # with:
+ # name: baml-out
+ # path: typescript/vscode-ext/packages/vscode/out
+ # - name: Get artifact
+ # uses: actions/download-artifact@v4
+ # with:
+ # name: language-server
+ # path: typescript/vscode-ext/packages/language-server/out
+ # - name: Get artifact
+ # uses: actions/download-artifact@v4
+ # with:
+ # pattern: vscode-playground
+ # path: typescript/vscode-ext/packages/web-panel/dist
+
+ # - name: setup pnpm
+ # uses: pnpm/action-setup@v3
+ # with:
+ # version: 9.0.6
+ # package_json_file: typescript/package.json
+ # run_install: false
+ # # Set up Node.js
+ # - name: Setup Node.js
+ # uses: actions/setup-node@v3
+ # with:
+ # cache: "pnpm"
+ # node-version: 20
+ # cache-dependency-path: typescript/pnpm-lock.yaml
+
+ # - name: Install Dependencies
+ # run: pnpm install --frozen-lockfile
+ # working-directory: typescript/
+ # - name: Publish
+ # if: ${{ !startsWith(github.ref, 'refs/tags/test-release') }}
+ # run: |
+ # pnpm run vscode:publish --no-git-tag-version -p ${{ secrets.VSCODE_PAT }}
+ # working-directory: typescript/vscode-ext/packages
+
+ release-github:
runs-on: ubuntu-latest
+ needs: [publish-to-pypi, publish-to-npm]
steps:
- uses: actions/checkout@v4
- # Get all the artifacts
- - name: Get artifact
- uses: actions/download-artifact@v4
- with:
- name: baml-vscode.vsix
- path: typescript/vscode-ext/packages
- - name: Get artifact
- uses: actions/download-artifact@v4
- with:
- name: baml-out
- path: typescript/vscode-ext/packages/vscode/out
- - name: Get artifact
- uses: actions/download-artifact@v4
- with:
- name: language-server
- path: typescript/vscode-ext/packages/language-server/out
- - name: Get artifact
- uses: actions/download-artifact@v4
- with:
- name: vscode-playground
- path: typescript/vscode-ext/packages/web-panel/dist
-
- # Set up Node.js
- - name: Setup Node.js
- uses: actions/setup-node@v3
- with:
- cache: "pnpm"
- node-version: 18
- cache-dependency-path: typescript/pnpm-lock.yaml
-
- - name: Install Dependencies
- run: pnpm install --frozen-lockfile
- working-directory: typescript/
- - name: Publish
- if: ${{ !startsWith(github.ref, 'refs/tags/test-release') }}
+ - name: Get Changelog
+ id: latest_release
run: |
- pnpm run vscode:publish --pre-release --no-git-tag-version -p ${{ secrets.VSCODE_PAT }}
- working-directory: typescript/vscode-ext/packages
-
- release-github:
- runs-on: ubuntu-latest
- needs: [publish-to-pypi, publish-to-npm, publish-vscode]
- steps:
- - name: Build Changelog
- id: github_release
- uses: mikepenz/release-changelog-builder-action@v4
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ echo "::set-output name=changelog::$(awk '/^## \[/{if (p) exit; p=1} p' CHANGELOG.md)"
- name: Create Release
- uses: mikepenz/action-gh-release@v0.2.0-a03 #softprops/action-gh-release
+ uses: mikepenz/action-gh-release@v1 #softprops/action-gh-release
with:
- body: ${{steps.github_release.outputs.changelog}}
+ body: ${{steps.latest_release.outputs.changelog}}
diff --git a/.gitignore b/.gitignore
index 15e8215d2..a82d4d444 100644
--- a/.gitignore
+++ b/.gitignore
@@ -122,6 +122,10 @@ $RECYCLE.BIN/
/dist
/node_modules
/out/
+engine/language_client_typescript/*.d.ts
+engine/language_client_typescript/*.d.ts.map
+engine/language_client_typescript/*.js
+!engine/language_client_typescript/cli.js
engine/language_client_ruby/**/*.bundle
engine/target/
Cargo.lock
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 000000000..a96417897
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,110 @@
+# Changelog
+
+All notable changes to this project will be documented in this file. See [conventional commits](https://www.conventionalcommits.org/) for commit guidelines.
+
+## [0.45.0](https://github.com/boundaryml/baml/compare/0.44.0..0.45.0) - 2024-06-29
+
+### Bug Fixes
+
+- Fixed streaming in Python Client which didn't show result until later (#726) - ([e4f2daa](https://github.com/boundaryml/baml/commit/e4f2daa9e85bb1711d112fb0c87c0d769be0bb2d)) - Anish Palakurthi
+- Improve playground stability on first load (#732) - ([2ac7b32](https://github.com/boundaryml/baml/commit/2ac7b328e89400cba0d9eb4f6d09c6a03feb71a5)) - Anish Palakurthi
+- Add improved static analysis for jinja (#734) - ([423faa1](https://github.com/boundaryml/baml/commit/423faa1af5a594b7f78f7bb5620e3146a8989da5)) - hellovai
+
+### Documentation
+
+- Docs for Dynamic Types (#722) [https://docs.boundaryml.com/docs/calling-baml/dynamic-types](https://docs.boundaryml.com/docs/calling-baml/dynamic-types)
+
+### Features
+
+- Show raw cURL request in Playground (#723) - ([57928e1](https://github.com/boundaryml/baml/commit/57928e178549cb3e5118ce374aab5d0fbad7038b)) - Anish Palakurthi
+- Support bedrock as a provider (#725) - ([c64c665](https://github.com/boundaryml/baml/commit/c64c66522a1d496493a30f593103209acd201364)) - Samuel Lijin
+
+## [0.44.0](https://github.com/boundaryml/baml/compare/0.43.0..0.44.0) - 2024-06-26
+
+### Bug Fixes
+- Fix typebuilder for random enums (#721)
+
+## [0.43.0](https://github.com/boundaryml/baml/compare/0.42.0..0.43.0) - 2024-06-26
+
+### Bug Fixes
+- fix pnpm lockfile issue (#720)
+
+## [0.42.0](https://github.com/boundaryml/baml/compare/0.41.0..0.42.0) - 2024-06-26
+
+### Bug Fixes
+
+- correctly propagate LICENSE to baml-py (#695) - ([3fda880](https://github.com/boundaryml/baml/commit/3fda880bf39b32191b425ae75e8b491d10884cf6)) - Samuel Lijin
+
+### Miscellaneous Chores
+
+- update jsonish readme (#685) - ([b19f04a](https://github.com/boundaryml/baml/commit/b19f04a059ba18d54544cb278b6990b95170d3f3)) - Samuel Lijin
+
+### Vscode
+
+- add link to tracing, show token counts (#703) - ([64aa18a](https://github.com/boundaryml/baml/commit/64aa18a9cc34071655141c8f6e2ad04ac90e7be1)) - Samuel Lijin
+
+## [0.41.0] - 2024-06-20
+
+### Bug Fixes
+
+- rollback git lfs, images broken in docs rn (#534) - ([6945506](https://github.com/boundaryml/baml/commit/694550664fa45b5f76987e2663c9d7e7a9a6a2d2)) - Samuel Lijin
+- search for markdown blocks correctly (#641) - ([6b8abf1](https://github.com/boundaryml/baml/commit/6b8abf1ccf55bbe7c3bc1046c78081126e01f134)) - Samuel Lijin
+- restore one-workspace-per-folder (#656) - ([a464bde](https://github.com/boundaryml/baml/commit/a464bde566199ace45285a78a7f542cd7217fb65)) - Samuel Lijin
+- ruby generator should be ruby/sorbet (#661) - ([0019f39](https://github.com/boundaryml/baml/commit/0019f3951b8fe2b49e62eb11d869516b8088e9cb)) - Samuel Lijin
+- ruby compile error snuck in (#663) - ([0cb2583](https://github.com/boundaryml/baml/commit/0cb25831788eb8b3eb0a38383917f6d1ffb5633a)) - Samuel Lijin
+
+### Documentation
+
+- add typescript examples (#477) - ([532481c](https://github.com/boundaryml/baml/commit/532481c3df4063b37a8834a5fe2bbce3bb37d2f5)) - Samuel Lijin
+- add titles to code blocks for all CodeGroup elems (#483) - ([76c6b68](https://github.com/boundaryml/baml/commit/76c6b68b27ee37972fa226be0b4dfe31f7b4b5ec)) - Samuel Lijin
+- add docs for round-robin clients (#500) - ([221f902](https://github.com/boundaryml/baml/commit/221f9020d850e6d24fe2fd8a684081726a0659af)) - Samuel Lijin
+- add ruby example (#689) - ([16e187f](https://github.com/boundaryml/baml/commit/16e187f6698a1cc86a37eedf2447648d810370ad)) - Samuel Lijin
+
+### Features
+
+- implement `baml version --check --output json` (#444) - ([5f076ac](https://github.com/boundaryml/baml/commit/5f076ace1f92dc2141b231c9e62f4dc23f7fef18)) - Samuel Lijin
+- show update prompts in vscode (#451) - ([b66da3e](https://github.com/boundaryml/baml/commit/b66da3ee355fcd6a8677d834ecb05af44cbf8f20)) - Samuel Lijin
+- add tests to check that baml version --check works (#454) - ([be1499d](https://github.com/boundaryml/baml/commit/be1499dfa82ff8ab923a16d45290758120d95015)) - Samuel Lijin
+- parse typescript versions in version --check (#473) - ([b4b2250](https://github.com/boundaryml/baml/commit/b4b2250c37b900db899256159bbfc3aa2ec819cb)) - Samuel Lijin
+- implement round robin client strategies (#494) - ([599fcdd](https://github.com/boundaryml/baml/commit/599fcdd2a45c5b1e935f36769784ca944566b88c)) - Samuel Lijin
+- add integ-tests support to build (#542) - ([f59cf2e](https://github.com/boundaryml/baml/commit/f59cf2e1a9ec7edbe174f4bc7ff9391f2cff3208)) - Samuel Lijin
+- make ruby work again (#650) - ([6472bec](https://github.com/boundaryml/baml/commit/6472bec231b581076ee7edefaab2e7979b2bf336)) - Samuel Lijin
+- Add RB2B tracking script (#682) - ([54547a3](https://github.com/boundaryml/baml/commit/54547a34d40cd40a43767919dbc9faa68a82faea)) - hellovai
+
+### Miscellaneous Chores
+
+- add nodemon config to typescript/ (#435) - ([231b396](https://github.com/boundaryml/baml/commit/231b3967bc947c4651156bc55fd66552782824c9)) - Samuel Lijin
+- finish gloo to BoundaryML renames (#452) - ([88a7fda](https://github.com/boundaryml/baml/commit/88a7fdacc826e78ef21c6b24745ee469d9d02e6a)) - Samuel Lijin
+- set up lfs (#511) - ([3a43143](https://github.com/boundaryml/baml/commit/3a431431e8e38dfc68763f15ccdcd1d131f23984)) - Samuel Lijin
+- add internal build tooling for sam (#512) - ([9ebacca](https://github.com/boundaryml/baml/commit/9ebaccaa542760cb96382ae2a91d780f1ade613b)) - Samuel Lijin
+- delete clients dir, this is now dead code (#652) - ([ec2627f](https://github.com/boundaryml/baml/commit/ec2627f59c7fe9edfff46fcdb65f9b9f0e2e072c)) - Samuel Lijin
+- consolidate vscode workspace, bump a bunch of deps (#654) - ([82bf6ab](https://github.com/boundaryml/baml/commit/82bf6ab1ad839f84782a7ef0441f21124c368757)) - Samuel Lijin
+- Add RB2B tracking script to propmt fiddle (#681) - ([4cf806b](https://github.com/boundaryml/baml/commit/4cf806bba26563fd8b6ddbd68296ab8bdfac21c4)) - hellovai
+- Adding better release script (#688) - ([5bec282](https://github.com/boundaryml/baml/commit/5bec282d39d2250b39ef4aba5d6bba9830a35988)) - hellovai
+
+### [AUTO-patch]
+
+- Version bump for nightly release [NIGHTLY:cli] [NIGHTLY:vscode_ext] [NIGHTLY:client-python] - ([d05a22c](https://github.com/boundaryml/baml/commit/d05a22ca4135887738adbce638193d71abca42ec)) - GitHub Action
+
+### Build
+
+- fix baml-core-ffi script (#521) - ([b1b7f4a](https://github.com/boundaryml/baml/commit/b1b7f4af0991ef6453f888f27930f3faaae337f5)) - Samuel Lijin
+- fix engine/ (#522) - ([154f646](https://github.com/boundaryml/baml/commit/154f6468ec0aa6de1b033ee1cbc76e60acc363ea)) - Samuel Lijin
+
+### Integ-tests
+
+- add ruby test - ([c0bc101](https://github.com/boundaryml/baml/commit/c0bc10126ea32d099f1398f2c5faa08b111554ba)) - Sam Lijin
+
+### Readme
+
+- add function calling, collapse the table (#505) - ([2f9024c](https://github.com/boundaryml/baml/commit/2f9024c28ba438267de37ac43c6570a2f0398b5a)) - Samuel Lijin
+
+### Release
+
+- bump versions for everything (#662) - ([c0254ae](https://github.com/boundaryml/baml/commit/c0254ae680365854c51c7a4e58ea68d1901ea033)) - Samuel Lijin
+
+### Vscode
+
+- check for updates on the hour (#434) - ([c70a3b3](https://github.com/boundaryml/baml/commit/c70a3b373cb2346a0df9a1eba0ebacb74d59b53e)) - Samuel Lijin
+
+
diff --git a/README.md b/README.md
index fb358f9f7..76f366849 100644
--- a/README.md
+++ b/README.md
@@ -8,10 +8,9 @@
# BAML
-An LLM function is a prompt template with some defined input variables, and a specific output type like a class, enum, union, optional string, etc.
-
-**BAML is a configuration file format to write better and cleaner LLM functions.**
+**BAML is a domain-specific language to write and test LLM functions.**
+An LLM function is a prompt template with some defined input variables, and a specific output type like a class, enum, union, optional string, etc.
With BAML you can write and test a complex LLM function in 1/10 of the time it takes to setup a python LLM testing environment.
## Try it out in the playground -- [PromptFiddle.com](https://promptfiddle.com)
@@ -53,7 +52,7 @@ Share your creations and ask questions in our [Discord](https://discord.gg/BTNBe
## Starter projects
- [BAML + NextJS 14](https://github.com/BoundaryML/baml-examples/tree/main/nextjs-starter)
-- [BAML + FastAPI + Streaming](https://github.com/BoundaryML/baml-examples/tree/main/fastapi-starter)
+- [BAML + FastAPI + Streaming](https://github.com/BoundaryML/baml-examples/tree/main/python-fastapi-starter)
## A BAML LLM Function
@@ -177,7 +176,7 @@ Python: `baml-cli init`
### 4. OR use these starter projects:
- [NextJS 14](https://github.com/BoundaryML/baml-examples/tree/main/nextjs-starter)
-- [FastAPI](https://github.com/BoundaryML/baml-examples/tree/main/fastapi-starter)
+- [FastAPI](https://github.com/BoundaryML/baml-examples/tree/main/python-fastapi-starter)
## Observability
@@ -225,7 +224,7 @@ Here's how BAML differs from these frameworks:
**Aliasing object fields in Zod**
-```
+```typescript
const UserSchema = z.object({
first_name: z.string(),
}).transform((user) => ({
@@ -247,7 +246,7 @@ Zod: not possible
Pydantic:
-```
+```python
class Sentiment(Enum):
HAPPY = ("ecstatic")
SAD = ("sad")
diff --git a/cliff.toml b/cliff.toml
new file mode 100644
index 000000000..3105778ce
--- /dev/null
+++ b/cliff.toml
@@ -0,0 +1,99 @@
+# git-cliff ~ configuration file
+# https://git-cliff.org/docs/configuration
+
+[changelog]
+# template for the changelog footer
+header = """
+# Changelog\n
+All notable changes to this project will be documented in this file. See [conventional commits](https://www.conventionalcommits.org/) for commit guidelines.\n
+"""
+# template for the changelog body
+# https://keats.github.io/tera/docs/#introduction
+body = """
+{% if version %}\
+ {% if previous.version %}\
+ ## [{{ version | trim_start_matches(pat="v") }}]($REPO/compare/{{ previous.version }}..{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }}
+ {% else %}\
+ ## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }}
+ {% endif %}\
+{% else %}\
+ ## [unreleased]
+{% endif %}\
+{% for group, commits in commits | group_by(attribute="group") %}
+ ### {{ group | striptags | trim | upper_first }}
+ {% for commit in commits
+ | filter(attribute="scope")
+ | sort(attribute="scope") %}
+ - **({{commit.scope}})**{% if commit.breaking %} [**breaking**]{% endif %} \
+ {{ commit.message }} - ([{{ commit.id | truncate(length=7, end="") }}]($REPO/commit/{{ commit.id }})) - {{ commit.author.name }}
+ {%- endfor -%}
+ {% raw %}\n{% endraw %}\
+ {%- for commit in commits %}
+ {%- if commit.scope -%}
+ {% else -%}
+ - {% if commit.breaking %} [**breaking**]{% endif %}\
+ {{ commit.message }} - ([{{ commit.id | truncate(length=7, end="") }}]($REPO/commit/{{ commit.id }})) - {{ commit.author.name }}
+ {% endif -%}
+ {% endfor -%}
+{% endfor %}\n
+### UNMATCHED
+{% for commit in commits %}
+ {%- if commit.group -%}
+ {% else -%}
+ - {% if commit.breaking %} [**breaking**]{% endif %}\
+ {{ commit.message | split(pat="\n") | first }} - ([{{ commit.id | truncate(length=7, end="") }}]($REPO/commit/{{ commit.id }})) - {{ commit.author.name }}
+ {% endif -%}
+{% endfor %}\n
+"""
+# template for the changelog footer
+footer = """
+
+"""
+# remove the leading and trailing whitespace from the templates
+trim = true
+# postprocessors
+postprocessors = [
+ { pattern = '\$REPO', replace = "https://github.com/boundaryml/baml" }, # replace repository URL
+]
+
+[git]
+# parse the commits based on https://www.conventionalcommits.org
+conventional_commits = true
+# filter out the commits that are not conventional
+filter_unconventional = false
+# process each line of a commit as an individual commit
+split_commits = false
+# regex for preprocessing the commit messages
+commit_preprocessors = [
+ # { pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](https://github.com/orhun/git-cliff/issues/${2}))"}, # replace issue numbers
+]
+# regex for parsing and grouping commits
+commit_parsers = [
+ { message = "^feat", group = "Features" },
+ { message = "^fix", group = "Bug Fixes" },
+ { message = "^doc", group = "Documentation" },
+ { message = "^perf", group = "Performance" },
+ { message = "^refactor", group = "Refactoring" },
+ { message = "^style", group = "Style" },
+ { message = "^revert", group = "Revert" },
+ { message = "^test", group = "Tests" },
+ { message = "^chore\\(version\\):", skip = true },
+ { message = "^chore", group = "Miscellaneous Chores" },
+ { body = ".*security", group = "Security" },
+]
+# protect breaking changes from being skipped due to matching a skipping commit_parser
+protect_breaking_commits = false
+# filter out the commits that are not matched by commit parsers
+filter_commits = false
+# regex for matching git tags
+tag_pattern = "^[0-9].[0-9]+.[0-9]+$"
+# regex for skipping tags
+skip_tags = ""
+# regex for ignoring tags
+ignore_tags = ""
+# sort the tags topologically
+topo_order = false
+# sort the commits inside sections by oldest/newest order
+sort_commits = "oldest"
+# limit the number of commits included in the changelog.
+# limit_commits = 42
diff --git a/docs/contact.mdx b/docs/contact.mdx
new file mode 100644
index 000000000..be61879fb
--- /dev/null
+++ b/docs/contact.mdx
@@ -0,0 +1,15 @@
+---
+title: "Contact Us"
+---
+
+BAML is here to serve its users: we always want to hear your feedback, whether
+it's a bug, feature request, or just general comment.
+
+You can reach us using:
+
+ - [Discord](/discord) (fastest for a "how do I... ?")
+ - [GitHub](https://github.com/BoundaryML/baml/issues) (for bugs and feature requests)
+ - Email: [contact@boundaryml.com](mailto:contact@boundaryml.com)
+ - Twitter: [@boundaryml](https://twitter.com/boundaryml)
+
+We try our best to respond as quickly as possible, so don't hesitate to reach out!
\ No newline at end of file
diff --git a/docs/docs/calling-baml/calling-functions.mdx b/docs/docs/calling-baml/calling-functions.mdx
new file mode 100644
index 000000000..436b47612
--- /dev/null
+++ b/docs/docs/calling-baml/calling-functions.mdx
@@ -0,0 +1,106 @@
+---
+title: "Calling BAML Functions"
+---
+
+Once you've [generated the BAML client] and set your environment variables,
+you can call BAML functions from your code.
+
+You can check out more examples in the [BAML Examples] repo.
+
+[generated the BAML client]: /docs/calling-baml/generate-baml-client
+[BAML Examples]: https://github.com/BoundaryML/baml-examples/tree/main
+
+We’ll use `function ClassifyMessage(input: string) -> Category` for our example:
+
+
+```rust
+enum Category {
+ Refund
+ CancelOrder
+ TechnicalSupport
+ AccountIssue
+ Question
+}
+
+function ClassifyMessage(input: string) -> Category {
+ client GPT4o
+ prompt #"
+ Classify the following INPUT into ONE
+ of the following categories:
+
+ INPUT: {{ input }}
+
+ {{ ctx.output_format }}
+
+ Response:
+ "#
+}
+```
+
+
+
+
+
+BAML will generate `b.ClassifyMessage()` for you, which you can use like so:
+
+```python main.py
+import asyncio
+
+from baml_client import b
+from baml_client.types import Category
+
+async def main():
+ category = await b.ClassifyMessage("I want to cancel my order")
+ print(category)
+ assert category == Category.CancelOrder
+
+if __name__ == '__main__':
+ asyncio.run(main())
+```
+
+
+
+
+BAML will generate `b.ClassifyMessage()` for you, which you can use like so:
+
+```ts main.ts
+import { b } from './baml_client'
+import { Category } from './baml_client/types'
+import assert from 'assert'
+
+const main = async () => {
+ const category = await b.ClassifyMessage('I want to cancel my order')
+ console.log(category)
+ assert(category == Category.CancelOrder)
+}
+
+if (require.main === module) {
+ main()
+}
+
+```
+
+
+
+
+BAML will generate `Baml.Client.ClassifyMessage()` for you, which you can use like so:
+
+```ruby main.rb
+require_relative "baml_client/client"
+
+$b = Baml.Client
+
+def main
+ category = $b.ClassifyMessage(input: "I want to cancel my order")
+ puts category
+ category == Baml::Types::Category::CancelOrder
+end
+
+if __FILE__ == $0
+ puts main
+end
+
+```
+
+
+
\ No newline at end of file
diff --git a/docs/docs/calling-baml/concurrent-calls.mdx b/docs/docs/calling-baml/concurrent-calls.mdx
new file mode 100644
index 000000000..a673e37ab
--- /dev/null
+++ b/docs/docs/calling-baml/concurrent-calls.mdx
@@ -0,0 +1,85 @@
+---
+title: "Concurrent function calls"
+---
+
+We’ll use `function ClassifyMessage(input: string) -> Category` for our example:
+
+
+```rust
+enum Category {
+ Refund
+ CancelOrder
+ TechnicalSupport
+ AccountIssue
+ Question
+}
+
+function ClassifyMessage(input: string) -> Category {
+ client GPT4o
+ prompt #"
+ Classify the following INPUT into ONE
+ of the following categories:
+
+ INPUT: {{ input }}
+
+ {{ ctx.output_format }}
+
+ Response:
+ "#
+}
+```
+
+
+
+
+
+You can make concurrent `b.ClassifyMessage()` calls like so:
+
+```python main.py
+import asyncio
+
+from baml_client import b
+from baml_client.types import Category
+
+async def main():
+ await asyncio.gather(
+ b.ClassifyMessage("I want to cancel my order"),
+ b.ClassifyMessage("I want a refund")
+ )
+
+if __name__ == '__main__':
+ asyncio.run(main())
+```
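+
+`asyncio.gather` returns results in the order the calls were passed in, so you can also capture each classification. Here is a small variation of the example above (a sketch; it uses only the function already shown):
+
+```python
+import asyncio
+
+from baml_client import b
+
+async def main():
+    # gather preserves the order of the awaitables passed in
+    cancel_result, refund_result = await asyncio.gather(
+        b.ClassifyMessage("I want to cancel my order"),
+        b.ClassifyMessage("I want a refund"),
+    )
+    print(cancel_result, refund_result)  # e.g. Category.CancelOrder, Category.Refund
+
+if __name__ == '__main__':
+    asyncio.run(main())
+```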
+
+
+
+
+You can make concurrent `b.ClassifyMessage()` calls like so:
+
+```ts main.ts
+import { b } from './baml_client'
+import { Category } from './baml_client/types'
+import assert from 'assert'
+
+const main = async () => {
+  const [category1, category2] = await Promise.all([
+    b.ClassifyMessage('I want to cancel my order'),
+    b.ClassifyMessage('I want a refund'),
+  ])
+}
+
+if (require.main === module) {
+ main()
+}
+
+```
+
+
+
+
+BAML Ruby (beta) does not currently support async/concurrent calls.
+
+Please [contact us](/contact) if this is something you need.
+
+
+
\ No newline at end of file
diff --git a/docs/docs/calling-baml/dynamic-clients.mdx b/docs/docs/calling-baml/dynamic-clients.mdx
new file mode 100644
index 000000000..e69de29bb
diff --git a/docs/docs/calling-baml/dynamic-types.mdx b/docs/docs/calling-baml/dynamic-types.mdx
new file mode 100644
index 000000000..a07d981e2
--- /dev/null
+++ b/docs/docs/calling-baml/dynamic-types.mdx
@@ -0,0 +1,193 @@
+
+
+Sometimes you have an **output schema that changes at runtime** -- for example, if the list of Categories you need to classify comes from a database, or if your schema is user-provided.
+
+
+**Dynamic types are types that can be modified at runtime**, which means you can change the output schema of a function at runtime.
+
+Here are the steps to make this work:
+1. Add `@@dynamic` to the class or enum definition to mark it as dynamic
+
+```rust baml
+enum Category {
+ VALUE1 // normal static enum values that don't change
+ VALUE2
+ @@dynamic // this enum can have more values added at runtime
+}
+
+function DynamicCategorizer(input: string) -> Category {
+ client GPT4
+ prompt #"
+ Given a string, classify it into a category
+ {{ input }}
+
+ {{ ctx.output_format }}
+ "#
+}
+
+```
+
+2. Create a TypeBuilder and modify the existing type. All dynamic types you define in BAML will be available as properties of `TypeBuilder`. Think of the `TypeBuilder` as a registry of modified runtime types that the BAML function reads from when building the output schema in the prompt.
+
+
+
+```python python
+from baml_client.type_builder import TypeBuilder
+from baml_client import b
+
+async def run():
+ tb = TypeBuilder()
+ tb.Category.add_value('VALUE3')
+ tb.Category.add_value('VALUE4')
+ # Pass the typebuilder in the baml_options argument -- the last argument of the function.
+ res = await b.DynamicCategorizer("some input", { "tb": tb })
+ # Now res can be VALUE1, VALUE2, VALUE3, or VALUE4
+ print(res)
+
+```
+
+```typescript TypeScript
+import TypeBuilder from '../baml_client/type_builder'
+import {
+ b
+} from '../baml_client'
+
+async function run() {
+ const tb = new TypeBuilder()
+ tb.Category.addValue('VALUE3')
+ tb.Category.addValue('VALUE4')
+ const res = await b.DynamicCategorizer("some input", { tb: tb })
+ // Now res can be VALUE1, VALUE2, VALUE3, or VALUE4
+ console.log(res)
+}
+```
+
+
+```ruby Ruby (beta)
+Not available yet
+```
+
+
+### Dynamic BAML Classes
+Existing BAML classes marked with @@dynamic will be available as properties of `TypeBuilder`.
+
+```rust BAML
+class User {
+ name string
+ age int
+ @@dynamic
+}
+
+function DynamicUserCreator(user_info: string) -> User {
+ client GPT4
+ prompt #"
+ Extract the information from this chunk of text:
+ "{{ user_info }}"
+
+ {{ ctx.output_format }}
+ "#
+}
+```
+
+Modify the `User` schema at runtime:
+
+
+
+```python python
+from baml_client.type_builder import TypeBuilder
+from baml_client import b
+
+async def run():
+ tb = TypeBuilder()
+ tb.User.add_property('email', 'string')
+ tb.User.add_property('address', 'string')
+ res = await b.DynamicUserCreator("some user info", { "tb": tb })
+ # Now res can have email and address fields
+ print(res)
+
+```
+
+```typescript TypeScript
+import TypeBuilder from '../baml_client/type_builder'
+import {
+ b
+} from '../baml_client'
+
+async function run() {
+ const tb = new TypeBuilder()
+  tb.User.addProperty('email', tb.string())
+  tb.User.addProperty('address', tb.string())
+ const res = await b.DynamicUserCreator("some user info", { tb: tb })
+ // Now res can have email and address fields
+ console.log(res)
+}
+```
+
+
+### Creating new dynamic classes or enums not in BAML
+Here we create a new `Hobbies` enum, and a new class called `Address`.
+
+
+
+
+```python python
+from baml_client.type_builder import TypeBuilder
+from baml_client import b
+
+async def run():
+ tb = TypeBuilder()
+    hobbies_enum = tb.add_enum('Hobbies')
+    hobbies_enum.add_value('Soccer')
+    hobbies_enum.add_value('Reading')
+
+    address_class = tb.add_class('Address')
+    address_class.add_property('street', tb.string())
+
+    tb.User.add_property('hobby', hobbies_enum.type().optional())
+    tb.User.add_property('address', address_class.type().optional())
+ res = await b.DynamicUserCreator("some user info", { "tb": tb })
+ # Now res might have the hobby property, which can be Soccer or Reading
+ print(res)
+
+```
+
+```typescript TypeScript
+import TypeBuilder from '../baml_client/type_builder'
+import {
+ b
+} from '../baml_client'
+
+async function run() {
+ const tb = new TypeBuilder()
+ const hobbiesEnum = tb.addEnum('Hobbies')
+ hobbiesEnum.addValue('Soccer')
+ hobbiesEnum.addValue('Reading')
+
+ const addressClass = tb.addClass('Address')
+ addressClass.addProperty('street', tb.string())
+
+
+ tb.User.addProperty('hobby', hobbiesEnum.type().optional())
+ tb.User.addProperty('address', addressClass.type())
+ const res = await b.DynamicUserCreator("some user info", { tb: tb })
+ // Now res might have the hobby property, which can be Soccer or Reading
+ console.log(res)
+}
+```
+
+
+### Adding descriptions to dynamic types
+
+
+
+```python python
+tb = TypeBuilder()
+tb.User.add_property("email", tb.string()).description("The user's email")
+```
+
+```typescript TypeScript
+const tb = new TypeBuilder()
+tb.User.addProperty("email", tb.string()).description("The user's email")
+```
+
+
\ No newline at end of file
diff --git a/docs/docs/calling-baml/generate-baml-client.mdx b/docs/docs/calling-baml/generate-baml-client.mdx
new file mode 100644
index 000000000..c797a2aaf
--- /dev/null
+++ b/docs/docs/calling-baml/generate-baml-client.mdx
@@ -0,0 +1,133 @@
+---
+title: "Generate the BAML Client"
+---
+
+This page assumes you've already defined a function in BAML. If you
+haven't done that yet, check out [how to define a function].
+
+[how to define a function]: /docs/snippets/functions
+
+Once you've defined a function in BAML, you need to generate code in your
+language of choice to call that function: we call this generating the BAML client.
+
+If you use VSCode, the [BAML extension] will re-generate the client every time
+you save a BAML file. Otherwise, you can generate the client manually:
+
+[BAML extension]: https://marketplace.visualstudio.com/items?itemName=Boundary.baml-extension
+
+
+
+```bash Python
+pipx run baml-cli generate --from path/to/baml_src
+
+# If using your local installation, venv or conda:
+pip install baml-py
+baml-cli generate --from path/to/baml_src
+
+# If using poetry:
+poetry add baml-py
+poetry run baml-cli generate --from path/to/baml_src
+
+# If using pipenv:
+pipenv install baml-py
+pipenv run baml-cli generate --from path/to/baml_src
+```
+
+```bash TypeScript
+npx @boundaryml/baml generate --from path/to/baml_src
+
+# If using npm:
+npm install @boundaryml/baml
+npm run baml-cli generate --from path/to/baml_src
+
+# If using pnpm:
+pnpm install @boundaryml/baml
+pnpm run baml-cli generate --from path/to/baml_src
+
+# If using yarn:
+yarn add @boundaryml/baml
+yarn run baml-cli generate --from path/to/baml_src
+```
+
+```bash Ruby (beta)
+bundle add baml
+bundle exec baml-cli generate --from path/to/baml_src
+```
+
+
+
+## Best Practices
+
+### Define a `generator` clause
+
+If you created your project using `baml-cli init`, then one has already been generated for you!
+
+Each `generator` that you define in your BAML project will tell `baml-cli
+generate` to generate code for a specific target language. You can define
+multiple `generator` clauses in your BAML project, and `baml-cli generate` will
+generate code for each of them.
+
+
+
+```rust Python
+generator target {
+ // Valid values: "python/pydantic", "typescript", "ruby/sorbet"
+ output_type "python/pydantic"
+ // Where the generated code will be saved (relative to baml_src/)
+ output_dir "../"
+}
+```
+
+```rust TypeScript
+generator target {
+ // Valid values: "python/pydantic", "typescript", "ruby/sorbet"
+ output_type "typescript"
+ // Where the generated code will be saved (relative to baml_src/)
+ output_dir "../"
+}
+```
+
+```rust Ruby (beta)
+generator target {
+ // Valid values: "python/pydantic", "typescript", "ruby/sorbet"
+ output_type "ruby/sorbet"
+ // Where the generated code will be saved (relative to baml_src/)
+ output_dir "../"
+}
+```
+
+
+
+
+### Generate the BAML client on-demand
+
+Although you can check in the generated BAML client, we recommend that you
+instead add it to your `.gitignore` and generate it on-demand when you
+build/release your code:
+
+ - this will make your PRs more readable;
+ - this will save you from handling merge conflicts in generated code; and
+ - this will ensure a single source-of-truth for your BAML code (and prevent
+ your client from falling out of sync with your BAML code).
+
+To add the generated client to your `.gitignore`, you can run:
+
+```bash
+echo "baml_client" >> .gitignore
+```
+
+and then you just need to run `baml-cli generate` in your CI/CD build/release
+workflows. Here's what that might look like in a GitHub Actions workflow file:
+
+```yaml
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+
+ # Install your Python/Node/Ruby (beta) dependencies here
+
+ - name: Generate BAML client
+ run: baml-cli generate --from baml_src
+```
\ No newline at end of file
diff --git a/docs/docs/calling-baml/multi-modal.mdx b/docs/docs/calling-baml/multi-modal.mdx
new file mode 100644
index 000000000..efc3fc047
--- /dev/null
+++ b/docs/docs/calling-baml/multi-modal.mdx
@@ -0,0 +1,94 @@
+
+## Multi-modal input
+
+### Images
+Calling a BAML function with an `image` input argument type (see [image types](/docs/snippets/supported-types))
+
+```python Python
+from baml_py import Image
+from baml_client import b
+
+async def test_image_input():
+ # from URL
+ res = await b.TestImageInput(
+ img=Image.from_url(
+ "https://upload.wikimedia.org/wikipedia/en/4/4d/Shrek_%28character%29.png"
+ )
+ )
+
+ # Base64 image
+ image_b64 = "iVBORw0K...."
+ res = await b.TestImageInput(
+ img=Image.from_base64("image/png", image_b64)
+ )
+```
+
+```typescript TypeScript
+import { b } from '../baml_client'
+import { Image } from "@boundaryml/baml"
+...
+
+ // URL
+ let res = await b.TestImageInput(
+ Image.fromUrl('https://upload.wikimedia.org/wikipedia/en/4/4d/Shrek_%28character%29.png'),
+ )
+
+ // Base64
+ const image_b64 = "iVB0R..."
+ let res = await b.TestImageInput(
+ Image.fromBase64('image/png', image_b64),
+ )
+
+```
+
+```ruby Ruby (beta)
+we're working on it!
+```
+
+
+
+### Audio
+Calling functions that have `audio` types. See [audio types](/docs/snippets/supported-types)
+
+
+```python Python
+from baml_py import Audio
+from baml_client import b
+
+async def run():
+ # from URL
+ res = await b.TestAudioInput(
+ img=Audio.from_url(
+ "https://upload.wikimedia.org/wikipedia/en/4/4d/Shrek_%28character%29.png"
+ )
+ )
+
+ # Base64
+ b64 = "iVBORw0K...."
+ res = await b.TestAudioInput(
+        img=Audio.from_base64("audio/mp3", b64)
+ )
+```
+
+```typescript TypeScript
+import { b } from '../baml_client'
+import { Audio } from "@boundaryml/baml"
+...
+
+ // URL
+ let res = await b.TestAudioInput(
+ Audio.fromUrl('https://upload.wikimedia.org/wikipedia/en/4/4d/Shrek_%28character%29.mp4'),
+ )
+
+ // Base64
+ const audio_base64 = ".."
+ let res = await b.TestAudioInput(
+    Audio.fromBase64('audio/mp3', audio_base64),
+ )
+
+```
+
+```ruby Ruby (beta)
+we're working on it!
+```
+
diff --git a/docs/docs/calling-baml/set-env-vars.mdx b/docs/docs/calling-baml/set-env-vars.mdx
new file mode 100644
index 000000000..1d2eb91e6
--- /dev/null
+++ b/docs/docs/calling-baml/set-env-vars.mdx
@@ -0,0 +1,81 @@
+---
+title: "Set Environment Variables"
+---
+
+
+The generated BAML client will capture all environment variables when you import it,
+and will not be able to see any environment variables you load after importing
+the BAML client.
+
+Any of the following strategies are compatible with BAML:
+
+ - set environment variables in your `Dockerfile`
+ - set environment variables in your `next.config.js`
+ - set environment variables in your Kubernetes manifest
+ - load environment variables from secrets-store.csi.k8s.io
+ - load environment variables from a secrets provider such as [Infisical](https://infisical.com/) / [Doppler](https://www.doppler.com/)
+ - dotenv (`.env` file) cli (e.g. `dotenv -e .env python myscript.py`)
+
+If none of these strategies work for your use case, please [contact us]!
+
+
+## Loading env variables in your program
+
+If you do anything to load environment variables in-process - e.g. using a
+`.env` file - make sure to do it before importing the BAML client.
+
+
+
+```python Python
+import dotenv
+dotenv.load_dotenv()
+
+# Wait to import the BAML client until after loading environment variables
+from baml_client import b
+```
+
+```typescript TypeScript
+import dotenv from 'dotenv'
+dotenv.config()
+
+// Wait to import the BAML client until after loading environment variables
+import { b } from './baml_client'
+```
+
+```ruby Ruby (beta)
+require 'dotenv/load'
+
+# Wait to import the BAML client until after loading environment variables
+require 'baml_client'
+```
+
+
+
+
+
+## Environment Variables in BAML
+
+Environment variables are primarily used in clients to propagate authorization
+credentials, such as API keys, like so:
+
+```rust
+
+client GPT4o {
+ provider baml-openai-chat
+ options {
+ model gpt-4o
+ api_key env.OPENAI_API_KEY
+ }
+}
+```
+
+We do not currently support any other mechanisms for providing authorization
+credentials, including but not limited to:
+
+ - exchanging refresh tokens for ephemeral authorization tokens
+ - fetching credentials from a secret storage service, such as AWS Secrets
+ Manager or HashiCorp Vault
+
+Please [contact us] if you need support for these use cases.
+
+[contact us]: /contact
\ No newline at end of file
diff --git a/docs/docs/calling-baml/streaming.mdx b/docs/docs/calling-baml/streaming.mdx
new file mode 100644
index 000000000..d0acde4f5
--- /dev/null
+++ b/docs/docs/calling-baml/streaming.mdx
@@ -0,0 +1,229 @@
+---
+title: "Streaming BAML Functions"
+---
+
+Now that we know how to [call BAML functions], let's learn how to stream
+BAML function calls.
+
+You can check out more examples in the [BAML Examples] repo.
+
+[call BAML functions]: /docs/calling-baml/calling-functions
+[BAML Examples]: https://github.com/BoundaryML/baml-examples/tree/main
+
+This time, we'll use `function ExtractReceiptInfo(email: string) -> ReceiptInfo` for our example:
+
+
+
+```rust
+class ReceiptItem {
+ name string
+ description string?
+ quantity int
+ price float
+}
+
+class ReceiptInfo {
+ items ReceiptItem[]
+ total_cost float?
+}
+
+function ExtractReceiptInfo(email: string) -> ReceiptInfo {
+ client GPT4o
+ prompt #"
+ Given the receipt below:
+
+ {{ email }}
+
+ {{ ctx.output_format }}
+ "#
+}
+```
+
+
+
+
+
+BAML will generate `b.stream.ExtractReceiptInfo()` for you, which you can use like so:
+
+```python main.py
+import asyncio
+from baml_client import b, partial_types, types
+
+# Using both async iteration and get_final_response() from a stream
+async def example1(receipt: str):
+ stream = b.stream.ExtractReceiptInfo(receipt)
+
+ async for partial in stream:
+ print(f"partial: parsed {len(partial.items)} items (object: {partial})")
+
+ final = await stream.get_final_response()
+ print(f"final: {len(final.items)} items (object: {final})")
+
+# Using only async iteration of a stream
+async def example2(receipt: str):
+ async for partial in b.stream.ExtractReceiptInfo(receipt):
+ print(f"partial: parsed {len(partial.items)} items (object: {partial})")
+
+# Using only get_final_response() of a stream
+#
+# In this case, you should just use b.ExtractReceiptInfo(receipt) instead,
+# which is faster and more efficient.
+async def example3(receipt: str):
+ final = await b.stream.ExtractReceiptInfo(receipt).get_final_response()
+ print(f"final: {len(final.items)} items (object: {final})")
+
+receipt = """
+04/14/2024 1:05 pm
+
+Ticket: 220000082489
+Register: Shop Counter
+Employee: Connor
+Customer: Sam
+Item # Price
+Guide leash (1 Pair) uni UNI
+1 $34.95
+The Index Town Walls
+1 $35.00
+Boot Punch
+3 $60.00
+Subtotal $129.95
+Tax ($129.95 @ 9%) $11.70
+Total Tax $11.70
+Total $141.65
+"""
+
+if __name__ == '__main__':
+ asyncio.run(example1(receipt))
+ asyncio.run(example2(receipt))
+ asyncio.run(example3(receipt))
+```
+
+
+
+BAML will generate `b.stream.ExtractReceiptInfo()` for you, which you can use like so:
+
+```ts main.ts
+import { b } from './baml_client'
+
+// Using both async iteration and getFinalResponse() from a stream
+const example1 = async (receipt: string) => {
+ const stream = b.stream.ExtractReceiptInfo(receipt)
+
+ for await (const partial of stream) {
+ console.log(`partial: ${partial.items?.length} items (object: ${partial})`)
+ }
+
+ const final = await stream.getFinalResponse()
+ console.log(`final: ${final.items.length} items (object: ${final})`)
+}
+
+// Using only async iteration of a stream
+const example2 = async (receipt: string) => {
+ for await (const partial of b.stream.ExtractReceiptInfo(receipt)) {
+ console.log(`partial: ${partial.items?.length} items (object: ${partial})`)
+ }
+}
+
+// Using only getFinalResponse() of a stream
+//
+// In this case, you should just use b.ExtractReceiptInfo(receipt) instead,
+// which is faster and more efficient.
+const example3 = async (receipt: string) => {
+ const final = await b.stream.ExtractReceiptInfo(receipt).getFinalResponse()
+ console.log(`final: ${final.items.length} items (object: ${final})`)
+}
+
+const receipt = `
+04/14/2024 1:05 pm
+
+Ticket: 220000082489
+Register: Shop Counter
+Employee: Connor
+Customer: Sam
+Item # Price
+Guide leash (1 Pair) uni UNI
+1 $34.95
+The Index Town Walls
+1 $35.00
+Boot Punch
+3 $60.00
+Subtotal $129.95
+Tax ($129.95 @ 9%) $11.70
+Total Tax $11.70
+Total $141.65
+`
+
+if (require.main === module) {
+ example1(receipt)
+ example2(receipt)
+ example3(receipt)
+}
+```
+
+
+
+BAML will generate `Baml.Client.stream.ExtractReceiptInfo()` for you,
+which you can use like so:
+
+```ruby main.rb
+require_relative "baml_client/client"
+
+$b = Baml.Client
+
+# Using both iteration and get_final_response() from a stream
+def example1(receipt)
+ stream = $b.stream.ExtractReceiptInfo(receipt)
+
+ stream.each do |partial|
+ puts "partial: #{partial.items&.length} items"
+ end
+
+ final = stream.get_final_response
+ puts "final: #{final.items.length} items"
+end
+
+# Using only iteration of a stream
+def example2(receipt)
+ $b.stream.ExtractReceiptInfo(receipt).each do |partial|
+ puts "partial: #{partial.items&.length} items"
+ end
+end
+
+# Using only get_final_response() of a stream
+#
+# In this case, you should just use $b.ExtractReceiptInfo(receipt) instead,
+# which is faster and more efficient.
+def example3(receipt)
+ final = $b.stream.ExtractReceiptInfo(receipt).get_final_response
+ puts "final: #{final.items.length} items"
+end
+
+receipt = <<~RECEIPT
+ 04/14/2024 1:05 pm
+
+ Ticket: 220000082489
+ Register: Shop Counter
+ Employee: Connor
+ Customer: Sam
+ Item # Price
+ Guide leash (1 Pair) uni UNI
+ 1 $34.95
+ The Index Town Walls
+ 1 $35.00
+ Boot Punch
+ 3 $60.00
+ Subtotal $129.95
+ Tax ($129.95 @ 9%) $11.70
+ Total Tax $11.70
+ Total $141.65
+RECEIPT
+
+if __FILE__ == $0
+ example1(receipt)
+ example2(receipt)
+ example3(receipt)
+end
+```
+
+
+
\ No newline at end of file
diff --git a/docs/docs/home/comparisons/langchain.mdx b/docs/docs/comparisons/langchain.mdx
similarity index 100%
rename from docs/docs/home/comparisons/langchain.mdx
rename to docs/docs/comparisons/langchain.mdx
diff --git a/docs/docs/home/comparisons/marvin.mdx b/docs/docs/comparisons/marvin.mdx
similarity index 90%
rename from docs/docs/home/comparisons/marvin.mdx
rename to docs/docs/comparisons/marvin.mdx
index 55bc0018c..deec96766 100644
--- a/docs/docs/home/comparisons/marvin.mdx
+++ b/docs/docs/comparisons/marvin.mdx
@@ -77,12 +77,7 @@ enum RequestType {
INQUIRY @alias("general inquiry")
}
-function ClassifyRequest {
- input string
- output RequestType
-}
-
-impl {
+function ClassifyRequest(input: string) -> RequestType {
client GPT4 // choose even open source models
prompt #"
You are an expert classifier that always maintains as much semantic meaning
@@ -91,11 +86,10 @@ impl {
TEXT:
---
- Reset my password
+ {{ input }}
---
- LABELS:
- {#print_enum(RequestType)}
+ {{ ctx.output_format }}
The best label for the text is:
"#
@@ -129,5 +123,3 @@ Marvin was a big source of inspiration for us -- their approach is simple and el
BAML does have some limitations we are continuously working on. Here are a few of them:
1. It is a new language. However, it is fully open source and getting started takes less than 10 minutes. We are on-call 24/7 to help with any issues (and even provide prompt engineering tips)
1. Developing requires VSCode. You _could_ use vim and we have workarounds but we don't recommend it.
-1. Explicitly defining system / and user prompts. We have worked with many customers across healthcare and finance and have not seen any issues but we will support this soon.
-1. BAML does not support images. Until this is available you can definitely use BAML alongside other frameworks.
\ No newline at end of file
diff --git a/docs/docs/home/comparisons/pydantic.mdx b/docs/docs/comparisons/pydantic.mdx
similarity index 98%
rename from docs/docs/home/comparisons/pydantic.mdx
rename to docs/docs/comparisons/pydantic.mdx
index 3fae14463..658afce8c 100644
--- a/docs/docs/home/comparisons/pydantic.mdx
+++ b/docs/docs/comparisons/pydantic.mdx
@@ -342,10 +342,7 @@ Here we use a "GPT4" client, but you can use any model. See [client docs](/docs/
{/*
```rust
-function ExtractResume {
- input (resume_text: string)
- output Resume
-}
+
class Education {
school string
@@ -359,18 +356,18 @@ class Resume {
education Education[]
}
-impl version1 {
+function ExtractResume(resume_text: string) -> Resume {
client GPT4
prompt #"
Parse the following resume and return a structured representation of the data in the schema below.
Resume:
---
- {#input.resume_text}
+ {{ input.resume_text }}
---
Output in this JSON format:
- {#print_type(output)}
+ {{ ctx.output_format }}
Output JSON:
"#
diff --git a/docs/docs/get-started/debugging/enable-logging.mdx b/docs/docs/get-started/debugging/enable-logging.mdx
new file mode 100644
index 000000000..a1ebd19f0
--- /dev/null
+++ b/docs/docs/get-started/debugging/enable-logging.mdx
@@ -0,0 +1,10 @@
+You can add logging to determine what the BAML runtime is doing.
+
+To enable logging, set the `BAML_LOG` environment variable:
+```
+BAML_LOG=info
+```
+
+BAML uses the Rust `log` crate for logging. You can find more information on configuring the `log` crate [here](https://rust-lang-nursery.github.io/rust-cookbook/development_tools/debugging/config_log.html).
+
+The only difference is that you use the `BAML_LOG` environment variable instead of `RUST_LOG`.
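+
+As a minimal sketch (assuming you set environment variables in-process, as described in [Set Environment Variables](/docs/calling-baml/set-env-vars)), you can set `BAML_LOG` from Python before importing the generated client:
+
+```python
+import os
+
+# Assumption: BAML accepts the standard `log` crate level names
+# (error, warn, info, debug, trace).
+os.environ["BAML_LOG"] = "info"
+
+# Import the generated client only after the variable is set.
+from baml_client import b
+```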
diff --git a/docs/docs/get-started/debugging/vscode-playground.mdx b/docs/docs/get-started/debugging/vscode-playground.mdx
new file mode 100644
index 000000000..891f7798b
--- /dev/null
+++ b/docs/docs/get-started/debugging/vscode-playground.mdx
@@ -0,0 +1,28 @@
+## General debugging strategy
+- Check [Discord (#announcements channel)](https://discord.gg/BTNBeXGuaS) / [Github](https://github.com/BoundaryML/baml/issues) for any known issues
+- Close the playground and reopen it
+- Try reloading the entire window by pressing `Ctrl + Shift + P` or `Cmd + Shift + P` and typing `Developer: Reload Window`
+- Ensure your BAML VSCode extension is up-to-date (the Extensions tab in VSCode will show whether an update is available)
+
+
+- If nothing works, please file an issue on [Github](https://github.com/BoundaryML/baml/issues), ideally with a screenshot of the error and the steps to reproduce it.
+
+## Common Issues
+### No code lens in BAML files
+
+This can happen in two cases:
+1. You have a syntax error in some `.baml` file. You can check the error in the `Problems` tab in VSCode, or by running the `generate` command in the terminal (see [Generate](/docs/calling-baml/generate-baml-client))
+
+2. BAML extension is broken. Please try the tools above!
+
+### BAML extension is not working
+
+### Tests hanging
+
+We've only seen occasional reports of this, but closing the playground and reopening it should fix it.
+
+### Tests failing to run
+
+You can debug the actual network request being made by BAML by opening developer tools:
+
+
diff --git a/docs/docs/get-started/deploying/docker.mdx b/docs/docs/get-started/deploying/docker.mdx
new file mode 100644
index 000000000..362bc2732
--- /dev/null
+++ b/docs/docs/get-started/deploying/docker.mdx
@@ -0,0 +1,30 @@
+
+When you develop with BAML, the BAML VSCode extension generates a `baml_client` directory (on every save) with all the generated code you need to use your AI functions in your application.
+
+We recommend you add `baml_client` to your `.gitignore` file to avoid committing generated code to your repository, and re-generate the client code when you build and deploy your application.
+
+If you're just starting out, you _could_ commit the generated code to avoid dealing with this; just make sure the VSCode extension version matches your BAML package dependency version (e.g. `baml-py` for Python and `@boundaryml/baml` for TypeScript) so there are no compatibility issues.
+
+To build your client you can use the following command. See also [Generating Clients](/docs/calling-baml/generate-baml-client):
+
+
+
+```dockerfile Python Dockerfile
+RUN baml-cli generate --from path/to/baml_src
+```
+
+```dockerfile TypeScript Dockerfile
+# Do this early on in the dockerfile script before transpiling to JS
+RUN npx baml-cli generate --from path/to/baml_src
+```
+
+```dockerfile Ruby Dockerfile
+RUN bundle add baml
+RUN bundle exec baml-cli generate --from path/to/baml_src
+```
+
+
+
+### Current limitations
+- We do not yet support `alpine` images; BAML will not build properly on those platforms. Let us know if you need lighter-weight Alpine images, and we'll prioritize them.
+
diff --git a/docs/docs/get-started/deploying/nextjs.mdx b/docs/docs/get-started/deploying/nextjs.mdx
new file mode 100644
index 000000000..40e542651
--- /dev/null
+++ b/docs/docs/get-started/deploying/nextjs.mdx
@@ -0,0 +1,41 @@
+To deploy a NextJS app with BAML, take a look at the starter template:
+https://github.com/BoundaryML/baml-examples/tree/main/nextjs-starter
+
+All you need to do is modify `next.config.mjs` to allow BAML to run properly:
+```JS
+/** @type {import('next').NextConfig} */
+const nextConfig = {
+ experimental: {
+ serverComponentsExternalPackages: ["@boundaryml/baml"],
+ },
+ webpack: (config, { dev, isServer, webpack, nextRuntime }) => {
+ config.module.rules.push({
+ test: /\.node$/,
+ use: [
+ {
+ loader: "nextjs-node-loader",
+ options: {
+ outputPath: config.output.path,
+ },
+ },
+ ],
+ });
+
+ return config;
+ },
+};
+
+export default nextConfig;
+```
+
+and change your `package.json` to build the baml client automatically (and enable logging in dev mode if you want):
+
+```json
+ "scripts": {
+ "dev": "BAML_LOG=info next dev",
+ "build": "pnpm generate && next build",
+ "start": "next start",
+ "lint": "next lint",
+ "generate": "baml-cli generate --from ./baml_src"
+ },
+```
\ No newline at end of file
diff --git a/docs/docs/get-started/interactive-demos.mdx b/docs/docs/get-started/interactive-demos.mdx
new file mode 100644
index 000000000..f6a627cc7
--- /dev/null
+++ b/docs/docs/get-started/interactive-demos.mdx
@@ -0,0 +1,19 @@
+---
+title: "Interactive Demos"
+---
+
+## Interactive playground
+You can try BAML online over at [Prompt Fiddle](https://www.promptfiddle.com)
+
+
+## Examples built with BAML
+
+You can find the code here: https://github.com/BoundaryML/baml-examples/tree/main/nextjs-starter
+
+
+| Example | Link |
+| - | - |
+| Streaming Simple Objects | https://baml-examples.vercel.app/examples/stream-object |
+| RAG + Citations | https://baml-examples.vercel.app/examples/rag |
+| Generative UI / Streaming charts | https://baml-examples.vercel.app/examples/book-analyzer |
+| Getting a recipe | https://baml-examples.vercel.app/examples/get-recipe |
diff --git a/docs/docs/get-started/quickstart/editors-other.mdx b/docs/docs/get-started/quickstart/editors-other.mdx
new file mode 100644
index 000000000..23ca4f837
--- /dev/null
+++ b/docs/docs/get-started/quickstart/editors-other.mdx
@@ -0,0 +1,11 @@
+---
+title: "Other Editors"
+---
+
+We currently don't have support for other editors, but we are working on it. If you have a favorite editor that you would like to see support for, please let us know by [opening an issue](https://github.com/boundaryml/baml/issues/new?title=Add%20%20Editor%20Support&body=Hi%21%20I%20use%20%3Ceditor%3E%20please%20add%20support.).
+
+To get around this you can:
+
+1. Use [Prompt Fiddle](https://www.promptfiddle.com) to write your code and then copy it to your editor.
+
+2. Use the CLI. See [Generate the BAML Client](/docs/calling-baml/generate-baml-client)
diff --git a/docs/docs/get-started/quickstart/editors-vscode.mdx b/docs/docs/get-started/quickstart/editors-vscode.mdx
new file mode 100644
index 000000000..799572744
--- /dev/null
+++ b/docs/docs/get-started/quickstart/editors-vscode.mdx
@@ -0,0 +1,72 @@
+---
+title: "VSCode"
+---
+
+We provide a BAML VSCode extension: https://marketplace.visualstudio.com/items?itemName=Boundary.baml-extension
+
+
+| Feature | Supported |
+|---------|-----------|
+| Syntax highlighting for BAML files | ✅ |
+| Code snippets for BAML | ✅ |
+| LLM playground for testing BAML functions | ✅ |
+| Jump to definition for BAML files | ✅ |
+| Jump to definition between Python/TS files and BAML files | ✅ |
+| Auto generate `baml_client` on save | ✅ |
+| BAML formatter | ❌ |
+
+
+ For any issues, see the [troubleshooting](/docs/get-started/debugging/vscode-playground) page.
+
+
+## Opening BAML Playground
+
+Once you open a `.baml` file in VSCode, you should see a small button above every BAML function: `Open Playground`.
+
+
+
+Or type `BAML Playground` in the VSCode Command Bar (`CMD + Shift + P` or `CTRL + Shift + P`) to open the playground.
+
+
+
+## Setting Env Variables
+
+Click on the `Settings` button in the top right of the playground and set your environment variables.
+
+It shows an indicator with how many variables are still unset.
+
+
+
+The playground should persist the environment variables between closing and opening VSCode.
+
+
+ You can set environment variables lazily. If anything is unset you'll get an error when you run the function.
+
+
+
+ Environment Variables are stored in VSCode's local storage! We don't save any additional data to disk, or send them across the network.
+
+
+
+## Running Tests
+
+- Click on the `Run All Tests` button in the playground.
+
+- Press the `▶️` button next to an individual test case to run just that test case.
+
+
+## Switching Functions
+
+The playground will automatically switch to the function you're currently editing.
+
+To manually change it, click on the current function name in the playground (next to the dropdown) and search for your desired function.
+
+## Switching Test Cases
+
+The test case with the highlighted background is the currently rendered test case. Clicking on a different test case will render that test case.
+
+
+
+You can toggle between seeing the results of all test cases or only those for the current function.
+
+
diff --git a/docs/docs/get-started/quickstart/python.mdx b/docs/docs/get-started/quickstart/python.mdx
new file mode 100644
index 000000000..8dee33156
--- /dev/null
+++ b/docs/docs/get-started/quickstart/python.mdx
@@ -0,0 +1,73 @@
+Here's a sample repository:
+https://github.com/BoundaryML/baml-examples/tree/main/python-fastapi-starter
+
+To set up BAML in Python, do the following:
+
+
+
+ https://marketplace.visualstudio.com/items?itemName=Boundary.baml-extension
+
+ - syntax highlighting
+ - testing playground
+ - prompt previews
+
+
+ In your VSCode User Settings, we highly recommend adding this to get better autocomplete for Python in general, not just BAML.
+
+ ```json
+ {
+ "python.analysis.typeCheckingMode": "basic"
+ }
+ ```
+
+
+
+ ```bash
+ pip install baml-py
+ ```
+
+
+ This will give you some starter BAML code in a `baml_src` directory.
+
+ ```bash
+ baml-cli init
+ ```
+
+
+
+ This command converts `.baml` files to `.py` files. Every time you modify your `.baml` files,
+ you must re-run this command to regenerate the `baml_client` folder.
+
+
+ If you download our [VSCode extension](https://marketplace.visualstudio.com/items?itemName=Boundary.baml-extension), it will automatically generate `baml_client` on save!
+
+
+ ```bash
+ baml-cli generate
+ ```
+
+
+ If `baml_client` doesn't exist, make sure to run the previous step!
+
+ ```python main.py
+ from baml_client import b
+ from baml_client.types import Resume
+
+ async def example(raw_resume: str) -> Resume:
+ # BAML's internal parser guarantees ExtractResume
+ # to always return a Resume type
+ response = await b.ExtractResume(raw_resume)
+ return response
+
+ async def example_stream(raw_resume: str) -> Resume:
+ stream = b.stream.ExtractResume(raw_resume)
+ async for msg in stream:
+ print(msg) # This will be a PartialResume type
+
+ # This will be a Resume type
+ final = stream.get_final_response()
+
+ return final
+ ```
+
+
\ No newline at end of file
diff --git a/docs/docs/get-started/quickstart/ruby.mdx b/docs/docs/get-started/quickstart/ruby.mdx
new file mode 100644
index 000000000..f1ed0bdf8
--- /dev/null
+++ b/docs/docs/get-started/quickstart/ruby.mdx
@@ -0,0 +1,73 @@
+Here's a sample repository: https://github.com/BoundaryML/baml-examples/tree/main/ruby-example
+
+To set up BAML in Ruby, do the following:
+
+
+
+ https://marketplace.visualstudio.com/items?itemName=Boundary.baml-extension
+
+ - syntax highlighting
+ - testing playground
+ - prompt previews
+
+
+
+ ```bash
+ bundle init
+ bundle add baml sorbet-runtime sorbet-struct-comparable
+ ```
+
+
+ This will give you some starter BAML code in a `baml_src` directory.
+
+ ```bash
+ bundle exec baml-cli init
+ ```
+
+
+
+
+ This command converts `.baml` files to `.rb` files. Every time you modify your `.baml` files,
+ you must re-run this command to regenerate the `baml_client` folder.
+
+
+ If you download our [VSCode extension](https://marketplace.visualstudio.com/items?itemName=Boundary.baml-extension), it will automatically generate `baml_client` on save!
+
+
+ ```bash
+ bundle exec baml-cli generate
+ ```
+
+
+
+ If `baml_client` doesn't exist, make sure to run the previous step!
+
+ ```ruby main.rb
+ require_relative "baml_client/client"
+
+ def example(raw_resume)
+ # r is an instance of Baml::Types::Resume, defined in baml_client/types
+ r = Baml.Client.ExtractResume(resume: raw_resume)
+
+ puts "ExtractResume response:"
+ puts r.inspect
+ end
+
+ def example_stream(raw_resume)
+ stream = Baml.Client.stream.ExtractResume(resume: raw_resume)
+
+ stream.each do |msg|
+ # msg is an instance of Baml::PartialTypes::Resume
+ # defined in baml_client/partial_types
+ puts msg.inspect
+ end
+
+ stream.get_final_response
+ end
+
+ example 'Grace Hopper created COBOL'
+ example_stream 'Grace Hopper created COBOL'
+ ```
+
+
+
diff --git a/docs/docs/get-started/quickstart/typescript.mdx b/docs/docs/get-started/quickstart/typescript.mdx
new file mode 100644
index 000000000..7e16b0da2
--- /dev/null
+++ b/docs/docs/get-started/quickstart/typescript.mdx
@@ -0,0 +1,90 @@
+Here's a sample repository:
+https://github.com/BoundaryML/baml-examples/tree/main/nextjs-starter
+
+To set up BAML in TypeScript, do the following:
+
+
+
+ https://marketplace.visualstudio.com/items?itemName=Boundary.baml-extension
+
+ - syntax highlighting
+ - testing playground
+ - prompt previews
+
+
+
+ ```bash npm
+ npm install @boundaryml/baml
+ ```
+
+ ```bash pnpm
+ pnpm add @boundaryml/baml
+ ```
+
+ ```bash yarn
+ yarn add @boundaryml/baml
+ ```
+
+
+
+ This will give you some starter BAML code in a `baml_src` directory.
+
+ ```bash npm
+ npx baml-cli init
+ ```
+
+ ```bash pnpm
+ pnpx baml-cli init
+ ```
+
+ ```bash yarn
+ yarn baml-cli init
+ ```
+
+
+
+
+ This command converts `.baml` files to `.ts` files. Every time you modify your `.baml` files,
+ you must re-run this command to regenerate the `baml_client` folder.
+
+
+ If you download our [VSCode extension](https://marketplace.visualstudio.com/items?itemName=Boundary.baml-extension), it will automatically generate `baml_client` on save!
+
+
+ ```json package.json
+ {
+ "scripts": {
+ // Add a new command
+ "baml-generate": "baml-cli generate",
+ // Always call baml-generate on every build.
+ "build": "npm run baml-generate && tsc --build"
+ }
+ }
+ ```
+
+
+ If `baml_client` doesn't exist, make sure to run `npm run baml-generate`
+
+ ```typescript index.ts
+ import {b} from "baml_client"
+ import type {Resume} from "baml_client/types"
+
+ async function Example(raw_resume: string): Promise<Resume> {
+ // BAML's internal parser guarantees ExtractResume
+ // to always return a Resume type
+ const response = await b.ExtractResume(raw_resume);
+ return response;
+ }
+
+ async function ExampleStream(raw_resume: string): Promise<Resume> {
+ const stream = b.stream.ExtractResume(raw_resume);
+ for await (const msg of stream) {
+ console.log(msg) // This will be a Partial type
+ }
+
+ // This is guaranteed to be a Resume type.
+ return await stream.getFinalResponse();
+ }
+ ```
+
+
\ No newline at end of file
diff --git a/docs/docs/home/overview.mdx b/docs/docs/get-started/what-is-baml.mdx
similarity index 70%
rename from docs/docs/home/overview.mdx
rename to docs/docs/get-started/what-is-baml.mdx
index 5593c2ca6..72ad88aa9 100644
--- a/docs/docs/home/overview.mdx
+++ b/docs/docs/get-started/what-is-baml.mdx
@@ -1,13 +1,13 @@
---
title: What is BAML?
-"og:description": BAML is a configuration file format to write better and cleaner LLM functions.
+"og:description": BAML is a domain-specific language to get structured data from LLMs
"og:image": https://mintlify.s3-us-west-1.amazonaws.com/gloo/images/v3/AITeam.png
"twitter:image": https://mintlify.s3-us-west-1.amazonaws.com/gloo/images/v3/AITeam.png
---
-An LLM function is a prompt template with some defined input variables, and a specific output type like a class, enum, union, optional string, etc.
+**BAML is a domain-specific language to write and test LLM functions.**
-**BAML is a configuration file format to write better and cleaner LLM functions.**
+In BAML, prompts are treated like functions. An LLM function is a prompt template with some defined input variables, and a specific output type like a class, enum, union, optional string, etc.
With BAML you can write and test a complex LLM function in 1/10 of the time it takes to setup a python LLM testing environment.
@@ -17,12 +17,16 @@ With BAML you can write and test a complex LLM function in 1/10 of the time it t
Share your creations and ask questions in our [Discord](https://discord.gg/BTNBeXGuaS).
+## Demo video
+
+
+
## Features
### Language features
-- **Python and Typescript support**: Plug-and-play BAML with other languages
+- **Python / Typescript / Ruby support**: Plug-and-play BAML with other languages
- **JSON correction**: BAML fixes bad JSON returned by LLMs (e.g. unquoted keys, newlines, comments, extra quotes, and more)
-- **Wide model support**: Ollama, Openai, Anthropic. Tested on small models like Llama2
+- **Wide model support**: Ollama, Openai, Anthropic, Gemini. Tested on small models like Llama2
- **Streaming**: Stream structured partial outputs
- **Resilience and fallback features**: Add retries, redundancy, to your LLM calls
@@ -51,4 +55,4 @@ Share your creations and ask questions in our [Discord](https://discord.gg/BTNBe
- [BAML + FastAPI + Streaming](https://github.com/BoundaryML/baml-examples/tree/main/fastapi-starter)
## First steps
-We recommend checking the examples in [PromptFiddle.com](https://promptfiddle.com). Once you're ready to start, [install the toolchain](./installation) and read the [guides](../guides/overview).
+We recommend checking the examples in [PromptFiddle.com](https://promptfiddle.com). Once you're ready to start, [install the toolchain](/docs/get-started/quickstart/python) and read the [guides](/docs/calling-baml/calling-functions).
diff --git a/docs/docs/guides/hello_world/baml-project-structure.mdx b/docs/docs/guides/hello_world/baml-project-structure.mdx
deleted file mode 100644
index 84643385d..000000000
--- a/docs/docs/guides/hello_world/baml-project-structure.mdx
+++ /dev/null
@@ -1,71 +0,0 @@
----
-title: "BAML Project Structure"
----
-
-At a high level, you will define your AI prompts and interfaces in BAML files.
-The BAML compiler will then generate Python or Typescript code for you to use in
-your application, depending on the generators configured in your `main.baml`:
-
-```rust main.baml
-generator MyGenerator{
- output_type typescript
- output_dir "../"
-}
-```
-
-Here is the typical project structure:
-
-```bash
-.
-├── baml_client/ # Generated code
-├── baml_src/ # Prompts and baml tests live here
-│ └── foo.baml
-# The rest of your project (not generated nor used by BAML)
-├── app/
-│ ├── __init__.py
-│ └── main.py
-└── pyproject.toml
-
-```
-
-1. `baml_src/` is where you write your BAML files with the AI
-function declarations, prompts, retry policies, etc. It also contains
-[generator](/docs/syntax/generator) blocks which configure how and where to
-transpile your BAML code.
-
-2. `baml_client/` is where the BAML compiler will generate code for you,
-based on the types and functions you define in your BAML code. Here's how you'd access the generated functions from baml_client:
-
-
-```python Python
-from baml_client import baml as b
-
-async def use_llm_for_task():
- await b.CallMyLLM()
-```
-
-```typescript TypeScript
-import b from '@/baml_client'
-
-const use_llm_for_task = async () => {
- await b.CallMyLLM();
-};
-```
-
-
-
-
- **You should never edit any files inside baml_client directory** as the whole
- directory gets regenerated on every `baml build` (auto runs on save if using
- the VSCode extension).
-
-
-
- If you ever run into any issues with the generated code (like merge
- conflicts), you can always delete the `baml_client` directory and it will get
- regenerated automatically on save.
-
-
-### imports
-
-BAML by default has global imports. Every entity declared in any `.baml` file is available to all other `.baml` files under the same `baml_src` directory. You **can** have multiple `baml_src` directories, but no promises on how the VSCode extension will behave (yet).
diff --git a/docs/docs/guides/hello_world/testing-ai-functions.mdx b/docs/docs/guides/hello_world/testing-ai-functions.mdx
deleted file mode 100644
index 7fac24b87..000000000
--- a/docs/docs/guides/hello_world/testing-ai-functions.mdx
+++ /dev/null
@@ -1,22 +0,0 @@
----
-title: "Testing AI functions"
----
-
-
-One important way to ensure your AI functions are working as expected is to write unit tests. This is especially important when you're working with AI functions that are used in production, or when you're working with a team.
-
-To test functions:
-1. Install the VSCode extension
-2. Create a test in any .baml file:
-```rust
-test MyTest {
- functions [ExtractResume]
- args {
- resume_text "hello"
- }
-}
-
-```
-3. Run the test in the VSCode extension!
-
-We have more capabilities like assertions coming soon!
\ No newline at end of file
diff --git a/docs/docs/guides/hello_world/writing-ai-functions.mdx b/docs/docs/guides/hello_world/writing-ai-functions.mdx
deleted file mode 100644
index e3584ab5f..000000000
--- a/docs/docs/guides/hello_world/writing-ai-functions.mdx
+++ /dev/null
@@ -1,145 +0,0 @@
----
-title: "BAML AI functions in 2 minutes"
----
-
-
-### Pre-requisites
-
-Follow the [installation](/v3/home/installation) instructions.
-
-{/* The starting project structure will look something like this: */}
-{/* */}
-
-## Overview
-
-Before you call an LLM, ask yourself what kind of input or output youre
-expecting. If you want the LLM to generate text, then you probably want a
-string, but if you're trying to get it to collect user details, you may want it
-to return a complex type like `UserDetails`.
-
-Thinking this way can help you decompose large complex prompts into smaller,
-more measurable functions, and will also help you build more complex workflows
-and agents.
-
-# Extracting a resume from text
-
-The best way to learn BAML is to run an example in our web playground -- [PromptFiddle.com](https://promptfiddle.com).
-
-But at a high-level, BAML is simple to use -- prompts are built using [Jinja syntax](https://jinja.palletsprojects.com/en/3.1.x/) to make working with strings easier. But we extended jinja to add type-support, static analysis of your template variables, and we have a real-time preview of prompts in the BAML VSCode extension no matter how much logic your prompts use.
-
-Here's an example from PromptFiddle:
-
-```rust baml_src/main.baml
-client GPT4Turbo {
- provider openai
- options {
- model gpt-4-turbo
- api_key env.OPENAI_API_KEY
- }
-}
-// Declare the Resume type we want the AI function to return
-class Resume {
- name string
- education Education[] @description("Extract in the same order listed")
- skills string[] @description("Only include programming languages")
-}
-
-class Education {
- school string
- degree string
- year int
-}
-
-// Declare the function signature, with the prompt that will be used to make the AI function work
-function ExtractResume(resume_text: string) -> Resume {
- // An LLM client we define elsewhere, with some parameters and our API key
- client GPT4Turbo
-
- // The prompt uses Jinja syntax
- prompt #"
- Parse the following resume and return a structured representation of the data in the schema below.
-
- Resume:
- ---
- {{ resume_text }}
- ---
-
- {# special macro to print the output instructions. #}
- {{ ctx.output_format }}
-
- JSON:
- "#
-}
-```
-That's it! If you use the VSCode extension, everytime you save this .baml file, it will convert this configuration file into a usable Python or TypeScript function in milliseconds, with full types.
-
-All your types become Pydantic models in Python, or type definitions in Typescript (soon we'll support generating Zod types).
-
-
-## 2. Usage in Python or TypeScript
-
-Our VSCode extension automatically generates a **baml_client** in the language of choice. (Click the tabs for Python or TypeScript)
-
-
-
-```python Python
-from baml_client import baml as b
-# BAML types get converted to Pydantic models
-from baml_client.types import Resume
-import asyncio
-
-async def main():
- resume_text = """Jason Doe
-Python, Rust
-University of California, Berkeley, B.S.
-in Computer Science, 2020
-Also an expert in Tableau, SQL, and C++
-"""
-
- # this function comes from the autogenerated "baml_client".
- # It calls the LLM you specified and handles the parsing.
- resume = await b.ExtractResume(resume_text)
-
- # Fully type-checked and validated!
- assert isinstance(resume, Resume)
-
-
-if __name__ == "__main__":
- asyncio.run(main())
-```
-
-```typescript TypeScript
-import b from 'baml_client'
-
-async function main() {
- const resume_text = `Jason Doe
-Python, Rust
-University of California, Berkeley, B.S.
-in Computer Science, 2020
-Also an expert in Tableau, SQL, and C++
-`
-
- // this function comes from the autogenerated "baml_client".
- // It calls the LLM you specified and handles the parsing.
- const resume = await b.ExtractResume(resume_text)
-
- // Fully type-checked and validated!
- assert resume.name === "Jason Doe"
-}
-
-if (require.main === module) {
- main();
-}
-```
-
-
-
-
- The BAML client exports async versions of your functions, so you can parallelize things easily if you need to. To run async functions sequentially you can easily just wrap them in the `asyncio.run(....)`.
-
- Let us know if you want synchronous versions of your functions instead!
-
-
-## Further reading
-- Browse more PromptFiddle [examples](https://promptfiddle.com)
-- See other types of [function signatures](/docs/syntax/function) possible in BAML.
\ No newline at end of file
diff --git a/docs/docs/guides/improve_results/diagnose.mdx b/docs/docs/guides/improve_results/diagnose.mdx
deleted file mode 100644
index 97da6264e..000000000
--- a/docs/docs/guides/improve_results/diagnose.mdx
+++ /dev/null
@@ -1,16 +0,0 @@
----
-title: "Improve my prompt automatically"
----
-
-Use **Boundary Studio** to automatically improve your prompt by using the **Diagnose** feature! We use **GPT-4 powered analysis** to provide you with improvements you can make to your prompt. We aim to incorporate all the best learnings we've acquired from working with many different customers and models.
-
-We have more improvements here planned, like different suggestions depending on your model being used and task type.
-
-To access it:
-1. Click on the "comment" icon on one of the requests.
-2. Click on the "Diagnose" tab.
-
-
-
-
-This feature is limited for users on the free tier, and available as many times as needed for paid users.
diff --git a/docs/docs/guides/improve_results/fine_tune.mdx b/docs/docs/guides/improve_results/fine_tune.mdx
deleted file mode 100644
index d7c5d3ca5..000000000
--- a/docs/docs/guides/improve_results/fine_tune.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: "Fine-tune a model using my production data"
----
-
-Reach out to us on Discord if you want to improve performance, reduce costs or latencies using fine-tuned models! We are working on seamless integrations with fine-tuning platforms.
\ No newline at end of file
diff --git a/docs/docs/guides/overview.mdx b/docs/docs/guides/overview.mdx
deleted file mode 100644
index 1e8b2eccd..000000000
--- a/docs/docs/guides/overview.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: "Table of contents"
----
-
-These tutorials assume you've already done the [Learn BAML](/docs/guides/hello_world/level0) tutorials first and have a hang of some of the basics.
-
-Ping us on [Discord](https://discord.gg/BTNBeXGuaS) if you have any questions!
-
\ No newline at end of file
diff --git a/docs/docs/guides/prompt_engineering/chat-prompts.mdx b/docs/docs/guides/prompt_engineering/chat-prompts.mdx
deleted file mode 100644
index 3e25606f5..000000000
--- a/docs/docs/guides/prompt_engineering/chat-prompts.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: "System vs user prompts"
----
-
-See [PromptFiddle demo](https://promptfiddle.com/chat-roles)
\ No newline at end of file
diff --git a/docs/docs/guides/prompt_engineering/conditional_rendering.mdx b/docs/docs/guides/prompt_engineering/conditional_rendering.mdx
deleted file mode 100644
index 8f70ece36..000000000
--- a/docs/docs/guides/prompt_engineering/conditional_rendering.mdx
+++ /dev/null
@@ -1,7 +0,0 @@
----
-title: "Conditionally generate the prompt based on the input variables"
----
-
-Prompts use Jinja syntax to render variables. You can use any jinja syntax you like.
-
-Examples coming soon!
\ No newline at end of file
diff --git a/docs/docs/guides/prompt_engineering/serialize_complex_input.mdx b/docs/docs/guides/prompt_engineering/serialize_complex_input.mdx
deleted file mode 100644
index f82d0b4a5..000000000
--- a/docs/docs/guides/prompt_engineering/serialize_complex_input.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: "Customize input variables"
----
-
-Examples coming soon!
\ No newline at end of file
diff --git a/docs/docs/guides/prompt_engineering/serialize_list.mdx b/docs/docs/guides/prompt_engineering/serialize_list.mdx
deleted file mode 100644
index b5bda6733..000000000
--- a/docs/docs/guides/prompt_engineering/serialize_list.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: "Serialize a List of chat messages into a prompt"
----
-
-Example coming soon!
\ No newline at end of file
diff --git a/docs/docs/guides/prompt_engineering/strategies.mdx b/docs/docs/guides/prompt_engineering/strategies.mdx
deleted file mode 100644
index 820db3911..000000000
--- a/docs/docs/guides/prompt_engineering/strategies.mdx
+++ /dev/null
@@ -1 +0,0 @@
-# TODO: add symbol tuning here
\ No newline at end of file
diff --git a/docs/docs/guides/resilience/fallback.mdx b/docs/docs/guides/resilience/fallback.mdx
deleted file mode 100644
index 6fec86273..000000000
--- a/docs/docs/guides/resilience/fallback.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: "Fall-back to another model on failure"
----
-
-Checkout the [Fallback API reference](/docs/syntax/client/redundancy) to learn how to make a BAML client fall-back to a different LLM on failure.
\ No newline at end of file
diff --git a/docs/docs/guides/resilience/retries.mdx b/docs/docs/guides/resilience/retries.mdx
deleted file mode 100644
index 26b885ce5..000000000
--- a/docs/docs/guides/resilience/retries.mdx
+++ /dev/null
@@ -1,5 +0,0 @@
----
-title: "Add retries to my AI function (and different retry policies)."
----
-
-Checkout the [retry_policy reference](/docs/syntax/client/retry) to add retries to your AI function.
\ No newline at end of file
diff --git a/docs/docs/guides/streaming/streaming.mdx b/docs/docs/guides/streaming/streaming.mdx
deleted file mode 100644
index c96e4d8d2..000000000
--- a/docs/docs/guides/streaming/streaming.mdx
+++ /dev/null
@@ -1,46 +0,0 @@
----
-title: "Streaming structured data"
----
-
-### Streaming partial objects
-The following returns an object that slowly gets filled in as the response comes in. This is useful if you want to start processing the response before it's fully complete.
-You can stream anything from a `string` output type, to a complex object.
-
-Example:
-```
-{"prop1": "hello"}
-{"prop1": "hello how are you"}
-{"prop1": "hello how are you", "prop2": "I'm good, how are you?"}
-{"prop1": "hello how are you", "prop2": "I'm good, how are you?", "prop3": "I'm doing great, thanks for asking!"}
-```
-
-### Python
-```python FastAPI
-from baml_client import b
-
-@app.get("/extract_resume")
-async def extract_resume(resume_text: str):
- async def stream_resume(resume):
- stream = b.stream.ExtractResume(resume_text)
- async for chunk in stream:
- yield str(chunk.model_dump_json()) + "\n"
-
- return StreamingResponse(stream_resume(resume), media_type="text/plain")
-```
-
-
-### TypeScript
-```typescript
-import { b } from '../baml_client'; // or whatever path baml_client is in
-
-export async function streamText() {
- const stream = b.stream.MyFunction(MyInput(...));
- for await (const output of stream) {
- console.log(`streaming: ${output}`); // this is the output type of my function
- }
-
- const finalOutput = await stream.getFinalResponse();
- console.log(`final response: ${finalOutput}`);
-}
-```
-
diff --git a/docs/docs/guides/testing/test_with_assertions.mdx b/docs/docs/guides/testing/test_with_assertions.mdx
deleted file mode 100644
index be3e76f59..000000000
--- a/docs/docs/guides/testing/test_with_assertions.mdx
+++ /dev/null
@@ -1,96 +0,0 @@
----
-title: "Evaluate results with assertions or using LLM Evals"
----
-
-
-
-# Python guide
-To add assertions to your tests, or add more complex testing scenarios, you can use pytest to test your functions, since Playground BAML tests don't currently support assertions.
-
-### Example
-```python test_file.py
-from baml_client import baml as b
-from baml_client.types import Email
-from baml_client.testing import baml_test
-import pytest
-
-# Run `poetry run pytest -m baml_test` in this directory.
-# Setup Boundary Studio to see test details!
-@pytest.mark.asyncio
-async def test_get_order_info():
- order_info = await b.GetOrderInfo(Email(
- subject="Order #1234",
- body="Your order has been shipped. It will arrive on 1st Jan 2022. Product: iPhone 13. Cost: $999.99"
- ))
-
- assert order_info.cost == 999.99
-```
-
- Make sure your test file, the Test class AND/or the test function is prefixed with `Test` or `test` respectively. Otherwise, pytest will not pick up your tests. E.g. `test_foo.py`, `TestFoo`, `test_foo`
-
-
-
-Run `pytest -k 'order_info'` to run this test. To show have pytest show print statements add the `-s` flag.
-
-
- Make sure you are running these commands from your python virtual environment
- (or **`poetry shell`** if you use poetry)
-
-
-For more advanced testing scenarios, helpful commands, and gotchas, check out the [Advanced Guide](./advanced_testing_guide)
-
-
-
-### Using an LLM eval
-You can also declare a new BAML function that you can use in your tests to validate results.
-
-This is helpful for testing more ambiguous LLM free-form text generations. You can measure anything from sentiment, to the tone of of the text.
-
-For example, the following GPT-4-powered function can be used in your tests to assert that a given generated sentence is professional-sounding:
-
-```rust
-enum ProfessionalismRating {
- GREAT
- OK
- BAD
-}
-
-function ValidateProfessionalism {
- // The string to validate
- input string
- output ProfessionalismRating
-}
-
-impl v1 {
- client GPT4
- prompt #"
- Is this text professional-sounding?
-
- Use the following scale:
- {#print_enum(ProfessionalismRating)}
-
- Sentence: {#input}
-
- ProfessionalismRating:
- "#
-}
-```
-
-```python
-from baml_client import baml as b
-from baml_client.types import Email, ProfessionalismRating
-from baml_client.testing import baml_test
-
-@baml_test
-async def test_message_professionalism():
- order_info = await b.GetOrderInfo(Email(
- subject="Order #1234",
- body="Your order has been shipped. It will arrive on 1st Jan 2022. Product: iPhone 13. Cost: $999.99"
- ))
-
- assert order_info.cost == 999.99
-
- professionalism_rating = await b.ValidateProfessionalism(order_info.body)
- assert professionalism_rating == b.ProfessionalismRating.GREAT
-```
-
diff --git a/docs/docs/guides/testing/unit_test.mdx b/docs/docs/guides/testing/unit_test.mdx
deleted file mode 100644
index 808b45880..000000000
--- a/docs/docs/guides/testing/unit_test.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: "Test an AI function"
----
-
-
-There are two types of tests you may want to run on your AI functions:
-
-- Unit Tests: Tests a single AI function (using the playground)
-- Integration Tests: Tests a pipeline of AI functions and potentially buisness logic
-
-For integration tests, see the [Integration Testing Guide](/docs/guides/testing/test_with_assertions).
\ No newline at end of file
diff --git a/docs/docs/home/baml-in-2-min.mdx b/docs/docs/home/baml-in-2-min.mdx
deleted file mode 100644
index 6b129cc18..000000000
--- a/docs/docs/home/baml-in-2-min.mdx
+++ /dev/null
@@ -1,4 +0,0 @@
----
-title: "BAML in 2 minutes"
-url: "/docs/guides/hello_world/writing-ai-functions"
----
diff --git a/docs/docs/home/faq.mdx b/docs/docs/home/faq.mdx
deleted file mode 100644
index a890a3943..000000000
--- a/docs/docs/home/faq.mdx
+++ /dev/null
@@ -1,36 +0,0 @@
----
-title: FAQs
----
-
-
-
-You don't! BAML files get converted into Python or Typescript using the BAML CLI. You can run the generated code locally or in the cloud.
-
-
-Contact us at contact@boundaryml.com for more details. We have a free tier available.
-
-
-Nope. We do not proxy LLM calls for you. BAML just generates a bunch of python or TypeScript code you can run on your machine. If you opt-in to our logging and analytics we only send logs to our backend. Deploying your app is like deploying any other python/TS application.
-
-
-
-BAML isn't a full-fledged language -- it's more of a configuration file / templating language. You can load it into your code as if it were YAML. Think of it as an extension of [Jinja](https://jinja.palletsprojects.com/en/3.1.x/) or Handlebars.
-
-Earlier we tried making a YAML-based sdk, and even a Python SDK, but they were not powerful enough.
-
-
-
- We are working on more tools like [PromptFiddle.com](https://promptfiddle.com) to make it easier to edit prompts for non-engineers, but we want to make sure all your prompts can be backed by a file in your codebase and versioned by Git.
-
-
-
- Typescript, Python, and Ruby
- Contact us for more
-
-
-
-
- The VSCode extension and BAML are free to use (Open Source as well!). We only charge for usage of
- Boundary Studio, our observability platform. Contact us for pricing. We do have a hobbyist tier and a startup tier available.
-
-
diff --git a/docs/docs/home/installation.mdx b/docs/docs/home/installation.mdx
deleted file mode 100644
index 819e00d3a..000000000
--- a/docs/docs/home/installation.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: Installation
----
-
-
-
- [https://marketplace.visualstudio.com/items?itemName=boundary.BAML](https://marketplace.visualstudio.com/items?itemName=boundary.Baml-extension)
-
- If you are using python, [enable typechecking in VSCode's](https://code.visualstudio.com/docs/python/settings-reference#_python-language-server-settings) `settings.json`:
- ```
- "python.analysis.typecheckingMode": "basic"
- ```
-
-
-
- ```bash Python
- pip install baml-py
- ```
-
- ```bash Typescript
- npm install @boundaryml/baml
- ```
-
-
-
-
- ```bash Python
- # Should be installed via pip install baml-py
- baml-cli init
- ```
-
- ```bash Typescript (npx)
- npx baml-cli init
- ```
-
- ```bash Typescript (pnpx)
- pnpx baml-cli init
- ```
-
-
-
- - [PromptFiddle](https://promptfiddle.com): Interactive examples to learn BAML. (recommended)
- - [BAML Tutorials](docs/guides): Advanced guides on using BAML.
- - [BAML Syntax](/v3/syntax): Documentation for BAML syntax.
- - [BAML Starters for NextJS and FastAPI](https://github.com/BoundaryML/baml-examples/tree/main)
-
-
-
-## Ensure BAML extension can generate your Python / TS client
-
-Save a `.baml` file using VSCode, and you should see a successful generation message pop up!
-
-You can also run `baml-cli generate --from path-to-baml-src` to generate the client code manually.
\ No newline at end of file
diff --git a/docs/docs/home/roadmap.mdx b/docs/docs/home/roadmap.mdx
deleted file mode 100644
index ea05334a8..000000000
--- a/docs/docs/home/roadmap.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: "Roadmap"
----
-
-### Language Support
-
-Features are available in all languages at equal parity unless otherwise noted.
-
-| Language Support | Status | Notes |
-| ---------------- | ------ | ----------------------------------- |
-| Python | ✅ | |
-| TypeScript | ✅ | |
-| Ruby | 🚧 | Alpha release, contact us to use it |
-
-Contact us on Discord if you have a language you'd like to see supported.
diff --git a/docs/docs/home/running-tests.mdx b/docs/docs/home/running-tests.mdx
deleted file mode 100644
index 62879ebd6..000000000
--- a/docs/docs/home/running-tests.mdx
+++ /dev/null
@@ -1,26 +0,0 @@
----
-title: "Running Tests"
----
-
-## Using the playground
-
-Use the playground to run tests against individual function impls.
-
-
-
-## From BAML Studio
-
-Coming soon
-You can also create tests from production logs in BAML Studio. Any weird or atypical
-user inputs can be used to create a test case with just 1 click.
-
-## Programmatically
-
-Tests can also be defined using common testing frameworks like pytest. [Learn more](/v3/syntax/function-testing).
diff --git a/docs/docs/observability/overview.mdx b/docs/docs/observability/overview.mdx
new file mode 100644
index 000000000..a62924c39
--- /dev/null
+++ b/docs/docs/observability/overview.mdx
@@ -0,0 +1,14 @@
+---
+title: "Enabling"
+---
+
+To enable observability with BAML, you'll first need to sign up for a [Boundary Studio](https://app.boundaryml.com) account.
+
+Once you've signed up, you'll be able to create a new project and get your project token.
+
+Then simply add the following environment variables prior to running your application:
+
+```bash
+export BOUNDARY_PROJECT_ID=project_uuid
+export BOUNDARY_SECRET=your_token
+```
diff --git a/docs/docs/guides/boundary_studio/tracing-tagging.mdx b/docs/docs/observability/tracing-tagging.mdx
similarity index 94%
rename from docs/docs/guides/boundary_studio/tracing-tagging.mdx
rename to docs/docs/observability/tracing-tagging.mdx
index 1d8c25b35..fd603595e 100644
--- a/docs/docs/guides/boundary_studio/tracing-tagging.mdx
+++ b/docs/docs/observability/tracing-tagging.mdx
@@ -29,10 +29,10 @@ async def pre_process_text(text):
@trace
async def full_analysis(book: Book):
- sentiment = await baml.ClassifySentiment.get_impl("v1").run(
+ sentiment = await baml.ClassifySentiment(
pre_process_text(book.content)
)
- book_analysis = await baml.AnalyzeBook.get_impl("v1").run(book)
+ book_analysis = await baml.AnalyzeBook(book)
return book_analysis
@@ -67,7 +67,7 @@ To add a custom tag, you can import **update_trace_tags(..)** as below:
from baml_client.tracing import set_tags, trace
import typing
-@trace()
+@trace
async def pre_process_text(text):
set_tags(userId="1234")
diff --git a/docs/docs/snippets/class.mdx b/docs/docs/snippets/class.mdx
new file mode 100644
index 000000000..c5f9d0230
--- /dev/null
+++ b/docs/docs/snippets/class.mdx
@@ -0,0 +1,115 @@
+---
+title: "class"
+---
+
+Classes consist of a name, a list of properties, and their [types](/docs/snippets/supported-types).
+In the context of LLMs, classes describe the type of the variables you can inject into prompts and extract out from the response.
+
+
+ Note that properties have no `:` between the field name and the type
+
+
+
+```rust BAML
+class Foo {
+ property1 string
+ property2 int?
+ property3 Bar[]
+ property4 MyEnum
+}
+```
+
+```python Python Equivalent
+from pydantic import BaseModel
+from typing import List, Optional
+from path.to.bar import Bar
+from path.to.my_enum import MyEnum
+
+class Foo(BaseModel):
+ property1: str
+ property2: Optional[int] = None
+ property3: List[Bar]
+ property4: MyEnum
+```
+
+```typescript Typescript Equivalent
+import z from "zod";
+import { BarZod } from "./path/to/bar";
+import { MyEnumZod } from "./path/to/my_enum";
+
+const FooZod = z.object({
+ property1: z.string(),
+ property2: z.number().int().nullable().optional(),
+ property3: z.array(BarZod),
+ property4: MyEnumZod,
+});
+
+type Foo = z.infer<typeof FooZod>;
+```
+
+
+
+## Class Attributes
+
+
+If set, this allows you to add fields to the class dynamically at runtime (in your Python/TS/etc. code). See [dynamic classes](/docs/calling-baml/dynamic-types) for more information.
+
+
+
+```rust BAML
+class MyClass {
+ property1 string
+ property2 int?
+
+ @@dynamic // allows me to later add property3 float[] at runtime
+}
+```
+
+## Field Attributes
+
+When prompt engineering, you can also alias fields and add descriptions.
+
+
+Aliasing renames the field for the LLM so it can potentially "understand" your value better, while keeping the original name in your code, so you don't need to change your downstream code every time.
+
+This will also be used for parsing the output of the LLM back into the original object.
+
+
+
+This adds some additional context to the field in the prompt.
+
+
+
+```rust BAML
+class MyClass {
+ property1 string @alias("name") @description("The name of the object")
+ age int? @description("The age of the object")
+}
+```
+
+## Constraints
+
+Classes may have any number of properties.
+
+Property names must follow these rules:
+- Must start with a letter
+- Must contain only letters, numbers, and underscores
+- Must be unique within the class
+
+Classes cannot be self-referential (a class cannot have a property whose type is the class itself).
+
+The type of a property can be any [supported type](/docs/snippets/supported-types)
+
+### Default values
+
+- Not yet supported. For optional properties, the default value is `None` in Python.
+
+## Inheritance
+
+Never supported. Like Rust, we take the stance that [composition is better than inheritance](https://www.digitalocean.com/community/tutorials/composition-vs-inheritance).
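+
+As a short sketch of the composition approach, you embed one class inside another instead of extending it (the class names here are illustrative):
+
+```rust BAML
+class Address {
+  street string
+  city string
+}
+
+class Person {
+  name string
+  // compose rather than inherit: a Person has an Address
+  address Address
+}
+```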
+
diff --git a/docs/docs/snippets/clients/fallback.mdx b/docs/docs/snippets/clients/fallback.mdx
new file mode 100644
index 000000000..c1561110d
--- /dev/null
+++ b/docs/docs/snippets/clients/fallback.mdx
@@ -0,0 +1,75 @@
+---
+title: fallback
+---
+
+You can use the `fallback` provider to add more resiliency to your application.
+
+A fallback will attempt to use the first client, and if it fails, it will try the second client, and so on.
+
+You can nest fallbacks inside of other fallbacks.
+
+```rust BAML
+client SuperDuperClient {
+ provider fallback
+ options {
+ strategy [
+ ClientA
+ ClientB
+ ClientC
+ ]
+ }
+}
+```
+
+## Options
+
+
+ The list of client names to try in order. Cannot be empty.
+
+
+## retry_policy
+
+Like any other client, you can specify a retry policy for the fallback client. See [retry_policy](retry-policy) for more information.
+
+The retry policy applies to the fallback client as a whole; it kicks in only after the entire strategy has failed.
+
+```rust BAML
+client SuperDuperClient {
+ provider fallback
+ retry_policy MyRetryPolicy
+ options {
+ strategy [
+ ClientA
+ ClientB
+ ClientC
+ ]
+ }
+}
+```
+
+## Nesting multiple fallbacks
+
+You can nest multiple fallbacks inside of each other. The fallbacks will just chain as you would expect.
+
+```rust BAML
+client SuperDuperClient {
+ provider fallback
+ options {
+ strategy [
+ ClientA
+ ClientB
+ ClientC
+ ]
+ }
+}
+
+client MegaClient {
+ provider fallback
+ options {
+ strategy [
+ SuperDuperClient
+ ClientD
+ ]
+ }
+}
+```
\ No newline at end of file
diff --git a/docs/docs/snippets/clients/overview.mdx b/docs/docs/snippets/clients/overview.mdx
new file mode 100644
index 000000000..e2a8f88da
--- /dev/null
+++ b/docs/docs/snippets/clients/overview.mdx
@@ -0,0 +1,54 @@
+Clients are used to configure how LLMs are called.
+
+Here's an example of a client configuration:
+
+```rust BAML
+client MyClient {
+ provider openai
+ options {
+ model gpt-4o // Configure which model is used
+ temperature 0.7 // Pass additional options to the model
+ }
+}
+```
+
+Usage:
+
+```rust BAML
+function MakeHaiku(topic: string) -> string {
+ client MyClient
+ prompt #"
+ Write a haiku about {{ topic }}.
+ "#
+}
+```
+
+## Fields
+
+
+This configures which provider to use. The provider is responsible for handling the actual API calls to the LLM service. The provider is a required field.
+
+The provider configuration determines the URL and request format that the BAML runtime uses.
+
+| Provider Name | Docs | Notes |
+| -------------- | -------------------------------- | ---------------------------------------------------------- |
+| `openai` | [OpenAI](providers/openai) | Anything that follows openai's API exactly |
+| `ollama` | [Ollama](providers/ollama) | Alias for an openai client but with default ollama options |
+| `azure-openai` | [Azure OpenAI](providers/azure) | |
+| `anthropic` | [Anthropic](providers/anthropic) | |
+| `google-ai` | [Google AI](providers/gemini) | |
+| `fallback` | [Fallback](fallback) | Used to chain models conditional on failures |
+| `round-robin` | [Round Robin](round-robin) | Used to load balance |
+
+
+
+
+ The name of the retry policy. See [Retry
+ Policy](/docs/snippets/clients/retry).
+
+
+
+ These vary per provider. Please see provider specific documentation for more
+ information. Generally they are pass through options to the POST request made
+ to the LLM.
+
diff --git a/docs/docs/snippets/clients/providers/anthropic.mdx b/docs/docs/snippets/clients/providers/anthropic.mdx
new file mode 100644
index 000000000..15344d59d
--- /dev/null
+++ b/docs/docs/snippets/clients/providers/anthropic.mdx
@@ -0,0 +1,117 @@
+---
+title: anthropic
+---
+
+The `anthropic` provider supports all APIs that use the same interface for the `/v1/messages` endpoint.
+
+For `ollama` ([Docs](ollama)) or `azure` ([Docs](azure)), we recommend using the respective provider instead, as it comes with more checks.
+
+Example:
+```rust BAML
+client MyClient {
+ provider anthropic
+ options {
+ model "claude-3-5-sonnet-20240620"
+ temperature 0
+ }
+}
+```
+
+The options are passed through directly to the API, barring a few. Here's a shorthand of the options:
+
+## Non-forwarded options
+
+ Will be passed as a bearer token. **Default: `env.ANTHROPIC_API_KEY`**
+
+ `Authorization: Bearer $api_key`
+
+
+
+ The base URL for the API. **Default: `https://api.anthropic.com`**
+
+
+
+ The default role for any prompts that don't specify a role. **Default: `system`**
+
+ We don't have any checks for this field, you can pass any string you wish.
+
+
+
+ Additional headers to send with the request.
+
+ Unless specified with a different value, we inject in the following headers:
+ ```
+ "anthropic-version" "2023-06-01"
+ ```
+
+Example:
+```rust
+client MyClient {
+ provider anthropic
+ options {
+ api_key env.MY_ANTHROPIC_KEY
+ model "claude-3-5-sonnet-20240620"
+ headers {
+ "X-My-Header" "my-value"
+ }
+ }
+}
+```
+
+
+## Forwarded options
+
+ BAML will auto construct this field for you from the prompt, if necessary.
+ Only the first system message will be used; all subsequent ones will be cast to the `assistant` role.
+
+
+
+ BAML will auto construct this field for you from the prompt
+
+
+
+ BAML will auto construct this field for you based on how you call the client in your code
+
+
+
+ The model to use.
+
+| Model | Description |
+| --- | --- |
+| `claude-3-5-sonnet-20240620` | |
+| `claude-3-opus-20240229` | |
+| `claude-3-sonnet-20240229` | |
+| `claude-3-haiku-20240307` | |
+
+
+
+See the Anthropic docs for the latest list of all models. You can pass any model name you wish; we will not check if it exists.
+
+
+
+ The maximum number of tokens to generate. **Default: `4096`**
+
+
+
+For all other options, see the [official anthropic API documentation](https://docs.anthropic.com/en/api/messages).
\ No newline at end of file
diff --git a/docs/docs/snippets/clients/providers/aws-bedrock.mdx b/docs/docs/snippets/clients/providers/aws-bedrock.mdx
new file mode 100644
index 000000000..c96456d0f
--- /dev/null
+++ b/docs/docs/snippets/clients/providers/aws-bedrock.mdx
@@ -0,0 +1,80 @@
+---
+title: aws-bedrock
+description: AWS Bedrock provider for BAML
+---
+
+The `aws-bedrock` provider supports all text-output models available via the [`Converse` API](https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html).
+
+Example:
+
+```rust BAML
+client MyClient {
+ provider aws-bedrock
+ options {
+ // auth is handled by the AWS SDK (see Authorization below), so no api_key is needed
+ model "anthropic.claude-3-haiku-20240307-v1:0"
+ }
+}
+```
+
+## Authorization
+
+We use the AWS SDK under the hood, which will respect [all authentication mechanisms supported by the SDK](https://docs.rs/aws-config/latest/aws_config/index.html), including but not limited to:
+
+ - `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` as set in your environment variables
+ - loading the specified `AWS_PROFILE` from `~/.aws/config`
+ - built-in authn for services running in EC2, ECS, Lambda, etc.
+
+## Forwarded options
+
+
+ BAML will auto construct this field for you from the prompt
+
+
+
+ The model to use.
+
+| Model | Description |
+| --------------- | ------------------------------ |
+| `anthropic.claude-3-haiku-20240307-v1:0` | Fastest + Cheapest |
+| `anthropic.claude-3-sonnet-20240229-v1:0` | Smartest |
+| `meta.llama3-8b-instruct-v1:0` | |
+| `meta.llama3-70b-instruct-v1:0` | |
+| `mistral.mistral-7b-instruct-v0:2` | |
+| `mistral.mixtral-8x7b-instruct-v0:1` | |
+
+Run `aws bedrock list-foundation-models | jq '.modelSummaries[].modelId'` to get a list of available foundation models; you can also use any custom models you've deployed.
+
+Note that to use any of these models you'll need to [request model access].
+
+[request model access]: https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html
+
+
+
+
+ Additional inference configuration to send with the request; see [AWS Bedrock documentation](https://docs.rs/aws-sdk-bedrockruntime/latest/aws_sdk_bedrockruntime/types/struct.InferenceConfiguration.html).
+
+Example:
+
+```rust BAML
+client MyClient {
+ provider aws-bedrock
+ options {
+ inference_configuration {
+ max_tokens 1000
+ temperature 1.0
+ top_p 0.8
+ stop_sequence ["_EOF"]
+ }
+ }
+}
+```
+
+
diff --git a/docs/docs/snippets/clients/providers/azure.mdx b/docs/docs/snippets/clients/providers/azure.mdx
new file mode 100644
index 000000000..f4745b2dd
--- /dev/null
+++ b/docs/docs/snippets/clients/providers/azure.mdx
@@ -0,0 +1,111 @@
+---
+title: azure-openai
+---
+
+For `azure-openai`, we provide a client that can be used to interact with the OpenAI API hosted on Azure using the `/chat/completions` endpoint.
+
+Example:
+```rust BAML
+client MyClient {
+ provider azure-openai
+ options {
+ resource_name "my-resource-name"
+ deployment_id "my-deployment-id"
+ // Alternatively, you can use the base_url field
+ // base_url "https://my-resource-name.openai.azure.com/openai/deployments/my-deployment-id"
+ api_version "2024-02-01"
+ api_key env.AZURE_OPENAI_API_KEY
+ }
+}
+```
+
+
+ `api_version` is required. Azure will return a "not found" error if the version is not specified.
+
+
+
+The options are passed through directly to the API, barring a few. Here's a shorthand of the options:
+
+## Non-forwarded options
+
+ Will be injected via the header `API-KEY`. **Default: `env.AZURE_OPENAI_API_KEY`**
+
+ `API-KEY: $api_key`
+
+
+
+ The base URL for the API. **Default: `https://${resource_name}.openai.azure.com/openai/deployments/${deployment_id}`**
+
+ May be used instead of `resource_name` and `deployment_id`.
+
+
+
+ See the `base_url` field.
+
+
+
+ See the `base_url` field.
+
+
+
+ The default role for any prompts that don't specify a role. **Default: `system`**
+
+ We don't have any checks for this field, you can pass any string you wish.
+
+
+
+ Will be passed via a query parameter `api-version`.
+
+
+
+ Additional headers to send with the request.
+
+Example:
+```rust BAML
+client MyClient {
+ provider azure-openai
+ options {
+ resource_name "my-resource-name"
+ deployment_id "my-deployment-id"
+ api_version "2024-02-01"
+ api_key env.AZURE_OPENAI_API_KEY
+ headers {
+ "X-My-Header" "my-value"
+ }
+ }
+}
+```
+
+
+## Forwarded options
+
+ BAML will auto construct this field for you from the prompt
+
+
+ BAML will auto construct this field for you based on how you call the client in your code
+
+
+For all other options, see the [official Azure API documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#chat-completions).
diff --git a/docs/docs/snippets/clients/providers/gemini.mdx b/docs/docs/snippets/clients/providers/gemini.mdx
new file mode 100644
index 000000000..48537cafe
--- /dev/null
+++ b/docs/docs/snippets/clients/providers/gemini.mdx
@@ -0,0 +1,89 @@
+---
+title: google-ai
+---
+
+The `google-ai` provider supports the `https://generativelanguage.googleapis.com/v1/models/{model_id}/generateContent` and `https://generativelanguage.googleapis.com/v1/models/{model_id}/streamGenerateContent` endpoints.
+
+
+BAML will automatically pick `streamGenerateContent` if you call the streaming interface.
+
+
+Example:
+```rust BAML
+client MyClient {
+ provider google-ai
+ options {
+ model "gemini-1.5-flash"
+ }
+}
+```
+
+The options are passed through directly to the API, barring a few. Here's a shorthand of the options:
+
+## Non-forwarded options
+
+ Will be passed as the `x-goog-api-key` header. **Default: `env.GOOGLE_API_KEY`**
+
+ `x-goog-api-key: $api_key`
+
+
+
+ The base URL for the API. **Default: `https://generativelanguage.googleapis.com/v1`**
+
+
+
+ The default role for any prompts that don't specify a role. **Default: `user`**
+
+ We don't have any checks for this field, you can pass any string you wish.
+
+
+
+ The model to use. **Default: `gemini-1.5-flash`**
+
+ We don't have any checks for this field, you can pass any string you wish.
+
+| Model | Input(s) | Optimized for |
+| --- | --- | --- |
+| `gemini-1.5-pro` | Audio, images, videos, and text | Complex reasoning tasks such as code and text generation, text editing, problem solving, data extraction and generation |
+| `gemini-1.5-flash` | Audio, images, videos, and text | Fast and versatile performance across a diverse variety of tasks |
+| `gemini-1.0-pro` | Text | Natural language tasks, multi-turn text and code chat, and code generation |
+
+See the [Google Docs](https://ai.google.dev/gemini-api/docs/models/gemini) for the latest models.
+
+
+
+ Additional headers to send with the request.
+
+Example:
+```rust BAML
+client MyClient {
+ provider google-ai
+ options {
+ model "gemini-1.5-flash"
+ headers {
+ "X-My-Header" "my-value"
+ }
+ }
+}
+```
+
+
+## Forwarded options
+
+ BAML will auto construct this field for you from the prompt
+
+
+
+For all other options, see the [official Google Gemini API documentation](https://ai.google.dev/api/rest/v1/models/generateContent).
diff --git a/docs/docs/snippets/clients/providers/ollama.mdx b/docs/docs/snippets/clients/providers/ollama.mdx
new file mode 100644
index 000000000..3c83f1ea8
--- /dev/null
+++ b/docs/docs/snippets/clients/providers/ollama.mdx
@@ -0,0 +1,91 @@
+---
+title: ollama
+---
+
+For `ollama`, we provide a client that can be used to interact with the [ollama](https://ollama.com/) `/chat/completions` endpoint.
+
+What is ollama? Ollama is an easy way to run LLMs locally!
+
+Example:
+```rust BAML
+client MyClient {
+ provider ollama
+ options {
+ model llama2
+ }
+}
+```
+
+The options are passed through directly to the API, barring a few. Here's a shorthand of the options:
+
+## Non-forwarded options
+
+ The base URL for the API. **Default: `http://localhost:11434/v1`**
+ Note the `/v1` at the end of the URL. See [Ollama's OpenAI compatibility](https://ollama.com/blog/openai-compatibility)
+
+
+
+ The default role for any prompts that don't specify a role. **Default: `system`**
+
+ We don't have any checks for this field, you can pass any string you wish.
+
+
+
+ Additional headers to send with the request.
+
+Example:
+```rust BAML
+client MyClient {
+ provider ollama
+ options {
+ model llama2
+ headers {
+ "X-My-Header" "my-value"
+ }
+ }
+}
+```
+
+
+## Forwarded options
+
+ BAML will auto construct this field for you from the prompt
+
+
+ BAML will auto construct this field for you based on how you call the client in your code
+
+
+ The model to use.
+
+| Model | Description |
+| --- | --- |
+| `llama3` | Meta Llama 3: The most capable openly available LLM to date |
+| `qwen2` | Qwen2 is a new series of large language models from Alibaba group |
+| `phi3` | Phi-3 is a family of lightweight 3B (Mini) and 14B (Medium) state-of-the-art open models by Microsoft |
+| `aya` | Aya 23, released by Cohere, is a new family of state-of-the-art, multilingual models that support 23 languages. |
+| `mistral` | The 7B model released by Mistral AI, updated to version 0.3. |
+| `gemma` | Gemma is a family of lightweight, state-of-the-art open models built by Google DeepMind. Updated to version 1.1 |
+| `mixtral` | A set of Mixture of Experts (MoE) model with open weights by Mistral AI in 8x7b and 8x22b parameter sizes. |
+
+See the [Ollama Model Library](https://ollama.com/library) for the full list of models.
+
+To pin a specific version, use a tagged model name such as `"mixtral:8x22b"`, as in the sketch below.
+
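+A minimal sketch of a client pinned to a tagged model version (the client name here is illustrative):
+
+```rust BAML
+client MyPinnedClient {
+  provider ollama
+  options {
+    model "mixtral:8x22b"
+  }
+}
+```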
+
+
+For all other options, see the [official OpenAI API documentation](https://platform.openai.com/docs/api-reference/chat/create).
diff --git a/docs/docs/snippets/clients/providers/openai.mdx b/docs/docs/snippets/clients/providers/openai.mdx
new file mode 100644
index 000000000..310298120
--- /dev/null
+++ b/docs/docs/snippets/clients/providers/openai.mdx
@@ -0,0 +1,94 @@
+---
+title: openai
+---
+
+The `openai` provider supports all APIs that use the same interface as the `/chat/completions` endpoint.
+
+
+ For `ollama` ([Docs](ollama)) or `azure-openai` ([Docs](azure)) we recommend
+ using the respective provider instead.
+
+
+Example:
+
+```rust BAML
+client MyClient {
+ provider openai
+ options {
+ api_key env.MY_OPENAI_KEY
+ model "gpt-3.5-turbo"
+ temperature 0.1
+ }
+}
+```
+
+The options are passed through directly to the API, barring a few. Here's a shorthand of the options:
+
+## Non-forwarded options
+
+
+ Will be passed as a bearer token. **Default: `env.OPENAI_API_KEY`**
+ `Authorization: Bearer $api_key`
+
+
+
+ The base URL for the API. **Default: `https://api.openai.com/v1`**
+
+
+
+ The default role for any prompts that don't specify a role. **Default:
+ `system`** We don't have any checks for this field, you can pass any string
+ you wish.
+
+
+
+ Additional headers to send with the request.
+
+Example:
+
+```rust BAML
+client MyClient {
+ provider openai
+ options {
+ api_key env.MY_OPENAI_KEY
+ model "gpt-3.5-turbo"
+ headers {
+ "X-My-Header" "my-value"
+ }
+ }
+}
+```
+
+
+
+## Forwarded options
+
+
+ BAML will auto construct this field for you from the prompt
+
+
+ BAML will auto construct this field for you based on how you call the client in your code
+
+
+ The model to use.
+
+| Model | Description |
+| --------------- | ------------------------------ |
+| `gpt-3.5-turbo` | Fastest + Cheapest |
+| `gpt-4o` | Fast + text + image |
+| `gpt-4-turbo` | Smartest + text + image + code |
+
+See the OpenAI docs for the list of OpenAI models. You can pass any model name you wish; we will not check if it exists.
+
+
+
+For all other options, see the [official OpenAI API documentation](https://platform.openai.com/docs/api-reference/chat/create).
diff --git a/docs/docs/snippets/clients/providers/other.mdx b/docs/docs/snippets/clients/providers/other.mdx
new file mode 100644
index 000000000..33ee98a34
--- /dev/null
+++ b/docs/docs/snippets/clients/providers/other.mdx
@@ -0,0 +1,28 @@
+---
+title: Others (e.g. openrouter)
+---
+
+Since many model providers are settling on the OpenAI Chat API spec, the recommended way to call them is with the `openai` provider.
+
+Please report an [issue](https://github.com/BoundaryML/baml/issues) if you encounter something that doesn't work as expected.
+
+## Examples
+
+### OpenRouter
+
+https://openrouter.ai - A unified interface for LLMs
+
+```rust BAML
+client MyClient {
+ provider openai
+ options {
+ base_url "https://openrouter.ai/api/v1"
+ api_key env.OPENROUTER_API_KEY
+ model "openai/gpt-3.5-turbo"
+ headers {
+ "HTTP-Referer" "YOUR-SITE-URL" // Optional
+ "X-Title" "YOUR-TITLE" // Optional
+ }
+ }
+}
+```
diff --git a/docs/docs/snippets/clients/providers/vertex.mdx b/docs/docs/snippets/clients/providers/vertex.mdx
new file mode 100644
index 000000000..b3668ef78
--- /dev/null
+++ b/docs/docs/snippets/clients/providers/vertex.mdx
@@ -0,0 +1,7 @@
+---
+title: vertex-ai
+---
+
+We don't currently offer `vertex-ai` as a provider. If you'd like to see it, please comment on this issue: [Support Vertex AI provider](https://github.com/BoundaryML/baml/issues/706).
+
+You can instead use Gemini models with the `google-ai` provider; [see here](gemini).
\ No newline at end of file
diff --git a/docs/docs/snippets/clients/retry.mdx b/docs/docs/snippets/clients/retry.mdx
new file mode 100644
index 000000000..5201a69e6
--- /dev/null
+++ b/docs/docs/snippets/clients/retry.mdx
@@ -0,0 +1,85 @@
+---
+title: retry_policy
+---
+
+A retry policy can be attached to any `client` and will attempt to retry requests that fail due to a network error.
+
+```rust BAML
+retry_policy MyPolicyName {
+ max_retries 3
+}
+```
+
+Usage:
+```rust BAML
+client MyClient {
+ provider anthropic
+ retry_policy MyPolicyName
+ options {
+ model "claude-3-sonnet-20240229"
+ api_key env.ANTHROPIC_API_KEY
+ }
+}
+```
+
+## Fields
+
+ Number of **additional** retries to attempt after the initial request fails.
+
+
+
+ The strategy to use for retrying requests. Default is `constant_delay(delay_ms=200)`.
+
+| Strategy | Docs | Notes |
+| --- | --- | --- |
+| `constant_delay` | [Docs](#constant-delay) | |
+| `exponential_backoff` | [Docs](#exponential-backoff) | |
+
+Example:
+```rust BAML
+retry_policy MyPolicyName {
+ max_retries 3
+ strategy {
+ type constant_delay
+ delay_ms 200
+ }
+}
+```
+
+
+
+## Strategies
+
+### constant_delay
+
+ Configures the constant delay strategy.
+
+
+
+ The delay in milliseconds to wait between retries. **Default: 200**
+
+
+
+### exponential_backoff
+
+ Configures the exponential backoff strategy.
+
+
+
+ The initial delay in milliseconds to wait between retries. **Default: 200**
+
+
+
+ The multiplier to apply to the delay after each retry. **Default: 1.5**
+
+
+
+ The maximum delay in milliseconds to wait between retries. **Default: 10000**
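+
+Example (policy name and values are illustrative):
+```rust BAML
+retry_policy MyBackoffPolicy {
+  max_retries 4
+  strategy {
+    type exponential_backoff
+    delay_ms 300
+    multiplier 2.0
+    max_delay_ms 10000
+  }
+}
+```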
+
\ No newline at end of file
diff --git a/docs/docs/snippets/clients/round-robin.mdx b/docs/docs/snippets/clients/round-robin.mdx
new file mode 100644
index 000000000..e968dd643
--- /dev/null
+++ b/docs/docs/snippets/clients/round-robin.mdx
@@ -0,0 +1,85 @@
+---
+title: round-robin
+---
+
+The `round-robin` provider allows you to distribute requests across multiple clients in a round-robin fashion. After each call, the next client in the list is used.
+
+```rust BAML
+client MyClient {
+ provider round-robin
+ options {
+ strategy [
+ ClientA
+ ClientB
+ ClientC
+ ]
+ }
+}
+```
+
+## Options
+
+
+ The list of client names to try in order. Cannot be empty.
+
+
+
+ The index of the client to start with.
+
+ **Default is `random(0, len(strategy))`**
+
+ In the [BAML Playground](/docs/get-started/quickstart/editors-vscode), the default is `0`.
+
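+For example, to always begin the rotation with the first client in the list (a minimal sketch; `ClientA` and `ClientB` are assumed to be defined elsewhere):
+
+```rust BAML
+client MyClient {
+  provider round-robin
+  options {
+    start 0
+    strategy [
+      ClientA
+      ClientB
+    ]
+  }
+}
+```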
+
+## retry_policy
+
+When using a retry_policy with a round-robin client, it will rotate the strategy list after each retry.
+
+```rust BAML
+client MyClient {
+ provider round-robin
+ retry_policy MyRetryPolicy
+ options {
+ strategy [
+ ClientA
+ ClientB
+ ClientC
+ ]
+ }
+}
+```
+
+## Nesting multiple round-robin clients
+
+You can nest round-robin clients inside each other. The rotation works as you would expect.
+
+```rust BAML
+client MyClient {
+ provider round-robin
+ options {
+ strategy [
+ ClientA
+ ClientB
+ ClientC
+ ]
+ }
+}
+
+client MegaClient {
+ provider round-robin
+ options {
+ strategy [
+ MyClient
+ ClientD
+ ClientE
+ ]
+ }
+}
+
+// Calling MegaClient will call:
+// MyClient(ClientA)
+// ClientD
+// ClientE
+// MyClient(ClientB)
+// etc.
+```
diff --git a/docs/docs/snippets/enum.mdx b/docs/docs/snippets/enum.mdx
new file mode 100644
index 000000000..af432eece
--- /dev/null
+++ b/docs/docs/snippets/enum.mdx
@@ -0,0 +1,109 @@
+---
+title: "enum"
+---
+
+Enums are useful for classification tasks. BAML has helper functions that can help you serialize an enum into your prompt in a neatly formatted list (more on that later).
+
+To define your own custom enum in BAML:
+
+
+```rust BAML
+enum MyEnum {
+ Value1
+ Value2
+ Value3
+}
+```
+
+```python Python Equivalent
+from enum import StrEnum
+
+class MyEnum(StrEnum):
+ Value1 = "Value1"
+ Value2 = "Value2"
+ Value3 = "Value3"
+```
+
+```typescript Typescript Equivalent
+enum MyEnum {
+ Value1 = "Value1",
+ Value2 = "Value2",
+ Value3 = "Value3",
+}
+```
+
+
+
+- You may have as many values as you'd like.
+- Values may not be duplicated or empty.
+- Values may not contain spaces or special characters and must not start with a number.
+
+## Enum Attributes
+
+
+This is the name of the enum rendered in the prompt.
+
+
+
+
+If set, allows you to add/remove/modify values in the enum dynamically at runtime (in your Python/TS/etc. code). See [dynamic enums](/docs/calling-baml/dynamic-types) for more information.
+
+
+
+```rust BAML
+enum MyEnum {
+ Value1
+ Value2
+ Value3
+
+ @@alias("My Custom Enum")
+ @@dynamic // allows me to later skip Value2 at runtime
+}
+```
+
+## Value Attributes
+
+When prompt engineering, you can also alias values and add descriptions, or even skip them.
+
+
+Aliasing renames a value so the LLM can potentially "understand" it better, while keeping the original name in your code, so you don't need to change your downstream code every time.
+
+This will also be used for parsing the output of the LLM back into the enum.
+
+
+
+This adds some additional context to the value in the prompt.
+
+
+
+Skip this value in the prompt and during parsing.
+
+
+
+```rust BAML
+enum MyEnum {
+ Value1 @alias("complete_summary") @description("Answer in 2 sentences")
+ Value2
+ Value3 @skip
+ Value4 @description(#"
+ This is a long description that spans multiple lines.
+ It can be useful for providing more context to the value.
+ "#)
+}
+```
+
+
+See more in [prompt syntax docs](prompt-syntax/what-is-jinja)
diff --git a/docs/docs/snippets/functions/classification.mdx b/docs/docs/snippets/functions/classification.mdx
new file mode 100644
index 000000000..b40e0775c
--- /dev/null
+++ b/docs/docs/snippets/functions/classification.mdx
@@ -0,0 +1,77 @@
+You can write functions to classify elements using [Enums](/docs/snippets/enum).
+
+Here is such an example:
+
+```rust BAML
+enum Category {
+ Refund
+ CancelOrder @description("some description")
+ TechnicalSupport @alias("technical-help") // or alias the name
+ AccountIssue
+ Question
+}
+
+function ClassifyMessage(input: string) -> Category {
+ client GPT4Turbo
+ prompt #"
+ {#
+ This automatically injects good instructions
+ for classification since BAML knows
+ Category is an enum.
+ #}
+ {{ ctx.output_format }}
+
+ {{ _.role('user') }}
+ {{ input }}
+
+ {{ _.role('assistant') }}
+ Response:
+ "#
+}
+```
+If you use the BAML Playground, you can see what we inject into the prompt, with full transparency.
+
+The neat part about BAML is that you don't need to parse the enums out of the answer yourself. It _will just work_. BAML's fuzzy parsing detects when the LLM prints something like:
+```text
+Based on the information provided, I think the answer is Refund
+```
+and will give you the actual `Category.Refund` when you call the function. We will add more knobs so you can make this parsing more or less strict.
+
+## Usage
+
+```python python
+from baml_client import b
+from baml_client.types import Category
+
+...
+ result = await b.ClassifyMessage("I want to cancel my order")
+ assert result == Category.CancelOrder
+```
+
+```typescript typescript
+import b from 'baml_client'
+import { Category } from 'baml_client/types'
+
+...
+ const result = await b.ClassifyMessage("I want to cancel my order")
+  assert(result === Category.CancelOrder)
+```
+
+```ruby ruby
+require_relative "baml_client/client"
+
+$b = Baml.Client
+
+def main
+ category = $b.ClassifyMessage(input: "I want to cancel my order")
+ puts category == Baml::Types::Category::CancelOrder
+end
+
+if __FILE__ == $0
+ main
+end
+```
+
+
+## Handling Dynamic Categories (e.g. user-provided, or from a database)
+To handle dynamic categories you can use [dynamic enums](/docs/calling-baml/dynamic-types) to build your enum at runtime.
+
diff --git a/docs/docs/snippets/functions/extraction.mdx b/docs/docs/snippets/functions/extraction.mdx
new file mode 100644
index 000000000..3dd195aec
--- /dev/null
+++ b/docs/docs/snippets/functions/extraction.mdx
@@ -0,0 +1,75 @@
+Here is how we can get structured data from a chunk of text or even an image (using a union input type):
+
+```rust BAML
+class CharacterDescription {
+ name string
+ clothingItems string[]
+ hairColor string? @description(#"
+ The color of the character's hair.
+ "#)
+ spells Spells[]
+}
+
+class Spells {
+ name string
+ description string
+}
+
+function DescribeCharacter(image_or_paragraph: image | string) -> CharacterDescription {
+ client GPT4o
+ prompt #"
+ {{ _.role("user")}}
+
+ Describe this character according to the schema provided:
+ {{ image_or_paragraph }}
+
+
+ {{ ctx.output_format }}
+
+ Before you answer, explain your reasoning in 3 sentences.
+ "#
+}
+```
+
+If you open up the **VSCode Playground** you will be able to test this function instantly.
+
+## Usage
+
+See [image docs](/docs/snippets/supported-types#image)
+
+
+```python python
+from baml_client import b
+from baml_client.types import CharacterDescription
+from baml_py import Image
+
+...
+ result = await b.DescribeCharacter("...")
+ assert isinstance(result, CharacterDescription)
+
+ result_from_image = await b.DescribeCharacter(Image.from_url("http://..."))
+```
+
+```typescript typescript
+import { Image } from "@boundaryml/baml"
+import b from 'baml_client'
+import { Category } from 'baml_client/types'
+
+...
+ const result = await b.DescribeCharacter("...")
+ // result == interface CharacterDescription
+
+ const result_from_image = await b.DescribeCharacter(Image.fromUrl("http://..."))
+```
+
+```ruby ruby
+require_relative "baml_client/client"
+
+$b = Baml.Client
+
+# images are not supported in Ruby
+def example
+  res = $b.DescribeCharacter("Bob the builder wears overalls")
+end
+```
+
\ No newline at end of file
diff --git a/docs/docs/snippets/functions/function-calling.mdx b/docs/docs/snippets/functions/function-calling.mdx
new file mode 100644
index 000000000..5f3997836
--- /dev/null
+++ b/docs/docs/snippets/functions/function-calling.mdx
@@ -0,0 +1,61 @@
+---
+title: Function Calling / Tools
+---
+"Function calling" is a technique for getting an LLM to choose a function to call for you.
+
+The way it works is:
+1. You define a task with certain function(s)
+2. Ask the LLM to **choose which function to call**
+3. **Get the function parameters from the LLM** for the function it chose
+4. **Call the functions** in your code with those parameters
+
+In BAML, you can represent a `tool` or a `function` you want to call as a BAML `class`, and make the function's output be that class definition.
+
+```rust BAML
+class WeatherAPI {
+ city string @description("the user's city")
+ timeOfDay string @description("As an ISO8601 timestamp")
+}
+
+function UseTool(user_message: string) -> WeatherAPI {
+ client GPT4Turbo
+ prompt #"
+ Extract the info from this message
+ ---
+ {{ user_message }}
+ ---
+
+ {# special macro to print the output schema. #}
+ {{ ctx.output_format }}
+
+ JSON:
+ "#
+}
+```
+
+## Choosing multiple Tools
+
+To choose ONE tool out of many, you can use a union:
+```rust BAML
+function UseTool(user_message: string) -> WeatherAPI | MyOtherAPI {
+ .... // same thing
+}
+```
+
+If you use [VSCode Playground](/docs/get-started/quickstart/editors-vscode), you can see what we inject into the prompt, with full transparency.
+
+## Choosing N Tools
+To choose many tools, you can use a union of a list:
+```rust BAML
+function UseTool(user_message: string) -> (WeatherAPI | MyOtherAPI)[] {
+ .... // same thing
+}
+```
+
+## Function-calling APIs vs Prompting
+Injecting your function schemas into the prompt, as BAML does, outperforms function-calling across all benchmarks for major providers (see [Berkeley's Function-calling Leaderboard](https://gorilla.cs.berkeley.edu/leaderboard.html), where "Prompt" outperforms "FC").
+
+Keep in mind that "JSON mode" is nearly the same thing as "prompting", except it enforces that the LLM response is ONLY a JSON blob.
+BAML does not use JSON mode, so developers can use better prompting techniques like chain-of-thought that let the LLM express its reasoning before printing out the actual schema. BAML's parser can pull the JSON schema(s) out of free-form text for you.
+
+BAML may support native function-calling APIs in the future (please let us know more about your use-case so we can prioritize accordingly)
\ No newline at end of file
diff --git a/docs/docs/snippets/functions/overview.mdx b/docs/docs/snippets/functions/overview.mdx
new file mode 100644
index 000000000..81237f247
--- /dev/null
+++ b/docs/docs/snippets/functions/overview.mdx
@@ -0,0 +1,128 @@
+A **function** is the contract between the application and the AI model. It defines the desired **input** and a **guaranteed output**.
+
+Here is a simple BAML function to extract a resume. Note the input is a chunk of resume_text, and the output is an actual resume class. Read [prompt syntax](/docs/snippets/prompt-syntax/what-is-jinja) to learn more about the prompt and what Jinja templating is.
+
+```rust BAML
+class Resume {
+ name string
+ education Education[] @description("Extract in the same order listed")
+ skills string[] @description("Only include programming languages")
+}
+
+class Education {
+ school string
+ degree string
+ year int
+}
+
+function ExtractResume(resume_text: string) -> Resume {
+ client GPT4Turbo
+ // The prompt uses Jinja syntax. Change the models or this text and watch the prompt preview change!
+ prompt #"
+ Parse the following resume and return a structured representation of the data in the schema below.
+
+ Resume:
+ ---
+ {{ resume_text }}
+ ---
+
+ {# special macro to print the output instructions. #}
+ {{ ctx.output_format }}
+
+ JSON:
+ "#
+}
+```
+
+A function signature directly translates into the same function in the language of your choice, and BAML's fuzzy parser will handle fixing any common JSON mistakes LLMs make. Here's how you call it:
+
+
+```python python
+from baml_client import b
+from baml_client.types import Resume
+
+async def main():
+    resume_text = """Jason Doe\nPython, Rust\nUniversity of California, Berkeley, B.S.\nin Computer Science, 2020\nAlso an expert in Tableau, SQL, and C++\n"""
+
+ # this function comes from the autogenerated "baml_client".
+ # It calls the LLM you specified and handles the parsing.
+ resume = await b.ExtractResume(resume_text)
+
+ # Fully type-checked and validated!
+ assert isinstance(resume, Resume)
+
+```
+
+```typescript typescript
+import b from 'baml_client'
+
+async function main() {
+ const resume_text = `Jason Doe\nPython, Rust\nUniversity of California, Berkeley, B.S.\nin Computer Science, 2020\nAlso an expert in Tableau, SQL, and C++`
+
+ // this function comes from the autogenerated "baml_client".
+ // It calls the LLM you specified and handles the parsing.
+ const resume = await b.ExtractResume(resume_text)
+
+ // Fully type-checked and validated!
+ resume.name === 'Jason Doe'
+}
+```
+
+```ruby ruby
+
+require_relative "baml_client/client"
+b = Baml.Client
+
+# Note this is not async
+res = b.ExtractResume(
+  resume_text: "Jason Doe\nPython, Rust\nUniversity of California, Berkeley, B.S.\nin Computer Science, 2020\nAlso an expert in Tableau, SQL, and C++"
+)
+```
+
+
+
+## Complex input types
+
+If you have a complex input type you can import them from `baml_client` and use them when calling your function. Imagine we injected `class Resume` into a different baml function called AnalyzeResume. Here's what the call looks like:
+
+
+```python Python
+from baml_client.types import Resume
+from baml_client import b
+...
+ await b.AnalyzeResume(
+ Resume(name="Mark", education=[...]))
+
+```
+
+```typescript typescript
+import { Resume, b } from "baml_client"
+
+...
+ await b.AnalyzeResume({
+ name: "Mark",
+ education: [...]
+ })
+```
+
+```ruby Ruby
+require_relative "baml_client/client"
+b = Baml.Client
+...
+res = b.AnalyzeResume(
+ myArg: Baml::Types::Resume.new(
+ name: "key",
+ education: [...]
+ )
+)
+```
+
+
+
+See more at [Calling functions](/docs/calling-baml/calling-functions)
+
+Checkout [PromptFiddle](https://promptfiddle.com) to see various interactive BAML function examples.
diff --git a/docs/docs/snippets/prompt-syntax/comments.mdx b/docs/docs/snippets/prompt-syntax/comments.mdx
new file mode 100644
index 000000000..a6e648177
--- /dev/null
+++ b/docs/docs/snippets/prompt-syntax/comments.mdx
@@ -0,0 +1 @@
+Use `{# ... #}` inside the `prompt` to add comments
diff --git a/docs/docs/snippets/prompt-syntax/conditionals.mdx b/docs/docs/snippets/prompt-syntax/conditionals.mdx
new file mode 100644
index 000000000..3bf1f53e2
--- /dev/null
+++ b/docs/docs/snippets/prompt-syntax/conditionals.mdx
@@ -0,0 +1,13 @@
+Use conditional statements to control the flow and output of your templates based on conditions:
+
+```jinja2
+function MyFunc(user: User) -> string {
+ prompt #"
+ {% if user.is_active %}
+ Welcome back, {{ user.name }}!
+ {% else %}
+ Please activate your account.
+ {% endif %}
+ "#
+}
+```
diff --git a/docs/docs/snippets/prompt-syntax/ctx.mdx b/docs/docs/snippets/prompt-syntax/ctx.mdx
new file mode 100644
index 000000000..771cbad72
--- /dev/null
+++ b/docs/docs/snippets/prompt-syntax/ctx.mdx
@@ -0,0 +1,38 @@
+---
+title: ctx (accessing metadata)
+---
+
+If you try rendering `{{ ctx }}` into the prompt (literally just write that out!), you'll see all the metadata we inject to run this prompt within the playground preview.
+
+In the earlier tutorial we mentioned `ctx.output_format`, which contains the schema, but you can also access client information:
+
+
+## Usecase: Conditionally render based on client provider
+
+In this example, we render the list of messages inside XML tags if the provider is Anthropic (as they recommend using them as delimiters). See also [template_string](/docs/snippets/template-string), as it's used here.
+
+```jinja2
+template_string RenderConditionally(messages: Message[]) #"
+ {% for message in messages %}
+    {% if ctx.client.provider == "anthropic" %}
+      <message>{{ message.user_name }}: {{ message.content }}</message>
+    {% else %}
+      {{ message.user_name }}: {{ message.content }}
+    {% endif %}
+ {% endfor %}
+"#
+
+function MyFuncWithGPT4(messages: Message[]) -> string {
+ client GPT4o
+ prompt #"
+ {{ RenderConditionally(messages)}}
+ "#
+}
+
+function MyFuncWithAnthropic(messages: Message[]) -> string {
+ client Claude35
+ prompt #"
+    {{ RenderConditionally(messages) }}
+  "#
+}
+```
\ No newline at end of file
diff --git a/docs/docs/snippets/prompt-syntax/loops.mdx b/docs/docs/snippets/prompt-syntax/loops.mdx
new file mode 100644
index 000000000..7667ace4b
--- /dev/null
+++ b/docs/docs/snippets/prompt-syntax/loops.mdx
@@ -0,0 +1,40 @@
+Here's how you can iterate over a list of items, accessing each item's attributes:
+
+```jinja2
+function MyFunc(messages: Message[]) -> string {
+ prompt #"
+ {% for message in messages %}
+ {{ message.user_name }}: {{ message.content }}
+ {% endfor %}
+ "#
+}
+```
+
+## loop
+
+Jinja provides a `loop` object that can be used to access information about the loop. Here are some of the attributes of the `loop` object:
+
+
+| Variable | Description |
+|------------------|-----------------------------------------------------------------------------|
+| loop.index | The current iteration of the loop. (1 indexed) |
+| loop.index0 | The current iteration of the loop. (0 indexed) |
+| loop.revindex | The number of iterations from the end of the loop (1 indexed) |
+| loop.revindex0 | The number of iterations from the end of the loop (0 indexed) |
+| loop.first | True if first iteration. |
+| loop.last | True if last iteration. |
+| loop.length | The number of items in the sequence. |
+| loop.cycle | A helper function to cycle between a list of sequences. See the explanation below. |
+| loop.depth | Indicates how deep in a recursive loop the rendering currently is. Starts at level 1 |
+| loop.depth0 | Indicates how deep in a recursive loop the rendering currently is. Starts at level 0 |
+| loop.previtem | The item from the previous iteration of the loop. Undefined during the first iteration. |
+| loop.nextitem | The item from the following iteration of the loop. Undefined during the last iteration. |
+| loop.changed(*val) | True if previously called with a different value (or not called at all). |
+
+```jinja2
+prompt #"
+ {% for item in items %}
+ {{ loop.index }}: {{ item }}
+ {% endfor %}
+"#
+```
\ No newline at end of file
diff --git a/docs/docs/snippets/prompt-syntax/output-format.mdx b/docs/docs/snippets/prompt-syntax/output-format.mdx
new file mode 100644
index 000000000..414f29657
--- /dev/null
+++ b/docs/docs/snippets/prompt-syntax/output-format.mdx
@@ -0,0 +1,132 @@
+---
+title: ctx.output_format
+---
+
+`{{ ctx.output_format }}` is used within a prompt template (or in any template_string) to print out the function's output schema into the prompt. It describes to the LLM how to generate a structure BAML can parse (usually JSON).
+
+Here's an example of a function with `{{ ctx.output_format }}`, and how it gets rendered by BAML before sending it to the LLM.
+
+**BAML Prompt**
+
+```rust
+class Resume {
+  name string
+  education Education[]
+}
+
+class Education {
+  school string
+  graduation_year string
+}
+function ExtractResume(resume_text: string) -> Resume {
+ prompt #"
+ Extract this resume:
+ ---
+ {{ resume_text }}
+ ---
+
+ {{ ctx.output_format }}
+ "#
+}
+```
+
+**Rendered prompt**
+
+```text
+Extract this resume
+---
+Aaron V.
+Bachelors CS, 2015
+UT Austin
+---
+
+Answer in JSON using this schema:
+{
+ name: string
+ education: [
+ {
+ school: string
+ graduation_year: string
+ }
+ ]
+}
+```
+
+## Controlling the output_format
+
+`ctx.output_format` can also be called as a function with parameters to customize how the schema is printed, like this:
+```text
+
+{{ ctx.output_format(prefix="Answer in this schema correctly and I'll tip $400:\n", always_hoist_enums=true)}}
+```
+
+Here's the parameters:
+
+The prefix instruction to use before printing out the schema.
+
+```text
+Answer in this schema correctly and I'll tip $400:
+{
+ ...
+}
+```
+BAML's default prefix varies based on the function's return type.
+
+| Function return type | Default Prefix |
+| --- | --- |
+| Primitive (String) | |
+| Primitive (Other) | `Answer as a: ` |
+| Enum | `Answer with any of the categories:\n` |
+| Class | `Answer in JSON using this schema:\n` |
+| List | `Answer with a JSON Array using this schema:\n` |
+| Union | `Answer in JSON using any of these schemas:\n` |
+| Optional | `Answer in JSON using this schema:\n` |
+
+
+
+
+Whether to inline the enum definitions in the schema, or print them above. **Default: false**
+
+
+**Inlined**
+```
+
+Answer in this json schema:
+{
+ categories: "ONE" | "TWO" | "THREE"
+}
+```
+
+**hoisted**
+```
+MyCategory
+---
+ONE
+TWO
+THREE
+
+Answer in this json schema:
+{
+ categories: MyCategory
+}
+```
+
+BAML will always hoist if you add a [description](/docs/snippets/enum#aliases-descriptions) to any of the enum values.
+
+
+
+
+
+**Default: ` or `**
+
+If a type is a union like `string | int` or an optional like `string?`, this indicates how it's rendered.
+
+
+BAML renders it as `property: string or null`, as we have observed some LLMs have trouble identifying what `property: string | null` means (and do better with plain English).
+
+You can always set it to ` | ` or something else for a specific model you use.
+
+
+## Why BAML doesn't use JSON schema format in prompts
+BAML uses "type definitions" or "jsonish" format instead of the long-winded json-schema format.
+The tl;dr is that json schemas are
+1. 4x more inefficient than "type definitions".
+2. very unreadable by humans (and hence models)
+3. perform worse than type definitions (especially on deeper nested objects or smaller models)
+
+Read our [full article on json schema vs type definitions](https://www.boundaryml.com/blog/type-definition-prompting-baml)
diff --git a/docs/docs/snippets/prompt-syntax/roles.mdx b/docs/docs/snippets/prompt-syntax/roles.mdx
new file mode 100644
index 000000000..685453f66
--- /dev/null
+++ b/docs/docs/snippets/prompt-syntax/roles.mdx
@@ -0,0 +1,83 @@
+---
+title: _.role
+---
+
+BAML prompts are compiled into a `messages` array (or equivalent) that most LLM providers use:
+
+BAML Prompt -> `[{ role: "user", content: "hi there" }, { role: "assistant", ... }]`
+
+By default, BAML puts everything into a single message with the `system` role if available (or whichever one is best for the provider you have selected).
+When in doubt, the playground always shows you the current role for each message.
+
+To specify a role explicitly, add the `{{ _.role("user")}}` syntax to the prompt
+```jinja2
+prompt #"
+ {{ _.role("system") }} Everything after
+ this element will be a system prompt!
+
+ {{ _.role("user")}}
+ And everything after this
+ will be a user role
+"#
+```
+Try it out in [PromptFiddle](https://www.promptfiddle.com)
+
+
+ BAML may change the default role to `user` if using specific APIs that only support user prompts, like when using prompts with images.
+
+
+We use `_` as the prefix of `_.role()` since we plan on adding more helpers here in the future.
+
+## Example -- Using `_.role()` in for-loops
+
+Here's how you can inject a list of user/assistant messages and mark each as a user or assistant role:
+
+```rust BAML
+class Message {
+ role string
+ message string
+}
+
+function ChatWithAgent(input: Message[]) -> string {
+ client GPT4o
+ prompt #"
+    {% for m in input %}
+ {{ _.role(m.role) }}
+ {{ m.message }}
+ {% endfor %}
+ "#
+}
+```
+
+```rust BAML
+function ChatMessages(messages: string[]) -> string {
+ client GPT4o
+ prompt #"
+ {% for m in messages %}
+ {{ _.role("user" if loop.index % 2 == 1 else "assistant") }}
+ {{ m }}
+ {% endfor %}
+ "#
+}
+```
+
+## Example -- Using `_.role()` in a template string
+
+```rust BAML
+template_string YouAreA(name: string, job: string) #"
+ {{ _.role("system") }}
+ You are an expert {{ name }}. {{ job }}
+
+ {{ ctx.output_format }}
+ {{ _.role("user") }}
+"#
+
+function CheckJobPosting(post: string) -> bool {
+ client GPT4o
+ prompt #"
+ {{ YouAreA("hr admin", "You're role is to ensure every job posting is bias free.") }}
+
+ {{ post }}
+ "#
+}
+```
diff --git a/docs/docs/snippets/prompt-syntax/variables.mdx b/docs/docs/snippets/prompt-syntax/variables.mdx
new file mode 100644
index 000000000..b79b53ef3
--- /dev/null
+++ b/docs/docs/snippets/prompt-syntax/variables.mdx
@@ -0,0 +1,2 @@
+
+See [template_string](/docs/snippets/template-string) to learn how to add variables in .baml files
\ No newline at end of file
diff --git a/docs/docs/snippets/prompt-syntax/what-is-jinja.mdx b/docs/docs/snippets/prompt-syntax/what-is-jinja.mdx
new file mode 100644
index 000000000..05b70c177
--- /dev/null
+++ b/docs/docs/snippets/prompt-syntax/what-is-jinja.mdx
@@ -0,0 +1,82 @@
+---
+title: What is Jinja / Cookbook
+---
+
+BAML Prompt strings are essentially [Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/) templates, which offer the ability to express logic and data manipulation within strings. Jinja is a very popular and mature templating language amongst Python developers, so GitHub Copilot or GPT-4 can already help you write most of the logic you want.
+
+## Jinja Cookbook
+
+When in doubt -- use the BAML VSCode Playground preview. It will show you the fully rendered prompt, even when it has complex logic.
+
+### Basic Syntax
+
+- `{% ... %}`: Use for executing statements such as for-loops or conditionals.
+- `{{ ... }}`: Use for outputting expressions or variables.
+- `{# ... #}`: Use for comments within the template, which will not be rendered.
+
+### Loops / Iterating Over Lists
+
+Here's how you can iterate over a list of items, accessing each item's attributes:
+
+```jinja2
+function MyFunc(messages: Message[]) -> string {
+ prompt #"
+ {% for message in messages %}
+ {{ message.user_name }}: {{ message.content }}
+ {% endfor %}
+ "#
+}
+```
+
+### Conditional Statements
+
+Use conditional statements to control the flow and output of your templates based on conditions:
+
+```jinja2
+function MyFunc(user: User) -> string {
+ prompt #"
+ {% if user.is_active %}
+ Welcome back, {{ user.name }}!
+ {% else %}
+ Please activate your account.
+ {% endif %}
+ "#
+}
+```
+
+### Setting Variables
+
+You can define and use variables within your templates to simplify expressions or manage data:
+
+```jinja2
+function MyFunc(items: Item[]) -> string {
+ prompt #"
+ {% set total_price = 0 %}
+ {% for item in items %}
+ {% set total_price = total_price + item.price %}
+ {% endfor %}
+ Total price: {{ total_price }}
+ "#
+}
+```
+
+### Including other Templates
+
+To promote reusability, you can include other templates within a template. See [template strings](/docs/snippets/template-string):
+
+```rust
+template_string PrintUserInfo(arg1: string, arg2: User) #"
+ {{ arg1 }}
+ The user's name is: {{ arg2.name }}
+"#
+
+function MyFunc(arg1: string, user: User) -> string {
+ prompt #"
+ Here is the user info:
+ {{ PrintUserInfo(arg1, user) }}
+ "#
+}
+```
+
+### Built-in filters
+See [jinja docs](https://jinja.palletsprojects.com/en/3.1.x/templates/#list-of-builtin-filters)
\ No newline at end of file
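+
+For instance, built-in filters like `join` and `length` can be applied inside any expression. A minimal sketch (function and parameter names are illustrative):
+
+```jinja2
+function ListNames(names: string[]) -> string {
+  prompt #"
+    All names: {{ names|join(", ") }}
+    Count: {{ names|length }}
+  "#
+}
+```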
diff --git a/docs/docs/snippets/supported-types.mdx b/docs/docs/snippets/supported-types.mdx
new file mode 100644
index 000000000..05a08b2bc
--- /dev/null
+++ b/docs/docs/snippets/supported-types.mdx
@@ -0,0 +1,286 @@
+---
+title: Supported Types
+---
+
+Here's a list of all the types you can extract from LLMs with BAML:
+
+## Primitive types
+* `bool`
+* `int`
+* `float`
+* `string`
+* `null`
+
+## Multimodal Types
+See [calling a function with multimodal types](/docs/snippets/calling-baml/multi-modal)
+
+### `image`
+
+You can use an image like this for models that support them:
+
+```rust
+function DescribeImage(myImg: image) -> string {
+ client GPT4Turbo
+ prompt #"
+ {{ _.role("user")}}
+ Describe the image in four words:
+ {{ myImg }}
+ "#
+}
+```
+You cannot name a variable `image` at the moment, as it is a reserved keyword.
+
+Calling a function with an image type:
+
+```python Python
+from baml_py import Image
+from baml_client import b
+
+async def test_image_input():
+ # from URL
+ res = await b.TestImageInput(
+ img=Image.from_url(
+ "https://upload.wikimedia.org/wikipedia/en/4/4d/Shrek_%28character%29.png"
+ )
+ )
+
+ # Base64 image
+ image_b64 = "iVBORw0K...."
+ res = await b.TestImageInput(
+ img=Image.from_base64("image/png", image_b64)
+ )
+```
+
+```typescript TypeScript
+import { b } from '../baml_client'
+import { Image } from "@boundaryml/baml"
+...
+
+ // URL
+ let res = await b.TestImageInput(
+ Image.fromUrl('https://upload.wikimedia.org/wikipedia/en/4/4d/Shrek_%28character%29.png'),
+ )
+
+ // Base64
+ let res = await b.TestImageInput(
+ Image.fromBase64('image/png', image_b64),
+ )
+```
+
+```ruby Ruby
+(we're working on it!)
+```
+
+
+### `audio`
+
+Example
+```rust
+function DescribeSound(myAudio: audio) -> string {
+ client GPT4Turbo
+ prompt #"
+ {{ _.role("user")}}
+ Describe the audio in four words:
+ {{ myAudio }}
+ "#
+}
+```
+Calling functions that have `audio` types.
+
+
+```python Python
+from baml_py import Audio
+from baml_client import b
+
+async def run():
+ # from URL
+ res = await b.TestAudioInput(
+ img=Audio.from_url(
+ "https://upload.wikimedia.org/wikipedia/en/4/4d/Shrek_%28character%29.png"
+ )
+ )
+
+ # Base64
+ b64 = "iVBORw0K...."
+ res = await b.TestAudioInput(
+      img=Audio.from_base64("audio/mp3", b64)
+ )
+```
+
+```typescript TypeScript
+import { b } from '../baml_client'
+import { Audio } from "@boundaryml/baml"
+...
+
+ // URL
+ let res = await b.TestAudioInput(
+ Audio.fromUrl('https://upload.wikimedia.org/wikipedia/en/4/4d/Shrek_%28character%29.mp4'),
+ )
+
+ // Base64
+ const audio_base64 = ".."
+ let res = await b.TestAudioInput(
+    Audio.fromBase64('audio/mp3', audio_base64),
+ )
+
+```
+
+```ruby Ruby
+we're working on it!
+```
+
+
+## Composite/Structured Types
+
+### enum
+
+**See also:** [Enum](/docs/snippets/enum)
+
+A user-defined type consisting of a set of named constants.
+Use it when you need a model to choose from a known set of values, like in classification problems
+
+```rust
+enum Name {
+ Value1
+ Value2 @description("My optional description annotation")
+}
+```
+### class
+
+**See also:** [Class](/docs/snippets/class)
+
+Classes are for user-defined complex data structures.
+
+Use it when you need an LLM to call another function (e.g. OpenAI's function calling): you can model the function's parameters as a class. You can also get models to return complex structured data by using a class.
+
+**Example:**
+Note that properties have no `:`
+```rust
+class Car {
+ model string
+ year int @description("Year of manufacture")
+}
+```
+
+### Optional (?)
+A type that represents a value that might or might not be present.
+
+Useful when a variable might not have a value and you want to explicitly handle its absence.
+**Syntax:** `?`
+
+**Example:** `int?` or `(MyClass | int)?`
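+
+For instance, an optional field inside a class (a minimal sketch; the class and field names are illustrative):
+
+```rust
+class Profile {
+  name string
+  nickname string?  // may be absent from the source text
+}
+```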
+
+### Union (|)
+
+A type that can hold one of several specified types.
+
+This can be helpful with **function calling**, where you want to return different types of data depending on which function should be called.
+- **Syntax:** `|`
+- **Example:** `int | string` or `(int | string) | MyClass` or `string | MyClass | int[]`
+
+ Order is important. `int | string` is not the same as `string | int`.
+
+ For example, if you have a `"1"` string, it will be parsed as an `int` if
+ you use `int | string`, but as a `string` if you use `string | int`.
+
+
+### List/Array ([])
+A collection of elements of the same type.
+- **Syntax:** `[]`
+- **Example:** `string[]` or `(int | string)[]` or `int[][]`
+
+
+
+
+Array types can be nested to create multi-dimensional arrays.
+
+An array type cannot be optional.
+
+
+
+### ❌ Dictionary
+
+- Not yet supported. Use a `class` instead.
+
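+If you know the keys ahead of time, you can model them as class fields (a minimal sketch; the names are illustrative):
+
+```rust
+class UserMetadata {
+  signup_source string
+  referral_code string?
+}
+```
+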
+### ❌ Set
+
+- Not yet supported. Use a `List` instead.
+
+### ❌ Tuple
+
+- Not yet supported. Use a `class` instead.
+
+## Examples and Equivalents
+
+Here are some examples and what their equivalents are in different languages.
+
+### Example 1
+
+
+```baml Baml
+int?|string[]|MyClass
+```
+
+```python Python Equivalent
+Union[Optional[int], List[str], MyClass]
+```
+
+```typescript TypeScript Equivalent
+(number | null) | string[] | MyClass
+```
+
+
+
+### Example 2
+
+
+```baml Baml
+string[]
+```
+
+```python Python Equivalent
+List[str]
+```
+
+```typescript TypeScript Equivalent
+string[]
+```
+
+
+
+### Example 3
+
+
+```baml Baml
+(int|float)[]
+```
+```python Python Equivalent
+List[Union[int, float]]
+```
+
+```typescript TypeScript Equivalent
+number[]
+```
+
+
+
+### Example 4
+
+
+```baml Baml
+(int? | string[] | MyClass)[]
+```
+
+```python Python Equivalent
+List[Union[Optional[int], List[str], MyClass]]
+```
+
+```typescript TypeScript Equivalent
+((number | null) | string[] | MyClass)[]
+```
+
+
+
+## ⚠️ Unsupported
+- `any/json` - Not supported. We don't want to encourage its use, as it defeats the purpose of having a type system. If you really need it, use `string` for now and call `json.parse` yourself, or use [dynamic types](/docs/calling-baml/dynamic-types)
+- `datetime` - Not yet supported. Use a `string` instead.
+- `duration` - Not yet supported. We recommend using `string` and specifying that it must be an "ISO8601 duration" in the description, which you can parse yourself into a duration.
+- `units (currency, temperature)` - Not yet supported. Use a number (`int` or `float`) and have the unit be part of the variable name. For example, `temperature_fahrenheit` and `cost_usd` (see [@alias](/docs/snippets/class#aliases-descriptions))
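+
+For example, a class that encodes units in the field names, as recommended above (a minimal sketch; the class and field names are illustrative):
+
+```rust
+class Quote {
+  cost_usd float @description("Cost in US dollars")
+  temperature_fahrenheit float
+  delivery_date string @description("As an ISO8601 date")
+}
+```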
diff --git a/docs/docs/syntax/comments.mdx b/docs/docs/snippets/syntax/comments.mdx
similarity index 99%
rename from docs/docs/syntax/comments.mdx
rename to docs/docs/snippets/syntax/comments.mdx
index d29f2ebed..6fe8d6448 100644
--- a/docs/docs/syntax/comments.mdx
+++ b/docs/docs/snippets/syntax/comments.mdx
@@ -33,4 +33,4 @@ Multiline comments are denoted via `{//` and `//}`.
bar
//}
```
-
+
diff --git a/docs/docs/snippets/syntax/dictionaries.mdx b/docs/docs/snippets/syntax/dictionaries.mdx
new file mode 100644
index 000000000..81e71a03f
--- /dev/null
+++ b/docs/docs/snippets/syntax/dictionaries.mdx
@@ -0,0 +1,34 @@
+Dictionaries in BAML have this syntax. You'll see them mainly in `tests` declared in BAML, and `clients`.
+
+```baml BAML
+{
+ key1 value1
+ key2 {
+ nestedKey1 nestedValue1
+ }
+}
+```
+Note that they do not use `:` between keys and values.
+
+You can use unquoted or quoted strings, booleans, numbers and nested dictionaries as values.
+
+```rust BAML
+{
+ key1 "value1"
+ key2 {
+ nestedKey1 1
+ nestedKey2 true
+ }
+}
+```
+
+**Dictionary with multiline string as a value**:
+
+```rust BAML
+{
+ key1 #"
+ This is a
+ multiline string
+ "#
+}
+```
\ No newline at end of file
diff --git a/docs/docs/snippets/syntax/lists.mdx b/docs/docs/snippets/syntax/lists.mdx
new file mode 100644
index 000000000..958b790d5
--- /dev/null
+++ b/docs/docs/snippets/syntax/lists.mdx
@@ -0,0 +1,22 @@
+If you have to declare a list in a .baml file you can use this syntax:
+```rust baml
+{
+ key1 [value1, value2, value3],
+ key2 [
+ value1,
+ value2,
+ value3
+ ]
+ key3 [
+ {
+ key1 value1,
+ key2 value2
+ }
+ {
+ key1 value1,
+ key2 value2
+ }
+ ]
+}
+```
+Commas are optional in multiline lists.
\ No newline at end of file
diff --git a/docs/docs/syntax/strings.mdx b/docs/docs/snippets/syntax/strings.mdx
similarity index 93%
rename from docs/docs/syntax/strings.mdx
rename to docs/docs/snippets/syntax/strings.mdx
index 657fa726d..512f63533 100644
--- a/docs/docs/syntax/strings.mdx
+++ b/docs/docs/snippets/syntax/strings.mdx
@@ -63,4 +63,4 @@ python#"
return 1
"#
```
-these are not functional code blocks they are can just be used for documentation purposes.
+these are not functional code blocks -- they are only used for documentation purposes.
diff --git a/docs/docs/snippets/template-string.mdx b/docs/docs/snippets/template-string.mdx
new file mode 100644
index 000000000..2525921de
--- /dev/null
+++ b/docs/docs/snippets/template-string.mdx
@@ -0,0 +1,35 @@
+Writing prompts requires a lot of string manipulation. BAML has a `template_string` to let you combine different string templates together. Under-the-hood they use [jinja](/docs/snippets/prompt-syntax/what-is-jinja) to evaluate the string and its inputs.
+
+Think of template strings as functions that have variables, and return a string. They can be used to define reusable parts of a prompt, or to make the prompt more readable by breaking it into smaller parts.
+
+Example
+```rust BAML
+// Inject a list of "system" or "user" messages into the prompt.
+template_string PrintMessages(messages: Message[]) #"
+ {% for m in messages %}
+ {{ _.role(m.role) }}
+ {{ m.message }}
+ {% endfor %}
+"#
+
+function ClassifyConversation(messages: Message[]) -> Category[] {
+ client GPT4Turbo
+ prompt #"
+ Classify this conversation:
+ {{ PrintMessages(messages) }}
+
+ Use the following categories:
+ {{ ctx.output_format}}
+ "#
+}
+```
+
+In this example we can call the template_string `PrintMessages` to subdivide the prompt into "user" or "system" messages using `_.role()` (see [message roles](/docs/snippets/prompt-syntax/roles)). This allows us to reuse the logic for printing messages in multiple prompts.
+
+You can nest as many template strings inside each other and call them however many times you want.
+
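+For example, one template string can call another (a minimal sketch; the names are illustrative):
+
+```rust BAML
+template_string Header(title: string) #"
+  === {{ title }} ===
+"#
+
+template_string Section(title: string, body: string) #"
+  {{ Header(title) }}
+  {{ body }}
+"#
+```
+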
+
+ The BAML linter may give you a warning when you use template strings due to a static analysis limitation. You can ignore this warning. If it renders in the playground, you're good!
+
+Use the playground preview to ensure your template string is being evaluated correctly!
+
diff --git a/docs/docs/snippets/test-cases.mdx b/docs/docs/snippets/test-cases.mdx
new file mode 100644
index 000000000..48690915d
--- /dev/null
+++ b/docs/docs/snippets/test-cases.mdx
@@ -0,0 +1,109 @@
+You can test your BAML functions in the VSCode Playground by adding a `test` snippet into a BAML file:
+
+```rust
+enum Category {
+ Refund
+ CancelOrder
+ TechnicalSupport
+ AccountIssue
+ Question
+}
+
+function ClassifyMessage(input: string) -> Category {
+ client GPT4Turbo
+ prompt #"
+ ... truncated ...
+ "#
+}
+
+test Test1 {
+ functions [ClassifyMessage]
+ args {
+ input "Can't access my account using my usual login credentials, and each attempt results in an error message stating 'Invalid username or password.' I have tried resetting my password using the 'Forgot Password' link, but I haven't received the promised password reset email."
+ }
+}
+```
+See the [interactive examples](https://promptfiddle.com)
+
+The BAML playground will give you a starting snippet to copy that will match your function signature.
+
+
+BAML doesn't use `:` between key-value pairs, except in function parameters.
+
+
+## Complex object inputs
+
+Objects are injected as dictionaries
+```rust
+class Message {
+ user string
+ content string
+}
+
+function ClassifyMessage(messages: Message[]) -> Category {
+...
+}
+
+test Test1 {
+ functions [ClassifyMessage]
+ args {
+ messages [
+ {
+ user "hey there"
+ content #"
+ You can also add a multi-line
+ string with the hashtags
+ Instead of ugly json with \n
+ "#
+ }
+ ]
+ }
+}
+```
+
+## Images
+An `image` input type is translated into this object (assuming the function signature is `function MyFunction(myImage: image) -> ...`):
+
+URL input
+```
+test Test1 {
+ args {
+ myImage {
+ url "https...."
+ }
+ }
+}
+```
+base64 input
+```
+test Test1 {
+ args {
+ myImage {
+ base64 "base64string"
+ media_type "image/png"
+ }
+ }
+}
+```
+file input (coming soon)
+
+
+## Audio
+Audio inputs are similar to images:
+
+URL input
+```
+...
+ {
+ url "https//domain.com/somefile.mp3"
+ }
+...
+```
+Base64 input
+```
+{
+ media_type "audio/mp3"
+ base64 "base64string"
+}
+```
+file input (coming soon)
\ No newline at end of file
diff --git a/docs/docs/syntax/class.mdx b/docs/docs/syntax/class.mdx
deleted file mode 100644
index f5b0fc74b..000000000
--- a/docs/docs/syntax/class.mdx
+++ /dev/null
@@ -1,67 +0,0 @@
----
-title: "class"
----
-
-Classes consist of a name, a list of properties, and their [types](/docs/syntax/type).
-In the context of LLMs, classes describe the type of the variables you can inject into prompts and extract out from the response. In python, classes are represented by [pydantic](https://pydantic-docs.helpmanual.io/) models.
-
-
-```llvm Baml
-class Foo {
- property1 string
- property2 int?
- property3 Bar[]
- property4 MyEnum
-}
-```
-
-```python Python Equivalent
-from pydantic import BaseModel
-from path.to.bar import Bar
-from path.to.my_enum import MyEnum
-
-class Foo(BaseModel):
- property1: str
- property2: Optional[int]= None
- property3: List[Bar]
- property4: MyEnum
-```
-
-```typescript Typescript Equivalent
-import z from "zod";
-import { BarZod } from "./path/to/bar";
-import { MyEnumZod } from "./path/to/my_enum";
-
-const FooZod = z.object({
- property1: z.string(),
- property2: z.number().int().nullable().optional(),
- property3: z.array(BarZod),
- property4: MyEnumZod,
-});
-
-type Foo = z.infer;
-```
-
-
-
-## Properties
-
-Classes may have any number of properties.
-Property names must follow:
-
-- Must start with a letter
-- Must contain only letters, numbers, and underscores
-- Must be unique within the class
-
-The type of a property can be any [supported type](/docs/syntax/type)
-
-### Default values
-
-- Not yet supported. For optional properties, the default value is `None` in python.
-
-## Inheritance
-
-Not supported. Like rust, we take the stance that [composition is better than inheritance](https://www.digitalocean.com/community/tutorials/composition-vs-inheritance).
-
-## aliases, descriptions
-Classes support aliases, descriptions, and other kinds of attributes. See the [prompt engineering docs](./prompt_engineering/class)
diff --git a/docs/docs/syntax/client/client.mdx b/docs/docs/syntax/client/client.mdx
deleted file mode 100644
index fd309a96c..000000000
--- a/docs/docs/syntax/client/client.mdx
+++ /dev/null
@@ -1,229 +0,0 @@
----
-title: client
----
-
-A **client** is the mechanism by which a function calls an LLM.
-
-## Syntax
-
-```rust
-client Name {
- provider ProviderName
- options {
- // ...
- }
-}
-```
-
-- `Name`: The name of the client (can be any [a-zA-Z], numbers or `_`). Must start with a letter.
-
-## Properties
-
-| Property | Type | Description | Required |
-| -------------- | -------------------- | -------------------------------------------------- | -------- |
-| `provider` | name of the provider | The provider to use. | Yes |
-| `options` | key-value pair | These are passed through directly to the provider. | No |
-| `retry_policy` | name of the policy | [Learn more](/docs/syntax/client/retry) | No |
-
-## Providers
-
-BAML ships with the following providers (you can can also write your own!):
-
-- LLM client providers
- - `openai`
- - `azure-openai`
- - `anthropic`
- - `google-ai`
- - `ollama`
-- Composite client providers
- - `fallback`
- - `round-robin`
-
-There are two primary types of LLM clients: chat and completion. BAML abstracts
-away the differences between these two types of LLMs by putting that logic in
-the clients.
-
-You can call a chat client with a single completion prompt and it will
-automatically map it to a chat prompt. Similarly you can call a completion
-client with multiple chat prompts and it will automatically map it to a
-completion prompt.
-
-### OpenAI/Azure
-
-Provider names:
-
-- `openai-azure`
-
-You must pick the right provider for the type of model you are using. For
-example, if you are using a GPT-3 model, you must use a `chat` provider, but if
-you're using the instruct model, you must use a `completion` provider.
-
-You can see all models supported by OpenAI [here](https://platform.openai.com/docs/models).
-
-Accepts any options as defined by [OpenAI/Azure SDK](https://github.com/openai/openai-python/blob/9e6e1a284eeb2c20c05a03831e5566a4e9eaba50/src/openai/types/chat/completion_create_params.py#L28)
-
-See [Azure Docs](https://learn.microsoft.com/en-us/azure/ai-services/openai/quickstart?tabs=command-line,python&pivots=programming-language-python#create-a-new-python-application) to learn how to get your Azure API key.
-
-```rust
-// A client that uses the OpenAI chat API.
-client MyGPT35Client {
- // Since we're using a GPT-3 model, we must use a chat provider.
- provider openai
- options {
- model gpt-3.5-turbo
- // Set the api_key parameter to the OPENAI_API_KEY environment variable
- api_key env.OPENAI_API_KEY
- }
-}
-
-// A client that uses the OpenAI chat API.
-client MyAzureClient {
- // I configured the deployment to use a GPT-3 model,
- // so I must use a chat provider.
- provider openai-azure
- options {
- api_key env.AZURE_OPENAI_KEY
- // This may change in the future
- api_version "2023-05-15"
- api_type azure
- azure_endpoint env.AZURE_OPENAI_ENDPOINT
- model "gpt-35-turbo-default"
- }
-}
-```
-
-
-### Anthropic
-
-Provider names:
-
-- `anthropic`
-
-Accepts any options as defined by [Anthropic SDK](https://github.com/anthropics/anthropic-sdk-python/blob/fc90c357176b67cfe3a8152bbbf07df0f12ce27c/src/anthropic/types/completion_create_params.py#L20)
-
-```rust
-client MyClient {
- provider baml-anthropic
- options {
- model claude-2
- max_tokens_to_sample 300
- }
-}
-```
-### Google
-
-Provider names:
-- `google-ai`
-
-Accepts any options as defined by the [Gemini SDK](https://ai.google.dev/gemini-api/docs/get-started/tutorial?lang=rest#configuration).
-
-```rust
-client MyGoogleClient {
- provider google-ai
- options{
- model "gemini-1.5-pro-001"
- }
-}
-```
-
-### Ollama
-
-- BAML Python Client >= 0.18.0
-- BAML Typescript Client >= 0.0.6
-
-Provider names:
-
-- `ollama`
-
-Accepts any options as defined by [Ollama SDK](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-chat-completion).
-
-```rust
-client MyOllamaClient {
- provider ollama
- options {
- model llama2
- }
-}
-```
-#### Requirements
-
-1. For Ollama, in your terminal run `ollama serve`
-2. In another window, run `ollama run llama2` (or your model), and you should be good to go.
-3. If your Ollama port is not 11434, you can specify the endpoint manually.
-
-```rust
-client MyClient {
- provider ollama
- options {
- model llama2
- options {
- temperature 0
- base_url "http://localhost:" // Default is 11434
- }
- }
-}
-```
-
-
-This is not the Vertex AI Gemini API, but the Google Generative AI Gemini API, which supports the same models but at a different endpoint.
-
-
-### Fallback
-
-The `baml-fallback` provider allows you to define a resilient client, by
-specifying strategies for re-running failed requests. See
-[Fallbacks/Redundancy](/docs/syntax/client/redundancy) for more information.
-
-### Round Robin
-
-The `baml-round-robin` provider allows you to load-balance your requests across
-multiple clients. Here's an example:
-
-```rust
-client MyClient {
- provider round-robin
- options {
- strategy [
- MyGptClient
- MyAnthropicClient
- ]
- }
-}
-```
-
-This will alternate between `MyGptClient` and `MyAnthropicClient` for successive
-requests, starting from a randomly selected client (so that if you run multiple
-instances of your application, they won't all start with the same client).
-
-If you want to control which client is used for the first request, you can specify
-a `start` index, which tells BAML to start with the client at index `start`, like
-so:
-
-```rust
-client MyClient {
- provider baml-round-robin
- options {
- start 1
- strategy [
- MyGptClient
- MyAnthropicClient
- ]
- }
-}
-```
-
-## Other providers
-You can use the `openai` provider if the provider you're trying to use has the same ChatML response format (i.e. HuggingFace via their Inference Endpoint or your own local endpoint)
-
-Some providers ask you to add a `base_url`, which you can do like this:
-
-```rust
-client MyClient {
- provider openai
- options {
- model some-custom-model
- api_key env.OPEN
- base_url "https://some-custom-url-here"
- }
-}
-```
\ No newline at end of file
diff --git a/docs/docs/syntax/client/redundancy.mdx b/docs/docs/syntax/client/redundancy.mdx
deleted file mode 100644
index 3e11ce8f8..000000000
--- a/docs/docs/syntax/client/redundancy.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: Fallbacks/Redundancy
----
-
-Many LLMs are subject to fail due to transient errors. Setting up a fallback allows you to switch to a different LLM when prior LLMs fail (e.g. outage, high latency, rate limits, etc).
-
-To accomplish this, instead of new syntax, you can simple define a `client` using a `baml-fallback` provider.
-
-The `baml-fallback` provider takes a `strategy` option, which is a list of `client`s to try in order. If the first client fails, the second client is tried, and so on.
-
-```rust
-client MySafeClient {
- provider baml-fallback
- options {
- // First try GPT4 client, if it fails, try GPT35 client.
- strategy [
- GPT4,
- GPT35
- // If you had more clients, you could add them here.
- // Anthropic
- ]
- }
-}
-
-client GPT4 {
- provider baml-openai-chat
- options {
- // ...
- }
-}
-
-client GPT35 {
- provider baml-openai-chat
- options {
- // ...
- }
-}
-```
-
-Fallbacks are triggered on any error.
-
-Errors codes are:
-| Name | Error Code |
-| ----------------- | -------------------- |
-| BAD_REQUEST | 400 |
-| UNAUTHORIZED | 401 |
-| FORBIDDEN | 403 |
-| NOT_FOUND | 404 |
-| RATE_LIMITED | 429 |
-| INTERNAL_ERROR | 500 |
-| SERVICE_UNAVAILABLE | 503 |
-| UNKNOWN | 1 |
diff --git a/docs/docs/syntax/client/retry.mdx b/docs/docs/syntax/client/retry.mdx
deleted file mode 100644
index 0584bc561..000000000
--- a/docs/docs/syntax/client/retry.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: retry_policy
----
-
-Many LLMs are subject to fail due to transient errors. The retry policy allows you to configure how many times and how the client should retry a failed operation before giving up.
-
-## Syntax
-
-```rust
-retry_policy PolicyName {
- max_retries int
- strategy {
- type constant_delay
- delay_ms int? // defaults to 200
- } | {
- type exponential_backoff
- delay_ms int? // defaults to 200
- max_delay_ms int? // defaults to 10000
- multiplier float? // defaults to 1.5
- }
-}
-```
-
-### Properties
-
-| Name | Description | Required |
-| ------------- | ----------------------------------------------------------------------- | -------------------------------------- |
-| `max_retries` | The maximum number of times the client should retry a failed operation. | YES |
-| `strategy` | The strategy to use for retrying failed operations. | NO, defauts to `constant_delay(200ms)` |
-
-You can read more about specific retry strategy param:
-
-- [constant_delay](https://tenacity.readthedocs.io/en/latest/api.html?highlight=wait_exponential#tenacity.wait.wait_fixed)
-- [exponential_backoff](https://tenacity.readthedocs.io/en/latest/api.html?highlight=wait_exponential#tenacity.wait.wait_exponential)
-
-## Conditions for retrying
-
-If the client encounters a transient error, it will retry the operation. The following errors are considered transient:
-| Name | Error Code | Retry |
-| ----------------- | -------------------- | --- |
-| BAD_REQUEST | 400 | NO |
-| UNAUTHORIZED | 401 | NO |
-| FORBIDDEN | 403 | NO |
-| NOT_FOUND | 404 | NO |
-| RATE_LIMITED | 429 | YES |
-| INTERNAL_ERROR | 500 | YES |
-| SERVICE_UNAVAILABLE | 503 | YES |
-| UNKNOWN | 1 | YES |
-
-The UNKNOWN error code is used when the client encounters an error that is not listed above. This is usually a temporary error, but it is not guaranteed.
-
-## Example
-
-
- Each client may have a different retry policy, or no retry policy at all. But
- you can also reuse the same retry policy across multiple clients.
-
-
-```rust
-// in a .baml file
-
-retry_policy MyRetryPolicy {
- max_retries 5
- strategy {
- type exponential_backoff
- }
-}
-
-// A client that uses the OpenAI chat API.
-client MyGPT35Client {
- provider baml-openai-chat
- // Set the retry policy to the MyRetryPolicy defined above.
- // Any impl that uses this client will retry failed operations.
- retry_policy MyRetryPolicy
- options {
- model gpt-3.5-turbo
- api_key env.OPENAI_API_KEY
- }
-}
-```
diff --git a/docs/docs/syntax/enum.mdx b/docs/docs/syntax/enum.mdx
deleted file mode 100644
index 7b0feb15a..000000000
--- a/docs/docs/syntax/enum.mdx
+++ /dev/null
@@ -1,39 +0,0 @@
----
-title: "enum"
----
-
-Enums are useful for classification tasks. BAML has helper functions that can help you serialize an enum into your prompt in a neatly formatted list (more on that later).
-
-To define your own custom enum in BAML:
-
-
-```rust BAML
-enum MyEnum {
- Value1
- Value2
- Value3
-}
-```
-
-```python Python Equivalent
-from enum import StrEnum
-
-class MyEnum(StrEnum):
- Value1 = "Value1"
- Value2 = "Value2"
- Value3 = "Value3"
-```
-
-```typescript Typescript Equivalent
-enum MyEnum {
- Value1 = "Value1",
- Value2 = "Value2",
- Value3 = "Value3",
-}
-```
-
-
-
-- You may have as many values as you'd like.
-- Values may not be duplicated or empty.
-- Values may not contain spaces or special characters and must not start with a number.
diff --git a/docs/docs/syntax/function-testing.mdx b/docs/docs/syntax/function-testing.mdx
deleted file mode 100644
index 9799409bf..000000000
--- a/docs/docs/syntax/function-testing.mdx
+++ /dev/null
@@ -1,279 +0,0 @@
----
-title: "Unit Testing"
----
-
-There are two types of tests you may want to run on your AI functions:
-
-- Unit Tests: Tests a single AI function
-- Integration Tests: Tests a pipeline of AI functions and potentially buisness logic
-
-We support both types of tests using BAML.
-
-## Using the playground
-
-Use the playground to run tests against individual functions
-
-
-
-## Baml CLI
-
-You can run tests defined
-
-## From BAML Studio
-
-Coming soon
-You can also create tests from production logs in BAML Studio. Any weird or atypical
-user inputs can be used to create a test case with just 1 click.
-
-## JSON Files (`__tests__` folder)
-
-Unit tests created by the playground are stored in the `__tests__` folder.
-
-The project structure should look like this:
-
-```bash
-.
-├── baml_client/
-└── baml_src/
- ├── __tests__/
- │ ├── YourAIFunction/
- │ │ ├── test_name_monkey.json
- │ │ └── test_name_cricket.json
- │ └── YourAIFunction2/
- │ └── test_name_jellyfish.json
- ├── main.baml
- └── foo.baml
-```
-
-You can manually create tests by creating a folder for each function you want to test. Inside each folder, create a json file for each test case you want to run. The json file should be named `test_name.json` where `test_name` is the name of the test case.
-
-To see the structure of the JSON file, you can create a test using the playground and then copy the JSON file into your project.
-
-
- The BAML compiler reads the `__tests__` folder and generates a pytest file for
- you so you don't have to manually write test boilerplate code.
-
-
-## Programmatic Testing (using pytest)
-
-For python, you can leverage **pytest** to run tests. All you need is to add a **@baml_test** decorator to your test functions to get your test data visualized on the baml dashboard.
-
-### Running tests
-
-
- Make sure you are running these commands from your python virtual environment
- (or **`poetry shell`** if you use poetry)
-
-
-```bash
-# From your project root
-# Lists all tests
-pytest -m baml_test --collect-only
-```
-
-```bash
-# From your project root
-# Runs all tests
-# For every function
-pytest -m baml_test
-```
-
-To run tests for a subdirectory
-
-```bash
-# From your project root
-# Note the underscore at the end of the folder name
-pytest -m baml_test ./your-tests-folder/
-```
-
-To run tests that have a specific name or group name
-
-```bash
-# From your project root
-pytest -m baml_test -k test_group_name
-```
-
-You can read more about the `-k` arg of pytest here ([PyTest Docs](https://docs.pytest.org/en/latest/example/markers.html#using-k-expr-to-select-tests-based-on-their-name))
-
-`-k` will match any tests with that given name.
-
-To run a specific test case in a test group
-
-```bash
-# From your project root
-pytest -m baml_test -k 'test_group_name and test_case_name'
-```
-
-### Unit Test an AI Function
-
-Section in progress..
-
-### Integration Tests (test a pipeline calling multiple functions)
-
-
- TypeScript support for testing is still in closed alpha - please contact us if you would like to use it!
-
-
-
-
-```python Test Pipeline
-# Import your baml-generated LLM functions
-from baml_client import baml as b
-
-# Import testing library
-from baml_client.testing import baml_test
-
-# Import any custom types defined in .baml files
-from baml_client.types import Sentiment
-
-# Mark this as a baml test (recorded on dashboard and does some setup)
-@baml_test
-async def test_pipeline():
-    message = "I am ecstatic"
-    response = await b.ClassifySentiment(message)
-    assert response == Sentiment.POSITIVE
-    response = await b.GetHappyResponse(message)
-```
-
-
-
-
- Make sure your test files and test functions are prefixed with `test`, and any test
- classes with `Test`. Otherwise, pytest will not pick up your tests.
-
-
-### Parameterized Tests
-
-Parameterized tests allow you to declare a list of inputs and expected outputs for a test case. BAML will run the test for each input and compare the output to the expected output.
-
-```python
-import pytest
-
-from baml_client.testing import baml_test
-# Import your baml-generated LLM functions
-from baml_client import baml
-# Import any custom types defined in .baml files
-from baml_client.types import Sentiment
-
-@baml_test
-@pytest.mark.parametrize(
- "input, expected_output",
- [
- ("I am ecstatic", Sentiment.POSITIVE),
- ("I am sad", Sentiment.NEGATIVE),
- ("I am angry", Sentiment.NEGATIVE),
- ],
-)
-async def test_sentiments(input, expected_output):
- response = await baml.ClassifySentiment(input)
- assert response == expected_output
-```
-
-This will generate 3 test cases on the dashboard, one for each input.
-
-### Using custom names for each test
-
-The `parametrize` decorator also allows you to specify a custom name for each test case. See below for how we name each test case using the `ids` parameter.
-
-```python
-import pytest
-
-from baml_client import baml as b
-from baml_client.types import Sentiment, IClassifySentiment
-
-test_cases = [
- {"input": "I am ecstatic", "expected_output": Sentiment.POSITIVE, "id": "ecstatic-test"},
- {"input": "I am sad", "expected_output": Sentiment.NEGATIVE, "id": "sad-test"},
- {"input": "I am angry", "expected_output": Sentiment.NEGATIVE, "id": "angry-test"},
-]
-
-@b.ClassifySentiment.test
-@pytest.mark.parametrize(
- "test_case",
- test_cases,
- ids=[case['id'] for case in test_cases]
-)
-# Note the argument name "test_case" is set by the first parameter in the parametrize() decorator
-async def test_sentiments(ClassifySentimentImpl: IClassifySentiment, test_case):
- response = await ClassifySentimentImpl(test_case["input"])
- assert response == test_case["expected_output"]
-```
-
-### Grouping Tests by Input Type
-
-Alternatively, you can group things together logically by defining one test case or test class per input type you're testing. In our case, we'll split all positive sentiments into their own group.
-
-```python
-import pytest
-
-from baml_client.testing import baml_test
-# Import your baml-generated LLM functions
-from baml_client import baml
-# Import any custom types defined in .baml files
-from baml_client.types import Sentiment
-
-@baml_test
-@pytest.mark.asyncio
-@pytest.mark.parametrize(
-    # Note we only need to pass in one variable to the test, the "input".
-    "input",
-    [
-        "I am ecstatic",
-        "I am super happy!"
-    ],
-)
-class TestHappySentiments:
-    async def test_sentiments(self, input):
-        response = await baml.ClassifySentiment(input)
-        assert response == Sentiment.POSITIVE
-
-@baml_test
-@pytest.mark.asyncio
-@pytest.mark.parametrize(
-    # Note we only need to pass in one variable to the test, the "input".
-    "input",
-    [
-        "I am sad",
-        "I am angry"
-    ],
-)
-class TestSadSentiments:
-    async def test_sentiments(self, input):
-        response = await baml.ClassifySentiment(input)
-        assert response == Sentiment.NEGATIVE
-```
-
-Alternatively, you can write a separate test function for each input type.
-
-```python
-import pytest
-
-from baml_client.testing import baml_test
-from baml_client import baml
-from baml_client.types import Sentiment
-
-@baml_test
-@pytest.mark.asyncio
-@pytest.mark.parametrize(
- "input",
- [
- "I am ecstatic",
- "I am super happy!",
- "I am thrilled",
- "I am overjoyed",
- ],
-)
-async def test_happy_sentiments(input):
- response = await baml.ClassifySentiment(input)
- assert response == Sentiment.POSITIVE
-
-@baml_test
-@pytest.mark.asyncio
-@pytest.mark.parametrize(
- "input",
- [
- "I am sad",
- "I am angry",
- "I am upset",
- "I am frustrated",
- ],
-)
-async def test_sad_sentiments(input):
- response = await baml.ClassifySentiment(input)
- assert response == Sentiment.NEGATIVE
-```
diff --git a/docs/docs/syntax/function.mdx b/docs/docs/syntax/function.mdx
deleted file mode 100644
index 338480230..000000000
--- a/docs/docs/syntax/function.mdx
+++ /dev/null
@@ -1,13 +0,0 @@
----
-title: "function"
----
-
-A **function** is the contract between the application and the AI model. It defines the desired **input** and **output**.
-
-
-
-
-With BAML, you can modify the implementation of a function and keep the application logic that uses the
-function unchanged.
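-
-As a rough sketch of the idea (`handle_request` is an illustrative name, and `YourAIFunction` is the placeholder used elsewhere in these docs):
-
-```python
-from baml_client import baml as b
-
-async def handle_request():
-    # Application code depends only on the function's declared input and output.
-    # Swapping the prompt, model, or client behind YourAIFunction leaves this untouched.
-    return await b.YourAIFunction()
-```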
-
-Check out [PromptFiddle](https://promptfiddle.com) to see various BAML function examples.
\ No newline at end of file
diff --git a/docs/docs/syntax/generator.mdx b/docs/docs/syntax/generator.mdx
deleted file mode 100644
index fa788dc7d..000000000
--- a/docs/docs/syntax/generator.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
----
-title: generator
----
-
-The `generator` configuration needs to be added somewhere in your `.baml` files to generate the `baml_client` in Python, TypeScript, or Ruby.
-
-We recommend running **baml init** to have this set up for you with sane defaults.
-
-Here is how you can add a generator block:
-
-```rust
-generator MyGenerator {
- output_type typescript // or python/pydantic, ruby
- output_dir ../
-}
-```
-
-| Property | Description | Options | Default |
-| ------------------- | ------------------------------------------------ | --------------------------------- | ---------------------------------------------- |
-| output_type | The language of the generated client | python/pydantic, ruby, typescript | |
-| output_dir | The directory where we'll output the generated baml_client | | ../ |
diff --git a/docs/docs/syntax/overview.mdx b/docs/docs/syntax/overview.mdx
deleted file mode 100644
index 29f7a8ef9..000000000
--- a/docs/docs/syntax/overview.mdx
+++ /dev/null
@@ -1,68 +0,0 @@
----
-title: BAML Project Structure
----
-
-A BAML project has the following structure:
-
-```bash
-.
-├── baml_client/ # Generated code
-├── baml_src/ # Prompts live here
-│ └── foo.baml
-# The rest of your project (not generated nor used by BAML)
-├── app/
-│ ├── __init__.py
-│ └── main.py
-└── pyproject.toml
-
-```
-
-1. `baml_src/` is the directory where you write your BAML files with the AI
- function declarations, prompts, retry policies, etc. It also contains
- [generator](/syntax/generator) blocks which configure how and where to
- transpile your BAML code.
-
-2. `baml_client/` is the directory where BAML will generate code, and where you'll
- import the generated code from.
-
-
-
-```python Python
-from baml_client import baml as b
-
-await b.YourAIFunction()
-```
-
-```typescript TypeScript
-import b from "@/baml_client";
-
-await b.YourAIFunction();
-```
-
-
-
-3. `baml_src/__tests__/` is where your unit tests live. The `.json` files
-   store the test inputs that can be loaded, deleted, created, and run using
-   the BAML VSCode extension. You can also write programmatic Python/TypeScript
-   tests anywhere you like. See [here](/v3/syntax/function-testing) for more
-   information.
-
-
- **You should never edit any files inside the `baml_client` directory** as the whole
- directory gets regenerated on every `baml build` (which runs automatically on save
- if you're using the VSCode extension).
-
-
-
- If you ever run into any issues with the generated code (like merge
- conflicts), you can always delete the `baml_client` directory and it will get
- regenerated automatically once you fix any other conflicts in your `.baml`
- files.
-
-
-### imports
-
-BAML by default has global imports. Every entity declared in any `.baml` file
-is available to all other `.baml` files under the same `baml_src` directory.
-You **can** have multiple `baml_src` directories, but no promises on how the
-VSCode extension will behave (yet).
diff --git a/docs/docs/syntax/prompt_engineering/overview.mdx b/docs/docs/syntax/prompt_engineering/overview.mdx
deleted file mode 100644
index 9f044879f..000000000
--- a/docs/docs/syntax/prompt_engineering/overview.mdx
+++ /dev/null
@@ -1,89 +0,0 @@
----
-title: Prompt Syntax
----
-
-Prompts are written using the [Jinja templating language](https://jinja.palletsprojects.com/en/3.0.x/templates/).
-
-There are a few **jinja macros** (or functions) that we have included in the language for you. We recommend viewing what they do using the VSCode preview (or on [promptfiddle.com](https://promptfiddle.com)), so you can see the full string transform in real time.
-
-1. **`{{ _.role("user") }}`**: This divides up the string into different message roles.
-2. **`{{ ctx.output_format }}`**: This prints out the output format instructions for the prompt.
-You can add your own prefix instructions like this: `{{ ctx.output_format(prefix="Please please please format your output like this:")}}`. We have more parameters you can customize. Docs coming soon.
-3. **`{{ ctx.client }}`**: This prints out the client model the function is using
-
-"ctx" is contextual information about the prompt (like the output format or client). "_." is a special namespace for other BAML functions.
-
-
-
-Here is what a prompt with jinja looks like using these macros:
-
-```rust
-enum Category {
- Refund
- CancelOrder
- TechnicalSupport
- AccountIssue
- Question
-}
-
-class Message {
- role string
- message string
-}
-
-
-function ClassifyConversation(messages: Message[]) -> Category[] {
- client GPT4Turbo
- prompt #"
- Classify this conversation:
- {% for m in messages %}
- {{ _.role(m.role) }}
- {{ m.message }}
- {% endfor %}
-
- Use the following categories:
- {{ ctx.output_format}}
- "#
-}
-```
-
-### Template strings
-You can create your own typed templates using the `template_string` keyword, and call them from a prompt:
-
-```rust
-// Extract the logic out of the prompt:
-template_string PrintMessages(messages: Message[]) -> string {
- {% for m in messages %}
- {{ _.role(m.role) }}
- {{ m.message }}
- {% endfor %}
-}
-
-function ClassifyConversation(messages: Message[]) -> Category[] {
- client GPT4Turbo
- prompt #"
- Classify this conversation:
- {{ PrintMessages(messages) }}
-
- Use the following categories:
- {{ ctx.output_format}}
- "#
-}
-```
-
-### Conditionals
-You can use these special variables to write conditionals, for example if you want to change your prompt depending on the model:
-
- ```rust
- {% if ctx.client.name == "GPT4Turbo" %}
- // Do something
- {% endif %}
- ```
-
-You can use conditionals on your input objects as well:
-
- ```rust
- {% if messages[0].role == "user" %}
- // Do something
- {% endif %}
- ```
diff --git a/docs/docs/syntax/type-deserializer.mdx b/docs/docs/syntax/type-deserializer.mdx
deleted file mode 100644
index 461b97620..000000000
--- a/docs/docs/syntax/type-deserializer.mdx
+++ /dev/null
@@ -1,110 +0,0 @@
----
-title: Parsing and Deserialization
----
-
-BAML uses a custom `Deserializer` to parse a string into the desired type. **You don't have to do anything to enable the deserializer; it comes built in.**
-
-Instead of doing the following, you can rely on BAML to do the parsing for you.
-
-```python
-# Example parsing code you might be writing today
-# without baml
-import json
-
-openai_response_text = await openai.completions.create(
- ...
-)
-response = SomePydanticModel(**json.loads(openai_response_text))
-
-```
-
-## Examples
-
-
-
-| LLM Output | Desired Type | Baml Output | How |
-| ------------------------------------------------------------------------------------------------------------------------------------------------ | ------------ | --------------- | ------------------------------------------------------------------------------------------ |
-| `great` | Style | `Style.GREAT` | We handle case insensitivity |
-| `"great"` | Style | `Style.GREAT` | We handle extra quotes |
-| `great` | Style[] | `[Style.GREAT]` | Non-array types are automatically wrapped in an array |
-| `{ "feeling": "great" }` | Style | `Style.GREAT` | When looking for a singular value, we can parse a dictionary with a single key by using its value |
-| Some text that goes before... \```json {"feeling": "great"} \``` Some text that came after | Style | `Style.GREAT` | We can find the inner json object and parse it even when surrounded by lots of text |
-
-
-Note that we can apply the same parsing logic to any type, not just enums. For example, in the
-case of numbers, we can strip commas before parsing. This page outlines all
-the rules we use to parse each type.
-
-
- The deserializer makes 0 external calls and runs fully locally!
-
-
-## Error handling
-
-All parsing errors are handled by the `Deserializer` and will raise a `DeserializerException`.
-
-
-
-```python Python
-from baml_client import baml as b
-from baml_client import DeserializerException
-
-try:
- response = await b.SomeAIFunction(query="I want to buy a car")
-except DeserializerException as e:
-    # The parser was not able to read the response as the expected type
- print(e)
-```
-
-```typescript TypeScript
-import b, { DeserializerException } from "@/baml_client";
-
-const main = async () => {
- try {
- await b.ClassifyMessage("I want to cancel my order");
- } catch (e) {
- if (e instanceof DeserializerException) {
-      // The parser was not able to read the response as the expected type
- console.log(e);
- }
- throw e;
- }
-};
-
-if (require.main === module) {
- main();
-}
-```
-
-
-
-## Primitive Types
-
-TODO: Include a section on how each type is parsed and coerced from other types.
-
-## Composite/Structured Types
-
-### enum
-
-**See:** [Prompt engineering > Enum > @alias](/docs/syntax/prompt_engineering/enum#deserialization-with-alias)
-
-### class
-
-**See:** [Prompt engineering > Class](/docs/syntax/class)
-
-### Optional (?)
-
-If the type is optional, the parser will attempt to parse the value as the underlying type, and return `null` if parsing fails.
-
-### Union (|)
-
-Unions are parsed in left to right order. The first type that successfully parses the value will be returned.
-If no types are able to parse the value, a `DeserializerException` will be raised.
-
-### List/Array ([])
-
-For lists, each element is parsed as the list's element type (see the illustrative sketch after this list).
-
-- It will always return a list, even if the list is empty.
-- If an element fails to parse, it is skipped and not included in the final list.
-- If the value is not a list, the parser will attempt to parse the value as the type and return a list with a single element.
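-
-For intuition, here is a purely illustrative Python sketch of the three rules above. It is **not** BAML's deserializer; `coerce_int` is a hypothetical stand-in for BAML's per-element parsing (which, as noted earlier, also strips commas from numbers):
-
-```python
-from typing import Any, List, Optional
-
-def coerce_int(value: Any) -> Optional[int]:
-    """Stand-in element parser: returns an int, or None on failure."""
-    try:
-        return int(str(value).replace(",", ""))
-    except ValueError:
-        return None
-
-def parse_int_list(value: Any) -> List[int]:
-    # A non-list value is wrapped in a single-element list.
-    items = value if isinstance(value, list) else [value]
-    # Elements that fail to parse are skipped...
-    parsed = [coerce_int(item) for item in items]
-    # ...and a list is always returned, even if it ends up empty.
-    return [p for p in parsed if p is not None]
-
-assert parse_int_list(["1", "2,000", "oops"]) == [1, 2000]
-assert parse_int_list("7") == [7]
-assert parse_int_list([]) == []
-```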
diff --git a/docs/docs/syntax/type.mdx b/docs/docs/syntax/type.mdx
deleted file mode 100644
index b7e33be6e..000000000
--- a/docs/docs/syntax/type.mdx
+++ /dev/null
@@ -1,251 +0,0 @@
----
-title: Supported Types
----
-
-## Primitive Types
-
-### ✅ bool
-
-- **When to use it:** When you need to represent a simple true/false condition.
-- **Syntax:** `bool`
-
-### ✅ int
-
-- **When to use it:** When you need whole numbers (values without a decimal component)
-- **Syntax:** `int`
-
-### ✅ float
-
-- **When to use it:** When dealing with numerical values that require precision (like measurements or monetary values).
-- **Syntax:** `float`
-
-### ✅ string
-
-- **Syntax:** `string`
-
-### ✅ char
-
-- **When to use it:** When you need to represent a single letter, digit, or other symbol.
-- **Syntax:** `char`
-
-### ✅ null
-
-- **Syntax:** `null`
-
-### ✅ Images
-
-You can use an image like this:
-
-```rust
-function DescribeImage(myImg: image) -> string {
- client GPT4Turbo
- prompt #"
- {{ _.role("user")}}
- Describe the image in four words:
- {{ myImg }}
- "#
-}
-```
-
-### ⚠️ bytes
-
-- Not yet supported. Use a `string[]` or `int[]` instead.
-
-### ⚠️ any/json
-
-- Not supported.
-
-  We don't want to encourage its use as it defeats the purpose of having a
-  type system. If you really need it, for now use `string` and call
-  `json.parse` yourself. Also, message us on Discord so we can understand your
-  use case and consider supporting it.
-
-
-### Dates/Times
-
-#### ⚠️ datetime
-
-- Not yet supported. Use a `string` or `int` (milliseconds since epoch) instead.
-
-#### ⚠️ datetime interval
-
-- Not yet supported. Use a `string` or `int` (milliseconds since epoch) instead.
-
-### ⚠️ Unit Values (currency, temperature, etc)
-
-Many times you may want to represent a number with a unit. For example, a
-temperature of 32 degrees Fahrenheit or a cost of $100.00.
-
-- Not yet supported. We recommend using a number (`int` or `float`) and having
- the unit be part of the variable name. For example, `temperature_fahrenheit`
- and `cost_usd` (see [@alias](/docs/syntax/class#alias)).
-
-
-
-
-## Composite/Structured Types
-
-### ✅ enum
-
-**See also:** [Enum](/docs/syntax/enum)
-
-A user-defined type consisting of a set of named constants.
-- **When to use it:** Use it when you need a model to choose from a known set of values, like in classification problems
-- **Syntax:**
-
-```rust
-enum Name {
- Value1
- Value2
-}
-```
-
-- **Example:**
-
-```rust
-enum Color {
- Red
- Green
- Blue
-}
-```
-
-### ✅ class
-
-**See also:** [Class](/docs/syntax/class)
-
-- **What it is:** User-defined complex data structures.
-- **When to use it:** When you need an LLM to call another function (e.g. OpenAI's function calling), you can model the function's parameters as a class. You can also get models to return complex structured data by using a class.
-- **Syntax:**
-
-```rust
-class ClassName {
- ...
-}
-```
-
-- **Example:**
-
-```rust
-class Car {
- model string
- year int
-}
-```
-
-### ✅ Optional (?)
-
-- **What it is:** A type that represents a value that might or might not be present.
-- **When to use it:** When a variable might not have a value and you want to explicitly handle its absence.
-- **Syntax:** `?`
-- **Example:** `int?` or `(MyClass | int)?`
-
-### ✅ Union (|)
-
-- **What it is:** A type that can hold one of several specified types.
-- **When to use it:** When a variable can legitimately be of more than one type. This can be helpful with function calling, where you want to return different types of data depending on which function should be called.
-- **Syntax:** `|`
-- **Example:** `int | string` or `(int | string) | MyClass` or `string | MyClass | int[]`
-
- Order is important. `int | string` is not the same as `string | int`.
-
- For example, if you have a `"1"` string, it will be parsed as an `int` if
- you use `int | string`, but as a `string` if you use `string | int`.
-
-
-### ✅ List/Array ([])
-
-- **What it is:** A collection of elements of the same type.
-- **When to use it:** When you need to store a list of items of the same type.
-- **Syntax:** `[]`
-- **Example:** `string[]` or `(int | string)[]` or `int[][]`
-
-
-
-
- Array types can be nested to create multi-dimensional arrays.