From 18a8369c2d7e422fb7922b4dbd8d5ab8f29ca422 Mon Sep 17 00:00:00 2001
From: Matt Steele
Date: Wed, 13 Sep 2023 12:02:48 -0400
Subject: [PATCH] fix: Update README.md

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index cf06a6b..ffea379 100644
--- a/README.md
+++ b/README.md
@@ -121,7 +121,7 @@ await fetch('https://api.openai.com/v1/embeddings', {
 In an effort to streamline all the utilities into a single opinionated service, you can create a `client` that will determine what is the best model and truncate if needed to fit your needs.
 
 ```js
-import { createClient } = from 'openai-tokens'
+import { createClient } from 'openai-tokens'
 
 const client = createClient({
   // put in your OpenAI key here
@@ -329,7 +329,7 @@ You can pass options to the validate wrapper as seen in the examples above. The
 A dynamic router has been provided for convenience. This allows you to pass multiple models. The module will choose the first valid model, so you can always maintain the smallest possible (and save some money 💰).
 
 ```js
-import { dynamicWrapper } = from 'openai-tokens'
+import { dynamicWrapper } from 'openai-tokens'
 
 const chat = async (messages = []) => {
   const body = await fetch('https://api.openai.com/v1/chat/completions', {
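
For reference, a minimal sketch of the corrected import syntax in use, pieced together from the README context visible in the two hunks above. The option names passed to `createClient` and the exact argument shape of `dynamicWrapper` are not shown in this patch, so they are assumptions here rather than the library's documented API.

```js
// Sketch only: both named imports come from the corrected lines in the hunks above.
import { createClient, dynamicWrapper } from 'openai-tokens'

// The hunk at README line 121 shows createClient taking an options object
// whose first entry is the OpenAI key; the option name below is assumed.
const client = createClient({
  key: process.env.OPENAI_API_KEY // assumed option name
})

// The hunk at README line 329 shows dynamicWrapper imported for a chat helper
// that posts to the completions endpoint; the argument shape below is assumed.
const chat = async (messages = []) => {
  const res = await fetch('https://api.openai.com/v1/chat/completions', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${process.env.OPENAI_API_KEY}`
    },
    body: JSON.stringify(
      dynamicWrapper({
        models: ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k'], // smallest model listed first
        messages
      })
    )
  })
  return res.json()
}
```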