chore: s/MODEL_CHAT/CHAT_MODEL/ in example and test envvars for consistency and clarity (#515)
trentm authored Jan 8, 2025
1 parent e8cce97 commit 0f4f713
Showing 3 changed files with 12 additions and 12 deletions.
4 changes: 2 additions & 2 deletions packages/instrumentation-openai/README.md
@@ -126,8 +126,8 @@ ollama serve
 
 # When using Ollama, we default to qwen2.5:0.5b, which is a small model. You
 # can choose a larger one, or a different tool capable model like mistral-nemo.
-export MODEL_CHAT=qwen2.5
-ollama pull $MODEL_CHAT
+export CHAT_MODEL=qwen2.5
+ollama pull $CHAT_MODEL
 
 OPENAI_BASE_URL=http://localhost:11434/v1 \
 node use-chat.js
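
With the renamed variable, swapping in the larger tool-capable model the comment mentions would look like this (a sketch based on the README snippet above, not part of the commit):

```sh
# Sketch: same Ollama flow as above, but with the larger tool-capable model
# named in the comment. Assumes Ollama is already running via `ollama serve`.
export CHAT_MODEL=mistral-nemo
ollama pull $CHAT_MODEL

OPENAI_BASE_URL=http://localhost:11434/v1 \
  node use-chat.js
```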
8 changes: 4 additions & 4 deletions packages/instrumentation-openai/examples/openai.js
@@ -35,9 +35,9 @@ function newOpenAIAndModels() {
   // Default to models available in both the OpenAI platform and Azure OpenAI
   // Service. For Azure, however, this "model" must match the Azure "deployment
   // name".
-  let chatModel = process.env.MODEL_CHAT ?? 'gpt-4o-mini';
+  let chatModel = process.env.CHAT_MODEL ?? 'gpt-4o-mini';
   let embeddingsModel =
-    process.env.MODEL_EMBEDDINGS ?? 'text-embedding-3-small';
+    process.env.EMBEDDINGS_MODEL ?? 'text-embedding-3-small';
 
   if (process.env.AZURE_OPENAI_API_KEY) {
     clientCtor = AzureOpenAI;
@@ -47,8 +47,8 @@ function newOpenAIAndModels() {
   ) {
     process.env.OPENAI_API_KEY = 'unused';
     // Note: Others like LocalAI do not use Ollama's naming scheme.
-    chatModel = process.env.MODEL_CHAT ?? 'qwen2.5:0.5b';
-    embeddingsModel = process.env.MODEL_EMBEDDINGS ?? 'all-minilm:33m';
+    chatModel = process.env.CHAT_MODEL ?? 'qwen2.5:0.5b';
+    embeddingsModel = process.env.EMBEDDINGS_MODEL ?? 'all-minilm:33m';
   }
 
   return { client: new clientCtor(), chatModel, embeddingsModel };
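
Overriding the example's model defaults from the shell now uses the renamed variables. A sketch (not from the commit; it assumes the example is run directly with node from the package directory and that OPENAI_API_KEY is already set for the OpenAI platform):

```sh
# Sketch: override the renamed env vars read by newOpenAIAndModels().
# Assumes the example is invoked from packages/instrumentation-openai.
export CHAT_MODEL=gpt-4o-mini
export EMBEDDINGS_MODEL=text-embedding-3-small
node examples/openai.js
```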
12 changes: 6 additions & 6 deletions packages/instrumentation-openai/test/fixtures.test.js
@@ -107,8 +107,8 @@ if (process.env.TEST_FIXTURES_ENV_FILE) {
   require('dotenv').config({ path: process.env.TEST_FIXTURES_ENV_FILE });
 }
 
-const UNIT_TEST_MODEL_TOOLS = 'gpt-4o-mini';
-const UNIT_TEST_MODEL_EMBEDDINGS = 'text-embedding-3-small';
+const UNIT_TEST_CHAT_MODEL = 'gpt-4o-mini';
+const UNIT_TEST_EMBEDDINGS_MODEL = 'text-embedding-3-small';
 
 // Configure the test fixtures based on the test mode.
 const testMode = process.env.TEST_FIXTURES_MODE || 'unit';
@@ -130,8 +130,8 @@ switch (testMode) {
     // OPENAI_API_KEY needs to be set to something to avoid OpenAI
     // constructor error. However, because of mocking, it isn't used.
     process.env.OPENAI_API_KEY = 'notused';
-    process.env.TEST_CHAT_MODEL = UNIT_TEST_MODEL_TOOLS;
-    process.env.TEST_EMBEDDINGS_MODEL = UNIT_TEST_MODEL_EMBEDDINGS;
+    process.env.TEST_CHAT_MODEL = UNIT_TEST_CHAT_MODEL;
+    process.env.TEST_EMBEDDINGS_MODEL = UNIT_TEST_EMBEDDINGS_MODEL;
     targetService = 'openai';
     break;
 
@@ -149,8 +149,8 @@ switch (testMode) {
     }
     usingNock = true;
     process.env.TEST_NOCK_BACK_MODE = 'update';
-    process.env.TEST_CHAT_MODEL = UNIT_TEST_MODEL_TOOLS;
-    process.env.TEST_EMBEDDINGS_MODEL = UNIT_TEST_MODEL_EMBEDDINGS;
+    process.env.TEST_CHAT_MODEL = UNIT_TEST_CHAT_MODEL;
+    process.env.TEST_EMBEDDINGS_MODEL = UNIT_TEST_EMBEDDINGS_MODEL;
     targetService = 'openai';
     break;
 
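The renamed constants only feed the TEST_CHAT_MODEL and TEST_EMBEDDINGS_MODEL variables that the fixtures consume, so running the suite is unchanged. A sketch (it assumes `npm test` is the package's test entry point, which is not shown in this diff):

```sh
# Sketch: run the fixtures in the default 'unit' mode, which mocks OpenAI and
# sets TEST_CHAT_MODEL/TEST_EMBEDDINGS_MODEL from the renamed constants.
cd packages/instrumentation-openai
TEST_FIXTURES_MODE=unit npm test
```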
