Skip to content

Commit

Permalink
feat(OpenAI Chat Model Node): Add reasoning effort option to control the amount of reasoning tokens to use
Browse files Browse the repository at this point in the history
  • Loading branch information
jeanpaul committed Feb 6, 2025
1 parent fff98b1 commit c1a28dc
Showing 1 changed file with 37 additions and 5 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -264,6 +264,32 @@ export class LmChatOpenAi implements INodeType {
'Controls randomness: Lowering results in less random completions. As the temperature approaches zero, the model will become deterministic and repetitive.',
type: 'number',
},
{
displayName: 'Reasoning Effort',
name: 'reasoningEffort',
default: 'medium',
description:
'Controls the amount of reasoning tokens to use. A value of "low" will favor speed and economical token usage, "high" will favor more complete reasoning at the cost of more tokens generated and slower responses.',
type: 'options',
options: [
{
name: 'Low',
value: 'low',
description: 'Favors speed and economical token usage',
},
{
name: 'Medium',
value: 'medium',
description: 'Balance between speed and reasoning accuracy',
},
{
name: 'High',
value: 'high',
description:
'Favors more complete reasoning at the cost of more tokens generated and slower responses',
},
],
},
{
displayName: 'Timeout',
name: 'timeout',
Expand Down Expand Up @@ -311,6 +337,7 @@ export class LmChatOpenAi implements INodeType {
temperature?: number;
topP?: number;
responseFormat?: 'text' | 'json_object';
reasoningEffort?: 'low' | 'medium' | 'high';
};

const configuration: ClientOptions = {};
Expand All @@ -320,6 +347,15 @@ export class LmChatOpenAi implements INodeType {
configuration.baseURL = credentials.url as string;
}

// Extra options to send to OpenAI, that are not directly supported by LangChain
const modelKwargs: {
response_format?: object;
reasoning_effort?: 'low' | 'medium' | 'high';
} = {};
if (options.responseFormat) modelKwargs.response_format = { type: options.responseFormat };
if (options.reasoningEffort && ['low', 'medium', 'high'].includes(options.reasoningEffort))
modelKwargs.reasoning_effort = options.reasoningEffort;

const model = new ChatOpenAI({
openAIApiKey: credentials.apiKey as string,
modelName,
Expand All @@ -328,11 +364,7 @@ export class LmChatOpenAi implements INodeType {
maxRetries: options.maxRetries ?? 2,
configuration,
callbacks: [new N8nLlmTracing(this)],
modelKwargs: options.responseFormat
? {
response_format: { type: options.responseFormat },
}
: undefined,
modelKwargs,
onFailedAttempt: makeN8nLlmFailedAttemptHandler(this, openAiFailedAttemptHandler),
});

Expand Down

0 comments on commit c1a28dc

Please sign in to comment.