refactor(api): update temperature and penalty parameters for DeepSeek and OpenAI models

- Made temperature, presence_penalty, frequency_penalty, and top_p optional in the RequestPayload interface.
- Adjusted request payload construction in DeepSeekApi to conditionally include temperature for the "deepseek-reasoner" model.
- Updated ChatGPTApi to handle temperature and penalty parameters based on model type, specifically for the "deepseek-reasoner" and "o1" models.
- Ensured compatibility with existing models while allowing parameters to be omitted for models that do not accept them (a sketch of the resulting payload logic follows this list).
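A minimal TypeScript sketch of the pattern described in the bullets above, assuming the payload is built from a model config object as in the diffs below; `buildPayload` and `ModelConfig` are illustrative names, not helpers from this repository. Sampling parameters are typed as optional and assigned `undefined` for models that reject them, so they are simply left out of the serialized request body.

```typescript
// Illustrative sketch only; not the repository's actual code.
interface RequestPayload {
  model: string;
  stream?: boolean;
  temperature?: number; // optional: omitted for reasoning models
  presence_penalty?: number;
  frequency_penalty?: number;
  top_p?: number;
  max_completion_tokens?: number;
}

interface ModelConfig {
  model: string;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
  max_tokens: number;
}

// Hypothetical helper: sends sampling parameters only to models that accept them.
function buildPayload(cfg: ModelConfig, stream: boolean): RequestPayload {
  const isReasoner = cfg.model.startsWith("deepseek-reasoner");
  const isO1 = cfg.model.startsWith("o1");

  const payload: RequestPayload = {
    model: cfg.model,
    stream,
    // undefined-valued fields are dropped during JSON serialization,
    // so the parameter is not sent at all for models that reject it.
    temperature: !isO1 && !isReasoner ? cfg.temperature : undefined,
    presence_penalty: isReasoner ? undefined : isO1 ? 0 : cfg.presence_penalty,
    frequency_penalty: isReasoner ? undefined : isO1 ? 0 : cfg.frequency_penalty,
    top_p: isReasoner ? undefined : isO1 ? 1 : cfg.top_p,
  };

  if (isO1 || isReasoner) {
    payload.max_completion_tokens = cfg.max_tokens;
  }
  return payload;
}
```

The actual changes in app/client/platforms/openai.ts and deepseek.ts follow the same idea, as shown in the diffs below.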
kiritoko1029 committed Jan 20, 2025
1 parent c6ef17b commit 41056b0
Showing 3 changed files with 25 additions and 13 deletions.
5 changes: 3 additions & 2 deletions app/client/platforms/deepseek.ts
@@ -84,14 +84,15 @@ export class DeepSeekApi implements LLMApi {
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: modelConfig.temperature,
presence_penalty: modelConfig.presence_penalty,
frequency_penalty: modelConfig.frequency_penalty,
top_p: modelConfig.top_p,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
};

if (modelConfig.model === "deepseek-reasoner") {
requestPayload.temperature = modelConfig.temperature;
}
console.log("[Request] openai payload: ", requestPayload);

const shouldStream = !!options.config.stream;
31 changes: 21 additions & 10 deletions app/client/platforms/openai.ts
Expand Up @@ -60,10 +60,10 @@ export interface RequestPayload {
}[];
stream?: boolean;
model: string;
temperature: number;
presence_penalty: number;
frequency_penalty: number;
top_p: number;
temperature?: number;
presence_penalty?: number;
frequency_penalty?: number;
top_p?: number;
max_tokens?: number;
max_completion_tokens?: number;
}
@@ -196,6 +196,8 @@ export class ChatGPTApi implements LLMApi {

const isDalle3 = _isDalle3(options.config.model);
const isO1 = options.config.model.startsWith("o1");
const isDeepseekReasoner =
options.config.model.startsWith("deepseek-reasoner");
if (isDalle3) {
const prompt = getMessageTextContent(
options.messages.slice(-1)?.pop() as any,
@@ -226,16 +228,25 @@
messages,
stream: options.config.stream,
model: modelConfig.model,
temperature: !isO1 ? modelConfig.temperature : 1,
presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
top_p: !isO1 ? modelConfig.top_p : 1,
temperature:
!isO1 && !isDeepseekReasoner ? modelConfig.temperature : undefined,
presence_penalty: !isDeepseekReasoner
? isO1
? 0
: modelConfig.presence_penalty
: undefined,
frequency_penalty: !isDeepseekReasoner
? isO1
? 0
: modelConfig.frequency_penalty
: undefined,
top_p: !isDeepseekReasoner ? (isO1 ? 1 : modelConfig.top_p) : undefined,
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
};

// O1 uses max_completion_tokens to control the token count (https://platform.openai.com/docs/guides/reasoning#controlling-costs)
if (isO1) {
// O1 uses max_completion_tokens to control the token count
if (isO1 || isDeepseekReasoner) {
requestPayload["max_completion_tokens"] = modelConfig.max_tokens;
}

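A stand-alone check of the behavior the openai.ts change relies on, assuming the payload is serialized with JSON.stringify as is typical for fetch-based clients: properties whose value is `undefined` are omitted from the serialized JSON, so assigning `undefined` keeps the parameter out of the request body entirely. This is standard JavaScript behavior, not project-specific code.

```typescript
// Standard JS behavior: undefined-valued properties are dropped by JSON.stringify.
const payload = {
  model: "deepseek-reasoner",
  temperature: undefined, // will not appear in the serialized body
  top_p: undefined,
  max_completion_tokens: 4000,
};

console.log(JSON.stringify(payload));
// {"model":"deepseek-reasoner","max_completion_tokens":4000}
```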
2 changes: 1 addition & 1 deletion src-tauri/tauri.conf.json
@@ -9,7 +9,7 @@
},
"package": {
"productName": "NextChat",
"version": "2.15.9"
"version": "2.15.10-beta.1"
},
"tauri": {
"allowlist": {
