diff --git a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
index 7568ee69be..7fcae3823c 100644
--- a/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
+++ b/src/renderer/src/aiCore/clients/openai/OpenAIApiClient.ts
@@ -5,7 +5,9 @@ import {
   GEMINI_FLASH_MODEL_REGEX,
   getOpenAIWebSearchParams,
   getThinkModelType,
+  isClaudeReasoningModel,
   isDoubaoThinkingAutoModel,
+  isGeminiReasoningModel,
   isGPT5SeriesModel,
   isGrokReasoningModel,
   isNotSupportSystemMessageModel,
@@ -46,6 +48,7 @@ import {
   Model,
   OpenAIServiceTier,
   Provider,
+  SystemProviderIds,
   ToolCallResponse,
   TranslateAssistant,
   WebSearchSource
@@ -557,17 +560,28 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       }
     }

-    const lastUserMsg = userMessages.findLast((m) => m.role === 'user')
-    if (
-      lastUserMsg &&
-      isSupportedThinkingTokenQwenModel(model) &&
-      !isSupportEnableThinkingProvider(this.provider)
-    ) {
-      const postsuffix = '/no_think'
-      const qwenThinkModeEnabled = assistant.settings?.qwenThinkMode === true
-      const currentContent = lastUserMsg.content
+    // poe requires reasoningEffort to be passed via the user message
+    const reasoningEffort = this.getReasoningEffort(assistant, model)

-      lastUserMsg.content = processPostsuffixQwen3Model(currentContent, postsuffix, qwenThinkModeEnabled) as any
+    const lastUserMsg = userMessages.findLast((m) => m.role === 'user')
+    if (lastUserMsg) {
+      if (isSupportedThinkingTokenQwenModel(model) && !isSupportEnableThinkingProvider(this.provider)) {
+        const postsuffix = '/no_think'
+        const qwenThinkModeEnabled = assistant.settings?.qwenThinkMode === true
+        const currentContent = lastUserMsg.content
+
+        lastUserMsg.content = processPostsuffixQwen3Model(currentContent, postsuffix, qwenThinkModeEnabled) as any
+      }
+      if (this.provider.id === SystemProviderIds.poe) {
+        // If poe supports the reasoning_effort parameter in the future, this part can be removed
+        if (isGPT5SeriesModel(model) && reasoningEffort.reasoning_effort) {
+          lastUserMsg.content += ` --reasoning_effort ${reasoningEffort.reasoning_effort}`
+        } else if (isClaudeReasoningModel(model) && reasoningEffort.thinking?.budget_tokens) {
+          lastUserMsg.content += ` --thinking_budget ${reasoningEffort.thinking.budget_tokens}`
+        } else if (isGeminiReasoningModel(model) && reasoningEffort.extra_body?.google?.thinking_config) {
+          lastUserMsg.content += ` --thinking_budget ${reasoningEffort.extra_body.google.thinking_config.thinking_budget}`
+        }
+      }
     }

     // 4. Final request messages
@@ -585,8 +599,6 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     // Note: Some providers like Mistral don't support stream_options
     const shouldIncludeStreamOptions = streamOutput && isSupportStreamOptionsProvider(this.provider)

-    const reasoningEffort = this.getReasoningEffort(assistant, model)
-
     // minimal cannot be used with web_search tool
     if (isGPT5SeriesModel(model) && reasoningEffort.reasoning_effort === 'minimal' && enableWebSearch) {
       reasoningEffort.reasoning_effort = 'low'
diff --git a/src/renderer/src/config/providers.ts b/src/renderer/src/config/providers.ts
index 6ee1f675b8..2e9bb483f8 100644
--- a/src/renderer/src/config/providers.ts
+++ b/src/renderer/src/config/providers.ts
@@ -1225,7 +1225,7 @@ export const PROVIDER_URLS: Record = {
   },
   poe: {
     api: {
-      url: 'https://api.poe.com/v1'
+      url: 'https://api.poe.com/v1/'
     },
     websites: {
       official: 'https://poe.com/',
diff --git a/src/renderer/src/types/sdk.ts b/src/renderer/src/types/sdk.ts
index 36608ab9fe..e897098963 100644
--- a/src/renderer/src/types/sdk.ts
+++ b/src/renderer/src/types/sdk.ts
@@ -81,7 +81,14 @@ export type ReasoningEffortOptionalParams = {
   thinking_budget?: number
   incremental_output?: boolean
   enable_reasoning?: boolean
-  extra_body?: Record
+  extra_body?: {
+    google?: {
+      thinking_config: {
+        thinking_budget: number
+        include_thoughts?: boolean
+      }
+    }
+  }
   // Add any other potential reasoning-related keys here if they exist
 }
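For reviewers unfamiliar with the Poe quirk this patch works around: below is a minimal, self-contained sketch of what the new branch effectively does to the last user message. It is illustrative only, not the PR's implementation; `ChatMessage` and `appendPoeReasoningFlags` are hypothetical stand-ins, the `ReasoningEffortOptionalParams` type is trimmed down to the fields used here, and the sketch keys off which parameter is populated instead of calling the real model-detection helpers (`isGPT5SeriesModel`, `isClaudeReasoningModel`, `isGeminiReasoningModel`) or mutating `lastUserMsg` in place.

```ts
// Standalone sketch (assumptions noted above), not the PR code itself.

// Assumed, trimmed-down shape of ReasoningEffortOptionalParams after this PR.
type ReasoningEffortOptionalParams = {
  reasoning_effort?: string
  thinking?: { budget_tokens?: number }
  extra_body?: {
    google?: {
      thinking_config: {
        thinking_budget: number
        include_thoughts?: boolean
      }
    }
  }
}

// Hypothetical message type for illustration.
type ChatMessage = { role: 'system' | 'user' | 'assistant'; content: string }

// Hypothetical helper mirroring the branches added in OpenAIApiClient.ts:
// Poe only sees the user message text, so reasoning settings travel as suffix flags.
function appendPoeReasoningFlags(message: ChatMessage, params: ReasoningEffortOptionalParams): ChatMessage {
  if (params.reasoning_effort) {
    // GPT-5 series: Poe reads the effort level from a trailing --reasoning_effort flag.
    return { ...message, content: `${message.content} --reasoning_effort ${params.reasoning_effort}` }
  }
  if (params.thinking?.budget_tokens) {
    // Claude reasoning models: the token budget is passed as --thinking_budget.
    return { ...message, content: `${message.content} --thinking_budget ${params.thinking.budget_tokens}` }
  }
  const geminiBudget = params.extra_body?.google?.thinking_config?.thinking_budget
  if (geminiBudget !== undefined) {
    // Gemini reasoning models: same flag name, budget taken from extra_body.google.thinking_config.
    return { ...message, content: `${message.content} --thinking_budget ${geminiBudget}` }
  }
  return message
}

// Example: a Claude-style budget turns "Explain monads" into "Explain monads --thinking_budget 2048".
console.log(appendPoeReasoningFlags({ role: 'user', content: 'Explain monads' }, { thinking: { budget_tokens: 2048 } }))
```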