Fix/gemini (#7659)

* refactor: update Gemini and OpenAI API clients for improved reasoning model handling

- Replaced isGeminiReasoningModel with isSupportedThinkingTokenGeminiModel in GeminiAPIClient for better model validation.
- Enhanced OpenAIAPIClient to support additional configurations for reasoning efforts and thinking budgets based on model type.
- Introduced new thinking tags for Gemini models in ThinkingTagExtractionMiddleware.
- Updated model checks in models.ts to streamline reasoning model identification.
- Adjusted ThinkingButton component to differentiate between Gemini and Gemini Pro models based on regex checks.

* refactor(GeminiAPIClient): streamline reasoning configuration handling

- Simplified the logic for returning thinking configuration when reasoningEffort is undefined in GeminiAPIClient.
- Updated ApiService to include enableReasoning flag for API calls, enhancing control over reasoning capabilities.

* fix(OpenAIAPIClient): add support for non-flash Gemini models in reasoning configuration

- Introduced a check for non-flash models in the OpenAIAPIClient to enhance reasoning configuration handling for supported Gemini models.
- This change ensures that reasoning is correctly configured based on the model type, improving overall model validation.
This commit is contained in:
SuYao 2025-06-30 13:51:23 +08:00 committed by GitHub
parent 1034b94628
commit a9a9d884ce
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 71 additions and 20 deletions

View File

@@ -22,8 +22,8 @@ import { GenericChunk } from '@renderer/aiCore/middleware/schemas'
import {
findTokenLimit,
GEMINI_FLASH_MODEL_REGEX,
isGeminiReasoningModel,
isGemmaModel,
isSupportedThinkingTokenGeminiModel,
isVisionModel
} from '@renderer/config/models'
import { CacheService } from '@renderer/services/CacheService'
@@ -393,29 +393,29 @@ export class GeminiAPIClient extends BaseApiClient<
* @returns The reasoning effort
*/
private getBudgetToken(assistant: Assistant, model: Model) {
if (isGeminiReasoningModel(model)) {
if (isSupportedThinkingTokenGeminiModel(model)) {
const reasoningEffort = assistant?.settings?.reasoning_effort
// 如果thinking_budget是undefined不思考
if (reasoningEffort === undefined) {
return {
thinkingConfig: {
includeThoughts: false,
...(GEMINI_FLASH_MODEL_REGEX.test(model.id) ? { thinkingBudget: 0 } : {})
} as ThinkingConfig
}
return GEMINI_FLASH_MODEL_REGEX.test(model.id)
? {
thinkingConfig: {
thinkingBudget: 0
}
}
: {}
}
const effortRatio = EFFORT_RATIO[reasoningEffort]
if (effortRatio > 1) {
if (reasoningEffort === 'auto') {
return {
thinkingConfig: {
includeThoughts: true
includeThoughts: true,
thinkingBudget: -1
}
}
}
const effortRatio = EFFORT_RATIO[reasoningEffort]
const { min, max } = findTokenLimit(model.id) || { min: 0, max: 0 }
// 计算 budgetTokens确保不低于 min
const budget = Math.floor((max - min) * effortRatio + min)

View File

@@ -114,6 +114,9 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
if (!reasoningEffort) {
if (model.provider === 'openrouter') {
if (isSupportedThinkingTokenGeminiModel(model) && !GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
return {}
}
return { reasoning: { enabled: false, exclude: true } }
}
if (isSupportedThinkingTokenQwenModel(model)) {
@@ -126,7 +129,15 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
if (isSupportedThinkingTokenGeminiModel(model)) {
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
return { reasoning_effort: 'none' }
return {
extra_body: {
google: {
thinking_config: {
thinking_budget: 0
}
}
}
}
}
return {}
}
@@ -169,12 +180,37 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
}
// OpenAI models
if (isSupportedReasoningEffortOpenAIModel(model) || isSupportedThinkingTokenGeminiModel(model)) {
if (isSupportedReasoningEffortOpenAIModel(model)) {
return {
reasoning_effort: reasoningEffort
}
}
if (isSupportedThinkingTokenGeminiModel(model)) {
if (reasoningEffort === 'auto') {
return {
extra_body: {
google: {
thinking_config: {
thinking_budget: -1,
include_thoughts: true
}
}
}
}
}
return {
extra_body: {
google: {
thinking_config: {
thinking_budget: budgetTokens,
include_thoughts: true
}
}
}
}
}
// Claude models
if (isSupportedThinkingTokenClaudeModel(model)) {
const maxTokens = assistant.settings?.maxTokens

View File

@@ -11,11 +11,13 @@ export const MIDDLEWARE_NAME = 'ThinkingTagExtractionMiddleware'
// 不同模型的思考标签配置
const reasoningTags: TagConfig[] = [
{ openingTag: '<think>', closingTag: '</think>', separator: '\n' },
{ openingTag: '<thought>', closingTag: '</thought>', separator: '\n' },
{ openingTag: '###Thinking', closingTag: '###Response', separator: '\n' }
]
const getAppropriateTag = (model?: Model): TagConfig => {
if (model?.id?.includes('qwen3')) return reasoningTags[0]
if (model?.id?.includes('gemini-2.5')) return reasoningTags[1]
// 可以在这里添加更多模型特定的标签配置
return reasoningTags[0] // 默认使用 <think> 标签
}

View File

@@ -2509,14 +2509,16 @@ export function isGeminiReasoningModel(model?: Model): boolean {
return true
}
if (model.id.includes('gemini-2.5')) {
if (isSupportedThinkingTokenGeminiModel(model)) {
return true
}
return false
}
export const isSupportedThinkingTokenGeminiModel = isGeminiReasoningModel
export const isSupportedThinkingTokenGeminiModel = (model: Model): boolean => {
return model.id.includes('gemini-2.5')
}
export function isQwenReasoningModel(model?: Model): boolean {
if (!model) {

View File

@@ -7,6 +7,7 @@ import {
} from '@renderer/components/Icons/SVGIcon'
import { useQuickPanel } from '@renderer/components/QuickPanel'
import {
GEMINI_FLASH_MODEL_REGEX,
isDoubaoThinkingAutoModel,
isSupportedReasoningEffortGrokModel,
isSupportedThinkingTokenDoubaoModel,
@@ -37,13 +38,14 @@ const MODEL_SUPPORTED_OPTIONS: Record<string, ThinkingOption[]> = {
default: ['off', 'low', 'medium', 'high'],
grok: ['off', 'low', 'high'],
gemini: ['off', 'low', 'medium', 'high', 'auto'],
gemini_pro: ['low', 'medium', 'high', 'auto'],
qwen: ['off', 'low', 'medium', 'high'],
doubao: ['off', 'auto', 'high']
}
// 选项转换映射表:当选项不支持时使用的替代选项
const OPTION_FALLBACK: Record<ThinkingOption, ThinkingOption> = {
off: 'off',
off: 'low', // off -> low (for Gemini Pro models)
low: 'high',
medium: 'high', // medium -> high (for Grok models)
high: 'high',
@@ -57,6 +59,7 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
const isGrokModel = isSupportedReasoningEffortGrokModel(model)
const isGeminiModel = isSupportedThinkingTokenGeminiModel(model)
const isGeminiFlashModel = GEMINI_FLASH_MODEL_REGEX.test(model.id)
const isQwenModel = isSupportedThinkingTokenQwenModel(model)
const isDoubaoModel = isSupportedThinkingTokenDoubaoModel(model)
@@ -66,12 +69,18 @@ const ThinkingButton: FC<Props> = ({ ref, model, assistant, ToolbarButton }): Re
// 确定当前模型支持的选项类型
const modelType = useMemo(() => {
if (isGeminiModel) return 'gemini'
if (isGeminiModel) {
if (isGeminiFlashModel) {
return 'gemini'
} else {
return 'gemini_pro'
}
}
if (isGrokModel) return 'grok'
if (isQwenModel) return 'qwen'
if (isDoubaoModel) return 'doubao'
return 'default'
}, [isGeminiModel, isGrokModel, isQwenModel, isDoubaoModel])
}, [isGeminiModel, isGrokModel, isQwenModel, isDoubaoModel, isGeminiFlashModel])
// 获取当前模型支持的选项
const supportedOptions = useMemo(() => {

View File

@@ -607,6 +607,7 @@ export async function checkApi(provider: Provider, model: Model): Promise<void>
messages: 'hi',
assistant,
streamOutput: true,
enableReasoning: false,
shouldThrow: true
}

View File

@@ -53,6 +53,7 @@ export type ReasoningEffortOptionalParams = {
enable_thinking?: boolean
thinking_budget?: number
enable_reasoning?: boolean
extra_body?: Record<string, any>
// Add any other potential reasoning-related keys here if they exist
}