fix: update deepseek logic to match deepseek v3.2 (#11648)

* fix: update deepseek dependency to version 1.0.31 and improve provider creation logging

* chore

* feat: deepseek official hybrid infer

* fix: deepseek-v3.2-speciale tool use and reasoning

* fix: add support for fixed reasoning models and update related logic

* refactor: simplify logic

* feat: aihubmix

* all system_providers

* feat: cherryin

* temp fix

* fix: address PR review feedback for DeepSeek v3.2 implementation

- Add default case in buildCherryInProviderOptions to fallback to genericProviderOptions
- Add clarifying comment for switch fall-through in reasoning.ts
- Add comprehensive test coverage for isFixedReasoningModel (negative cases)
- Add test coverage for new provider whitelist (deepseek, cherryin, new-api, aihubmix, sophnet, dmxapi)
- Add test coverage for isDeepSeekHybridInferenceModel prefix patterns
- Verify function calling logic works correctly via regex matching after removing provider-based checks
- Use includes() for deepseek-chat matching to support potential variants (sketched below)
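A rough standalone sketch of the matching behavior described above (the regex is the one this PR adds; the helper name and sample calls are illustrative, not code from the repo):

```ts
// Hypothetical helper mirroring the core of isDeepSeekHybridInferenceModel.
const looksLikeHybridDeepSeek = (rawId: string): boolean => {
  const id = rawId.toLowerCase()
  return (
    // "deepseek-v3" + ".digit" or "-digit", optional suffix, never "-speciale"
    /(\w+-)?deepseek-v3(?:\.\d|-\d)(?:(\.|-)(?!speciale$)\w+)?$/.test(id) ||
    // includes() rather than equality, so variants like "agent/deepseek-chat" still match
    id.includes('deepseek-chat')
  )
}

console.log(looksLikeHybridDeepSeek('deepseek-v3.2')) // true
console.log(looksLikeHybridDeepSeek('deepseek-v3.2-speciale')) // false
console.log(looksLikeHybridDeepSeek('agent/deepseek-chat')) // true
```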

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

* fix: remove unnecessary fall-through case for unknown providers in getReasoningEffort

---------

Co-authored-by: Claude <noreply@anthropic.com>
SuYao 2025-12-04 19:13:51 +08:00 committed by GitHub
parent 9637fb8a43
commit 981bb9f451
20 changed files with 216 additions and 43 deletions

View File

@@ -41,6 +41,7 @@
"ai": "^5.0.26"
},
"dependencies": {
"@ai-sdk/openai-compatible": "^1.0.28",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.17"
},

View File

@@ -2,7 +2,6 @@ import { AnthropicMessagesLanguageModel } from '@ai-sdk/anthropic/internal'
import { GoogleGenerativeAILanguageModel } from '@ai-sdk/google/internal'
import type { OpenAIProviderSettings } from '@ai-sdk/openai'
import {
OpenAIChatLanguageModel,
OpenAICompletionLanguageModel,
OpenAIEmbeddingModel,
OpenAIImageModel,
@@ -10,6 +9,7 @@ import {
OpenAISpeechModel,
OpenAITranscriptionModel
} from '@ai-sdk/openai/internal'
import { OpenAICompatibleChatLanguageModel } from '@ai-sdk/openai-compatible'
import {
type EmbeddingModelV2,
type ImageModelV2,
@@ -118,7 +118,7 @@ const createCustomFetch = (originalFetch?: any) => {
return originalFetch ? originalFetch(url, options) : fetch(url, options)
}
}
class CherryInOpenAIChatLanguageModel extends OpenAIChatLanguageModel {
class CherryInOpenAIChatLanguageModel extends OpenAICompatibleChatLanguageModel {
constructor(modelId: string, settings: any) {
super(modelId, {
...settings,

View File

@@ -41,7 +41,7 @@
"dependencies": {
"@ai-sdk/anthropic": "^2.0.49",
"@ai-sdk/azure": "^2.0.74",
"@ai-sdk/deepseek": "^1.0.29",
"@ai-sdk/deepseek": "^1.0.31",
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.17",

View File

@@ -14,9 +14,9 @@ import { isBaseProvider } from '@cherrystudio/ai-core/core/providers/schemas'
import { loggerService } from '@logger'
import {
isAnthropicModel,
isFixedReasoningModel,
isGenerateImageModel,
isOpenRouterBuiltInWebSearchModel,
isReasoningModel,
isSupportedReasoningEffortModel,
isSupportedThinkingTokenModel,
isWebSearchModel
@@ -83,7 +83,7 @@ export async function buildStreamTextParams(
const enableReasoning =
((isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model)) &&
assistant.settings?.reasoning_effort !== undefined) ||
(isReasoningModel(model) && (!isSupportedThinkingTokenModel(model) || !isSupportedReasoningEffortModel(model)))
isFixedReasoningModel(model)
// Determine whether to use built-in web search
// Condition: no external search provider && (user has enabled built-in search || the model forces built-in search)

View File

@@ -56,6 +56,7 @@ function tryResolveProviderId(identifier: string): ProviderId | null {
/**
* AI SDK Provider ID
*
* TODO: clean up this function's logic
*/
export function getAiSdkProviderId(provider: Provider): string {
// 1. Try to resolve provider.id

View File

@@ -26,6 +26,7 @@ import {
isNewApiProvider,
isOllamaProvider,
isPerplexityProvider,
isSupportStreamOptionsProvider,
isVertexProvider
} from '@renderer/utils/provider'
import { cloneDeep, isEmpty } from 'lodash'
@@ -286,7 +287,7 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
...options,
name: actualProvider.id,
...extraOptions,
includeUsage: true
includeUsage: isSupportStreamOptionsProvider(actualProvider)
}
}
}
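For context, includeUsage corresponds to the OpenAI-style stream_options.include_usage flag in @ai-sdk/openai-compatible, and not every compatible provider accepts that field; gating it per provider avoids rejected requests. A hedged sketch of the resulting request bodies (field names follow the OpenAI chat-completions API; the exact serialization is up to the SDK):

```ts
// includeUsage: true — the stream's final chunk reports token usage
const withUsage = {
  model: 'deepseek-chat',
  stream: true,
  stream_options: { include_usage: true }
}

// includeUsage: false — stream_options is omitted entirely
const withoutUsage = {
  model: 'deepseek-chat',
  stream: true
}
```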

View File

@@ -275,7 +275,9 @@ export function buildProviderOptions(
}[rawProviderId] || rawProviderId
if (rawProviderKey === 'cherryin') {
rawProviderKey = { gemini: 'google', ['openai-response']: 'openai' }[actualProvider.type] || actualProvider.type
rawProviderKey =
{ gemini: 'google', ['openai-response']: 'openai', openai: 'cherryin' }[actualProvider.type] ||
actualProvider.type
}
// Return the format the AI Core SDK expects: { 'providerId': providerOptions }, plus the extracted standard parameters
@@ -440,6 +442,7 @@ function buildCherryInProviderOptions(
): OpenAIResponsesProviderOptions | AnthropicProviderOptions | GoogleGenerativeAIProviderOptions {
switch (actualProvider.type) {
case 'openai':
return buildGenericProviderOptions(assistant, model, capabilities)
case 'openai-response':
return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier, textVerbosity)
@@ -448,8 +451,10 @@
case 'gemini':
return buildGeminiProviderOptions(assistant, model, capabilities)
default:
return buildGenericProviderOptions(assistant, model, capabilities)
}
return {}
}
/**

View File

@@ -250,9 +250,25 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
enable_thinking: true,
incremental_output: true
}
// TODO: support the new-api provider type
case SystemProviderIds['new-api']:
case SystemProviderIds.cherryin: {
return {
extra_body: {
thinking: {
type: 'enabled' // auto is invalid
}
}
}
}
case SystemProviderIds.hunyuan:
case SystemProviderIds['tencent-cloud-ti']:
case SystemProviderIds.doubao:
case SystemProviderIds.deepseek:
case SystemProviderIds.aihubmix:
case SystemProviderIds.sophnet:
case SystemProviderIds.ppio:
case SystemProviderIds.dmxapi:
return {
thinking: {
type: 'enabled' // auto is invalid
@@ -274,8 +290,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
logger.warn(
`Skipping thinking options for provider ${provider.name} as DeepSeek v3.1 thinking control method is unknown`
)
case SystemProviderIds.silicon:
// specially handled before
}
}
}
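The two return shapes above differ only in nesting: aggregator providers (new-api, cherryin) need the flag wrapped in extra_body for pass-through, while the direct providers take it at the top level. A minimal illustration (the ThinkingFlag alias is mine, not from the codebase):

```ts
type ThinkingFlag = { thinking: { type: 'enabled' | 'disabled' } }

// new-api / cherryin: wrapped so the aggregator forwards it upstream
const aggregatorShape: { extra_body: ThinkingFlag } = {
  extra_body: { thinking: { type: 'enabled' } } // 'auto' is invalid
}

// hunyuan, doubao, deepseek, aihubmix, sophnet, ppio, dmxapi: top level
const directShape: ThinkingFlag = { thinking: { type: 'enabled' } }
```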

View File

@@ -12,6 +12,7 @@ import {
isDeepSeekHybridInferenceModel,
isDoubaoSeedAfter251015,
isDoubaoThinkingAutoModel,
isFixedReasoningModel,
isGeminiReasoningModel,
isGrok4FastReasoningModel,
isHunyuanReasoningModel,
@@ -356,6 +357,10 @@ describe('DeepSeek & Thinking Tokens', () => {
)
).toBe(true)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v2' }))).toBe(false)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.2' }))).toBe(true)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2' }))).toBe(true)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-chat' }))).toBe(true)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.2-speciale' }))).toBe(false)
const allowed = createModel({ id: 'deepseek-v3.1', provider: 'doubao' })
expect(isSupportedThinkingTokenModel(allowed)).toBe(true)
@@ -364,6 +369,37 @@ describe('DeepSeek & Thinking Tokens', () => {
expect(isSupportedThinkingTokenModel(disallowed)).toBe(false)
})
it('supports DeepSeek v3.1+ models from newly added providers', () => {
// Test newly added providers for DeepSeek thinking token support
const newProviders = ['deepseek', 'cherryin', 'new-api', 'aihubmix', 'sophnet', 'dmxapi']
newProviders.forEach((provider) => {
const model = createModel({ id: 'deepseek-v3.1', provider })
expect(
isSupportedThinkingTokenModel(model),
`Provider ${provider} should support thinking tokens for deepseek-v3.1`
).toBe(true)
})
})
it('tests various prefix patterns for isDeepSeekHybridInferenceModel', () => {
// Test with custom prefixes
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'custom-deepseek-v3.2' }))).toBe(true)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'prefix-deepseek-v3.1' }))).toBe(true)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2' }))).toBe(true)
// Test that speciale is properly excluded
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'custom-deepseek-v3.2-speciale' }))).toBe(false)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2-speciale' }))).toBe(false)
// Test basic deepseek-chat
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-chat' }))).toBe(true)
// Test version variations
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.1.2' }))).toBe(true)
expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3-1' }))).toBe(true)
})
it('supports Gemini thinking models while filtering image variants', () => {
expect(isSupportedThinkingTokenModel(createModel({ id: 'gemini-2.5-flash-latest' }))).toBe(true)
expect(isSupportedThinkingTokenModel(createModel({ id: 'gemini-2.5-flash-image' }))).toBe(false)
@@ -535,6 +571,41 @@ describe('isReasoningModel', () => {
const magistral = createModel({ id: 'magistral-reasoning' })
expect(isReasoningModel(magistral)).toBe(true)
})
it('identifies fixed reasoning models', () => {
const models = [
'deepseek-reasoner',
'o1-preview',
'o1-mini',
'qwq-32b-preview',
'step-3-minimax',
'generic-reasoning-model',
'some-random-model-thinking',
'some-random-model-think',
'deepseek-v3.2-speciale'
]
models.forEach((id) => {
const model = createModel({ id })
expect(isFixedReasoningModel(model), `Model ${id} should be reasoning`).toBe(true)
})
})
it('excludes non-fixed reasoning models from isFixedReasoningModel', () => {
// Models that support thinking tokens or reasoning effort should NOT be fixed reasoning models
const nonFixedModels = [
{ id: 'deepseek-v3.2', provider: 'deepseek' }, // Supports thinking tokens
{ id: 'deepseek-chat', provider: 'deepseek' }, // Supports thinking tokens
{ id: 'claude-3-opus-20240229', provider: 'anthropic' }, // Supports thinking tokens via extended_thinking
{ id: 'gpt-4o', provider: 'openai' }, // Not a reasoning model at all
{ id: 'gpt-4', provider: 'openai' } // Not a reasoning model at all
]
nonFixedModels.forEach(({ id, provider }) => {
const model = createModel({ id, provider })
expect(isFixedReasoningModel(model), `Model ${id} should NOT be fixed reasoning`).toBe(false)
})
})
})
describe('Thinking model classification', () => {

View File

@@ -117,12 +117,8 @@ describe('isFunctionCallingModel', () => {
it('excludes explicitly blocked ids', () => {
expect(isFunctionCallingModel(createModel({ id: 'gemini-1.5-flash' }))).toBe(false)
})
it('forces support for trusted providers', () => {
for (const provider of ['deepseek', 'anthropic', 'kimi', 'moonshot']) {
expect(isFunctionCallingModel(createModel({ provider }))).toBe(true)
}
expect(isFunctionCallingModel(createModel({ id: 'deepseek-v3.2-speciale' }))).toBe(false)
expect(isFunctionCallingModel(createModel({ id: 'deepseek/deepseek-v3.2-speciale' }))).toBe(false)
})
it('returns true when identified as deepseek hybrid inference model', () => {
@@ -134,4 +130,19 @@
deepSeekHybridMock.mockReturnValueOnce(true)
expect(isFunctionCallingModel(createModel({ id: 'deepseek-v3-1', provider: 'dashscope' }))).toBe(false)
})
it('supports anthropic models through claude regex match', () => {
expect(isFunctionCallingModel(createModel({ id: 'claude-3-5-sonnet', provider: 'anthropic' }))).toBe(true)
expect(isFunctionCallingModel(createModel({ id: 'claude-3-opus', provider: 'anthropic' }))).toBe(true)
})
it('supports kimi models through kimi-k2 regex match', () => {
expect(isFunctionCallingModel(createModel({ id: 'kimi-k2-0711-preview', provider: 'moonshot' }))).toBe(true)
expect(isFunctionCallingModel(createModel({ id: 'kimi-k2', provider: 'kimi' }))).toBe(true)
})
it('supports deepseek models through deepseek regex match', () => {
expect(isFunctionCallingModel(createModel({ id: 'deepseek-chat', provider: 'deepseek' }))).toBe(true)
expect(isFunctionCallingModel(createModel({ id: 'deepseek-coder', provider: 'deepseek' }))).toBe(true)
})
})

View File

@@ -21,7 +21,7 @@ import { isTextToImageModel } from './vision'
// Reasoning models
export const REASONING_REGEX =
/^(?!.*-non-reasoning\b)(o\d+(?:-[\w-]+)?|.*\b(?:reasoning|reasoner|thinking)\b.*|.*-[rR]\d+.*|.*\bqwq(?:-[\w-]+)?\b.*|.*\bhunyuan-t1(?:-[\w-]+)?\b.*|.*\bglm-zero-preview\b.*|.*\bgrok-(?:3-mini|4|4-fast)(?:-[\w-]+)?\b.*)$/i
/^(?!.*-non-reasoning\b)(o\d+(?:-[\w-]+)?|.*\b(?:reasoning|reasoner|thinking|think)\b.*|.*-[rR]\d+.*|.*\bqwq(?:-[\w-]+)?\b.*|.*\bhunyuan-t1(?:-[\w-]+)?\b.*|.*\bglm-zero-preview\b.*|.*\bgrok-(?:3-mini|4|4-fast)(?:-[\w-]+)?\b.*)$/i
// Mapping table from model type to supported reasoning_effort values
// TODO: refactor this. too many identical options
@@ -161,7 +161,13 @@ function _isSupportedThinkingTokenModel(model: Model): boolean {
'nvidia',
'ppio',
'hunyuan',
'tencent-cloud-ti'
'tencent-cloud-ti',
'deepseek',
'cherryin',
'new-api',
'aihubmix',
'sophnet',
'dmxapi'
] satisfies SystemProviderId[]
).some((id) => id === model.provider)
}
@@ -462,15 +468,19 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
export const isDeepSeekHybridInferenceModel = (model: Model) => {
const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
const modelId = getLowerBaseModelName(model.id)
// Official DeepSeek switches reasoning via deepseek-chat vs deepseek-reasoner; other providers need their own checks, and ids may differ
// openrouter: deepseek/deepseek-chat-v3.1. Another provider could mimic DeepSeek and reuse the same id for a non-thinking model, so this check carries some risk
// We assume here that every deepseek-chat is deepseek-v3.2
// Matches: "deepseek-v3" followed by ".digit" or "-digit".
// Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
// until the end of the string.
// Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
// Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
// TODO: move to utils and add test cases
return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
return (
/(\w+-)?deepseek-v3(?:\.\d|-\d)(?:(\.|-)(?!speciale$)\w+)?$/.test(modelId) ||
modelId.includes('deepseek-chat-v3.1') ||
modelId.includes('deepseek-chat')
)
})
return idResult || nameResult
}
@@ -545,7 +555,8 @@ export function isReasoningModel(model?: Model): boolean {
isMiniMaxReasoningModel(model) ||
modelId.includes('magistral') ||
modelId.includes('pangu-pro-moe') ||
modelId.includes('seed-oss')
modelId.includes('seed-oss') ||
modelId.includes('deepseek-v3.2-speciale')
) {
return true
}
@@ -596,3 +607,17 @@ export const findTokenLimit = (modelId: string): { min: number; max: number } |
}
return undefined
}
/**
* Determines if a model is a fixed reasoning model.
*
* A model is considered a fixed reasoning model if it meets all of the following criteria:
* - It is a reasoning model
* - It does NOT support thinking tokens
* - It does NOT support reasoning effort
*
* @param model - The model to check
* @returns `true` if the model is a fixed reasoning model, `false` otherwise
*/
export const isFixedReasoningModel = (model: Model) =>
isReasoningModel(model) && !isSupportedThinkingTokenModel(model) && !isSupportedReasoningEffortModel(model)
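Putting the pieces together: deepseek-v3.2-speciale now counts as a reasoning model via the new substring check, yet supports neither thinking tokens nor reasoning effort, so it classifies as fixed reasoning; the hybrid deepseek-v3.2 does not. A self-contained sketch with stubbed predicates (the real ones are the module functions above):

```ts
// Stubs approximating the module's predicates for these two ids only.
const isReasoning = (id: string) => /reasoner|thinking|speciale|deepseek-v3/.test(id)
const supportsThinkingToggle = (id: string) => /deepseek-v3[.-]\d/.test(id) && !id.endsWith('speciale')
const supportsReasoningEffort = (_id: string) => false

const isFixedReasoning = (id: string) =>
  isReasoning(id) && !supportsThinkingToggle(id) && !supportsReasoningEffort(id)

console.log(isFixedReasoning('deepseek-v3.2-speciale')) // true: always reasons, no toggle to show
console.log(isFixedReasoning('deepseek-v3.2')) // false: hybrid, the thinking toggle applies
```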

View File

@@ -44,7 +44,8 @@ const FUNCTION_CALLING_EXCLUDED_MODELS = [
'glm-4\\.5v',
'gemini-2.5-flash-image(?:-[\\w-]+)?',
'gemini-2.0-flash-preview-image-generation',
'gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?'
'gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?',
'deepseek-v3.2-speciale'
]
export const FUNCTION_CALLING_REGEX = new RegExp(
@@ -67,10 +68,6 @@ export function isFunctionCallingModel(model?: Model): boolean {
return FUNCTION_CALLING_REGEX.test(modelId) || FUNCTION_CALLING_REGEX.test(model.name)
}
if (['deepseek', 'anthropic', 'kimi', 'moonshot'].includes(model.provider)) {
return true
}
// 2025/08/26: neither Bailian nor Volcengine supports v3.1 function calling
// Default to supported for now
if (isDeepSeekHybridInferenceModel(model)) {

View File

@@ -53,7 +53,10 @@ const visionAllowedModels = [
'llama-4(?:-[\\w-]+)?',
'step-1o(?:.*vision)?',
'step-1v(?:-[\\w-]+)?',
'qwen-omni(?:-[\\w-]+)?'
'qwen-omni(?:-[\\w-]+)?',
'mistral-large-(2512|latest)',
'mistral-medium-(2508|latest)',
'mistral-small-(2506|latest)'
]
const visionExcludedModels = [

View File

@@ -927,7 +927,7 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
websites: {
official: 'https://www.dmxapi.cn/register?aff=bwwY',
apiKey: 'https://www.dmxapi.cn/register?aff=bwwY',
docs: 'https://dmxapi.cn/models.html#code-block',
docs: 'https://doc.dmxapi.cn/',
models: 'https://www.dmxapi.cn/pricing'
}
},

View File

@@ -11,6 +11,7 @@ import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/Qu
import {
getThinkModelType,
isDoubaoThinkingAutoModel,
isFixedReasoningModel,
isGPT5SeriesReasoningModel,
isOpenAIWebSearchModel,
MODEL_SUPPORTED_OPTIONS
@@ -42,6 +43,8 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
// Determine which option types the current model supports
const modelType = useMemo(() => getThinkModelType(model), [model])
const isFixedReasoning = isFixedReasoningModel(model)
// Get the options supported by the current model
const supportedOptions: ThinkingOption[] = useMemo(() => {
if (modelType === 'doubao') {
@@ -111,6 +114,8 @@
}, [quickPanelHook, panelItems, t])
const handleOpenQuickPanel = useCallback(() => {
if (isFixedReasoning) return
if (quickPanelHook.isVisible && quickPanelHook.symbol === QuickPanelReservedSymbol.Thinking) {
quickPanelHook.close()
return
@@ -121,9 +126,11 @@
return
}
openQuickPanel()
}, [openQuickPanel, quickPanelHook, isThinkingEnabled, supportedOptions, disableThinking])
}, [openQuickPanel, quickPanelHook, isThinkingEnabled, supportedOptions, disableThinking, isFixedReasoning])
useEffect(() => {
if (isFixedReasoning) return
const disposeMenu = quickPanel.registerRootMenu([
{
label: t('assistants.settings.reasoning_effort.label'),
@@ -140,10 +147,11 @@
disposeMenu()
disposeTrigger()
}
}, [currentReasoningEffort, openQuickPanel, quickPanel, t])
}, [currentReasoningEffort, openQuickPanel, quickPanel, t, isFixedReasoning])
const ariaLabel =
isThinkingEnabled && supportedOptions.includes('none')
const ariaLabel = isFixedReasoning
? t('chat.input.thinking.label')
: isThinkingEnabled && supportedOptions.includes('none')
? t('common.close')
: t('assistants.settings.reasoning_effort.label')
@@ -151,9 +159,10 @@
<Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
<ActionIconButton
onClick={handleOpenQuickPanel}
active={currentReasoningEffort !== 'none'}
active={isFixedReasoning || currentReasoningEffort !== 'none'}
aria-label={ariaLabel}
aria-pressed={currentReasoningEffort !== 'none'}>
aria-pressed={currentReasoningEffort !== 'none'}
style={isFixedReasoning ? { cursor: 'default' } : undefined}>
{ThinkingIcon(currentReasoningEffort)}
</ActionIconButton>
</Tooltip>

View File

@@ -1,4 +1,4 @@
import { isSupportedReasoningEffortModel, isSupportedThinkingTokenModel } from '@renderer/config/models'
import { isReasoningModel } from '@renderer/config/models'
import ThinkingButton from '@renderer/pages/home/Inputbar/tools/components/ThinkingButton'
import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
@@ -6,7 +6,7 @@ const thinkingTool = defineTool({
key: 'thinking',
label: (t) => t('chat.input.thinking.label'),
visibleInScopes: [TopicType.Chat],
condition: ({ model }) => isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model),
condition: ({ model }) => isReasoningModel(model),
render: ({ assistant, model, quickPanel }) => (
<ThinkingButton quickPanel={quickPanel} model={model} assistantId={assistant.id} />
)

View File

@@ -96,6 +96,9 @@ export type ReasoningEffortOptionalParams = {
include_thoughts?: boolean
}
}
thinking?: {
type: 'enabled' | 'disabled'
}
thinking_budget?: number
reasoning_effort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'auto'
}
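The new optional thinking field is the shape getReasoningEffort returns for the DeepSeek-style providers earlier in this diff. A minimal construction sketch (assuming the type above is in scope where params are assembled):

```ts
// Hypothetical usage of the extended type; values mirror the diff above.
const reasoningParams /* : ReasoningEffortOptionalParams */ = {
  thinking: { type: 'enabled' as const },
  thinking_budget: 4096 // pre-existing optional budget field
}
```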

View File

@@ -222,6 +222,9 @@ describe('naming', () => {
it('should remove trailing :free', () => {
expect(getLowerBaseModelName('gpt-4:free')).toBe('gpt-4')
})
it('should remove trailing (free)', () => {
expect(getLowerBaseModelName('agent/gpt-4(free)')).toBe('gpt-4')
})
})
describe('getFirstCharacter', () => {

View File

@@ -79,6 +79,10 @@ export const getLowerBaseModelName = (id: string, delimiter: string = '/'): stri
if (baseModelName.endsWith(':free')) {
return baseModelName.replace(':free', '')
}
// for cherryin
if (baseModelName.endsWith('(free)')) {
return baseModelName.replace('(free)', '')
}
return baseModelName
}
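The (free) branch mirrors the :free handling just above it. A simplified self-contained restatement (the real function also takes a configurable delimiter):

```ts
// Minimal sketch: lowercase, keep the last '/'-segment, strip free-tier suffixes.
const lowerBaseName = (id: string): string => {
  let base = id.toLowerCase().split('/').pop() ?? ''
  if (base.endsWith(':free')) base = base.replace(':free', '')
  if (base.endsWith('(free)')) base = base.replace('(free)', '') // for cherryin
  return base
}

console.log(lowerBaseName('agent/gpt-4(free)')) // 'gpt-4'
console.log(lowerBaseName('gpt-4:free')) // 'gpt-4'
```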

View File

@@ -128,16 +128,15 @@ __metadata:
languageName: node
linkType: hard
"@ai-sdk/deepseek@npm:^1.0.29":
version: 1.0.29
resolution: "@ai-sdk/deepseek@npm:1.0.29"
"@ai-sdk/deepseek@npm:^1.0.31":
version: 1.0.31
resolution: "@ai-sdk/deepseek@npm:1.0.31"
dependencies:
"@ai-sdk/openai-compatible": "npm:1.0.27"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@ai-sdk/provider-utils": "npm:3.0.18"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/f43fba5c72e3f2d8ddc79d68c656cb4fc5fcd488c97b0a5371ad728e2d5c7a8c61fe9125a2a471b7648d99646cd2c78aad2d462c1469942bb4046763c5f13f38
checksum: 10c0/851965392ce03c85ffacf74900ec159bccef491b9bf6142ac08bc25f4d2bbf4df1d754e76fe9793403dee4a8da76fb6b7a9ded84491ec309bdea9aa478e6f542
languageName: node
linkType: hard
@@ -243,6 +242,18 @@
languageName: node
linkType: hard
"@ai-sdk/openai-compatible@npm:^1.0.28":
version: 1.0.28
resolution: "@ai-sdk/openai-compatible@npm:1.0.28"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.18"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/f484774e0094a12674f392d925038a296191723b4c76bd833eabf1b334cf3c84fe77a2e2c5fbac974ec5e18340e113c6a81c86d957c9529a7a60e87cd390ada8
languageName: node
linkType: hard
"@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch":
version: 1.0.27
resolution: "@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch::version=1.0.27&hash=c44b76"
@@ -304,6 +315,19 @@
languageName: node
linkType: hard
"@ai-sdk/provider-utils@npm:3.0.18":
version: 3.0.18
resolution: "@ai-sdk/provider-utils@npm:3.0.18"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@standard-schema/spec": "npm:^1.0.0"
eventsource-parser: "npm:^3.0.6"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/209c15b0dceef0ba95a7d3de544be0a417ad4a0bd5143496b3966a35fedf144156d93a42ff8c3d7db56781b9836bafc8c132c98978c49240e55bc1a36e18a67f
languageName: node
linkType: hard
"@ai-sdk/provider@npm:2.0.0, @ai-sdk/provider@npm:^2.0.0":
version: 2.0.0
resolution: "@ai-sdk/provider@npm:2.0.0"
@@ -1830,7 +1854,7 @@
dependencies:
"@ai-sdk/anthropic": "npm:^2.0.49"
"@ai-sdk/azure": "npm:^2.0.74"
"@ai-sdk/deepseek": "npm:^1.0.29"
"@ai-sdk/deepseek": "npm:^1.0.31"
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17"
@@ -1851,6 +1875,7 @@
version: 0.0.0-use.local
resolution: "@cherrystudio/ai-sdk-provider@workspace:packages/ai-sdk-provider"
dependencies:
"@ai-sdk/openai-compatible": "npm:^1.0.28"
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17"
tsdown: "npm:^0.13.3"