Merge remote-tracking branch 'origin/main' into feat/proxy-api-server

suyao 2025-12-04 21:39:17 +08:00
commit 0e6830b1bf
89 changed files with 1768 additions and 395 deletions

View File

@@ -19,7 +19,7 @@ jobs:
           echo "tag=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT
       - name: Dispatch update-download-version workflow to cherry-studio-docs
-        uses: peter-evans/repository-dispatch@v3
+        uses: peter-evans/repository-dispatch@v4
         with:
           token: ${{ secrets.REPO_DISPATCH_TOKEN }}
           repository: CherryHQ/cherry-studio-docs

View File

@@ -318,6 +318,7 @@
     "motion": "^12.10.5",
     "notion-helper": "^1.3.22",
     "npx-scope-finder": "^1.2.0",
+    "ollama-ai-provider-v2": "^1.5.5",
     "oxlint": "^1.22.0",
     "oxlint-tsgolint": "^0.2.0",
     "p-queue": "^8.1.0",

View File

@@ -41,6 +41,7 @@
     "ai": "^5.0.26"
   },
   "dependencies": {
+    "@ai-sdk/openai-compatible": "^1.0.28",
     "@ai-sdk/provider": "^2.0.0",
     "@ai-sdk/provider-utils": "^3.0.17"
   },

View File

@@ -2,7 +2,6 @@ import { AnthropicMessagesLanguageModel } from '@ai-sdk/anthropic/internal'
 import { GoogleGenerativeAILanguageModel } from '@ai-sdk/google/internal'
 import type { OpenAIProviderSettings } from '@ai-sdk/openai'
 import {
-  OpenAIChatLanguageModel,
   OpenAICompletionLanguageModel,
   OpenAIEmbeddingModel,
   OpenAIImageModel,
@@ -10,6 +9,7 @@ import {
   OpenAISpeechModel,
   OpenAITranscriptionModel
 } from '@ai-sdk/openai/internal'
+import { OpenAICompatibleChatLanguageModel } from '@ai-sdk/openai-compatible'
 import {
   type EmbeddingModelV2,
   type ImageModelV2,
@@ -118,7 +118,7 @@ const createCustomFetch = (originalFetch?: any) => {
     return originalFetch ? originalFetch(url, options) : fetch(url, options)
   }
 }
-class CherryInOpenAIChatLanguageModel extends OpenAIChatLanguageModel {
+class CherryInOpenAIChatLanguageModel extends OpenAICompatibleChatLanguageModel {
   constructor(modelId: string, settings: any) {
     super(modelId, {
       ...settings,

View File

@@ -41,7 +41,7 @@
   "dependencies": {
     "@ai-sdk/anthropic": "^2.0.49",
     "@ai-sdk/azure": "^2.0.74",
-    "@ai-sdk/deepseek": "^1.0.29",
+    "@ai-sdk/deepseek": "^1.0.31",
     "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
     "@ai-sdk/provider": "^2.0.0",
     "@ai-sdk/provider-utils": "^3.0.17",

View File

@@ -35,7 +35,6 @@ export interface WebSearchPluginConfig {
   anthropic?: AnthropicSearchConfig
   xai?: ProviderOptionsMap['xai']['searchParameters']
   google?: GoogleSearchConfig
-  'google-vertex'?: GoogleSearchConfig
   openrouter?: OpenRouterSearchConfig
 }
@@ -44,7 +43,6 @@
  */
 export const DEFAULT_WEB_SEARCH_CONFIG: WebSearchPluginConfig = {
   google: {},
-  'google-vertex': {},
   openai: {},
   'openai-chat': {},
   xai: {
@@ -97,55 +95,28 @@ export type WebSearchToolInputSchema = {
   'openai-chat': InferToolInput<OpenAIChatWebSearchTool>
 }
-export const switchWebSearchTool = (providerId: string, config: WebSearchPluginConfig, params: any) => {
-  switch (providerId) {
-    case 'openai': {
-      if (config.openai) {
-        if (!params.tools) params.tools = {}
-        params.tools.web_search = openai.tools.webSearch(config.openai)
-      }
-      break
-    }
-    case 'openai-chat': {
-      if (config['openai-chat']) {
-        if (!params.tools) params.tools = {}
-        params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
-      }
-      break
-    }
-    case 'anthropic': {
-      if (config.anthropic) {
-        if (!params.tools) params.tools = {}
-        params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
-      }
-      break
-    }
-    case 'google': {
-      // case 'google-vertex':
-      if (!params.tools) params.tools = {}
-      params.tools.web_search = google.tools.googleSearch(config.google || {})
-      break
-    }
-    case 'xai': {
-      if (config.xai) {
-        const searchOptions = createXaiOptions({
-          searchParameters: { ...config.xai, mode: 'on' }
-        })
-        params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
-      }
-      break
-    }
-    case 'openrouter': {
-      if (config.openrouter) {
-        const searchOptions = createOpenRouterOptions(config.openrouter)
-        params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
-      }
-      break
-    }
-  }
+export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any) => {
+  if (config.openai) {
+    if (!params.tools) params.tools = {}
+    params.tools.web_search = openai.tools.webSearch(config.openai)
+  } else if (config['openai-chat']) {
+    if (!params.tools) params.tools = {}
+    params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
+  } else if (config.anthropic) {
+    if (!params.tools) params.tools = {}
+    params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
+  } else if (config.google) {
+    // case 'google-vertex':
+    if (!params.tools) params.tools = {}
+    params.tools.web_search = google.tools.googleSearch(config.google || {})
+  } else if (config.xai) {
+    const searchOptions = createXaiOptions({
+      searchParameters: { ...config.xai, mode: 'on' }
+    })
+    params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
+  } else if (config.openrouter) {
+    const searchOptions = createOpenRouterOptions(config.openrouter)
+    params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
+  }
   return params
 }
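Net effect of this refactor: the helper no longer dispatches on a providerId; the first config key present wins, in the order openai, openai-chat, anthropic, google, xai, openrouter. A minimal sketch of the new call shape (the params object here is a stand-in for the real request params):

    // Hypothetical call: only the openai config is set, so the OpenAI
    // built-in web search tool is attached under params.tools.web_search.
    const params: any = {}
    switchWebSearchTool({ openai: {} }, params)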

View File

@@ -4,7 +4,6 @@
  */
 import { definePlugin } from '../../'
-import type { AiRequestContext } from '../../types'
 import type { WebSearchPluginConfig } from './helper'
 import { DEFAULT_WEB_SEARCH_CONFIG, switchWebSearchTool } from './helper'
@@ -18,15 +17,8 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEAR
     name: 'webSearch',
     enforce: 'pre',
-    transformParams: async (params: any, context: AiRequestContext) => {
-      const { providerId } = context
-      switchWebSearchTool(providerId, config, params)
-      if (providerId === 'cherryin' || providerId === 'cherryin-chat') {
-        // cherryin.gemini
-        const _providerId = params.model.provider.split('.')[1]
-        switchWebSearchTool(_providerId, config, params)
-      }
+    transformParams: async (params: any) => {
+      switchWebSearchTool(config, params)
       return params
     }
   })

View File

@@ -102,6 +102,17 @@ export function formatVertexApiHost(
   return formatApiHost(trimmedHost)
 }
+
+/**
+ * Ollama API
+ */
+export function formatOllamaApiHost(host: string): string {
+  const normalizedHost = withoutTrailingSlash(host)
+    ?.replace(/\/v1$/, '')
+    ?.replace(/\/api$/, '')
+    ?.replace(/\/chat$/, '')
+  return formatApiHost(normalizedHost + '/api', false)
+}
 /**
  * Formats an API host URL by normalizing it and optionally appending an API version.
  *
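A sketch of the normalization this helper is meant to perform, assuming formatApiHost(host, false) returns the host unchanged (inputs are illustrative):

    formatOllamaApiHost('http://localhost:11434')      // -> 'http://localhost:11434/api'
    formatOllamaApiHost('http://localhost:11434/v1')   // -> 'http://localhost:11434/api'
    formatOllamaApiHost('http://localhost:11434/api/') // -> 'http://localhost:11434/api'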

View File

@@ -7,6 +7,11 @@ export const documentExts = ['.pdf', '.doc', '.docx', '.pptx', '.xlsx', '.odt',
 export const thirdPartyApplicationExts = ['.draftsExport']
 export const bookExts = ['.epub']
+export const API_SERVER_DEFAULTS = {
+  HOST: '127.0.0.1',
+  PORT: 23333
+}
+
 /**
  * A flat array of all file extensions known by the linguist database.
  * This is the primary source for identifying code files.

View File

@@ -52,11 +52,12 @@ export function isAwsBedrockProvider<P extends MinimalProvider>(provider: P): bo
   return provider.type === 'aws-bedrock'
 }
-/**
- * Check if provider is AI Gateway type
- */
 export function isAIGatewayProvider<P extends MinimalProvider>(provider: P): boolean {
-  return provider.type === 'ai-gateway'
+  return provider.type === 'gateway'
+}
+
+export function isOllamaProvider<P extends MinimalProvider>(provider: P): boolean {
+  return provider.type === 'ollama'
 }
 /**

View File

@@ -9,6 +9,7 @@
 import {
   formatApiHost,
   formatAzureOpenAIApiHost,
+  formatOllamaApiHost,
   formatVertexApiHost,
   routeToEndpoint,
   withoutTrailingSlash
@@ -18,6 +19,7 @@ import {
   isAzureOpenAIProvider,
   isCherryAIProvider,
   isGeminiProvider,
+  isOllamaProvider,
   isPerplexityProvider,
   isVertexProvider
 } from './detection'
@@ -77,6 +79,8 @@ export function formatProviderApiHost<T extends MinimalProvider>(provider: T, co
     }
   } else if (formatted.id === SystemProviderIds.copilot || formatted.id === SystemProviderIds.github) {
     formatted.apiHost = formatApiHost(formatted.apiHost, false)
+  } else if (isOllamaProvider(formatted)) {
+    formatted.apiHost = formatOllamaApiHost(formatted.apiHost)
   } else if (isGeminiProvider(formatted)) {
     formatted.apiHost = formatApiHost(formatted.apiHost, true, 'v1beta')
   } else if (isAzureOpenAIProvider(formatted)) {

View File

@@ -19,6 +19,7 @@ export {
   isCherryAIProvider,
   isGeminiProvider,
   isNewApiProvider,
+  isOllamaProvider,
   isOpenAICompatibleProvider,
   isOpenAIProvider,
   isPerplexityProvider,

View File

@@ -79,12 +79,12 @@ export const SHARED_PROVIDER_CONFIGS: ProviderConfig[] = [
     aliases: ['hf', 'hugging-face']
   },
   {
-    id: 'ai-gateway',
-    name: 'AI Gateway',
+    id: 'gateway',
+    name: 'Vercel AI Gateway',
     import: () => import('@ai-sdk/gateway'),
     creatorFunctionName: 'createGateway',
     supportsImageGeneration: true,
-    aliases: ['gateway']
+    aliases: ['ai-gateway']
   },
   {
     id: 'cerebras',
@@ -92,6 +92,13 @@
     import: () => import('@ai-sdk/cerebras'),
     creatorFunctionName: 'createCerebras',
     supportsImageGeneration: false
+  },
+  {
+    id: 'ollama',
+    name: 'Ollama',
+    import: () => import('ollama-ai-provider-v2'),
+    creatorFunctionName: 'createOllama',
+    supportsImageGeneration: false
   }
 ] as const
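For context, a hedged sketch of how a registry entry like this is typically consumed; the lookup code below is illustrative, not the factory's actual implementation:

    // Resolve the lazy import and pull the named creator function off the module.
    const entry = SHARED_PROVIDER_CONFIGS.find((c) => c.id === 'ollama')!
    const mod = await entry.import()
    const createOllama = (mod as Record<string, any>)[entry.creatorFunctionName]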

View File

@@ -6,8 +6,10 @@
  */
 import { formatPrivateKey, hasProviderConfig, ProviderConfigFactory } from '@cherrystudio/ai-core/provider'
+import { isEmpty } from 'lodash'
 import { routeToEndpoint } from '../api'
+import { isOllamaProvider } from './detection'
 import { getAiSdkProviderId } from './mapping'
 import type { MinimalProvider } from './types'
 import { SystemProviderIds } from './types'
@@ -157,6 +159,19 @@
     }
   }
+  if (isOllamaProvider(provider)) {
+    return {
+      providerId: 'ollama',
+      options: {
+        ...baseConfig,
+        headers: {
+          ...provider.extra_headers,
+          Authorization: !isEmpty(baseConfig.apiKey) ? `Bearer ${baseConfig.apiKey}` : undefined
+        }
+      }
+    }
+  }
+
   // Build extra options
   const extraOptions: Record<string, unknown> = {}
   if (endpoint) {
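The Authorization header is only attached when an API key is actually set, so a keyless local Ollama server gets no auth header. A minimal sketch of that guard (isEmpty is the lodash import from this diff):

    import { isEmpty } from 'lodash'

    const apiKey = '' // typical for a local Ollama install
    const headers = {
      Authorization: !isEmpty(apiKey) ? `Bearer ${apiKey}` : undefined
    }
    // headers.Authorization === undefined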

View File

@@ -11,7 +11,8 @@ export const ProviderTypeSchema = z.enum([
   'aws-bedrock',
   'vertex-anthropic',
   'new-api',
-  'ai-gateway'
+  'gateway',
+  'ollama'
 ])
 export type ProviderType = z.infer<typeof ProviderTypeSchema>
@@ -98,7 +99,7 @@ export const SystemProviderIdSchema = z.enum([
   'longcat',
   'huggingface',
   'sophnet',
-  'ai-gateway',
+  'gateway',
   'cerebras'
 ])
@@ -167,7 +168,7 @@ export const SystemProviderIds = {
   aionly: 'aionly',
   longcat: 'longcat',
   huggingface: 'huggingface',
-  'ai-gateway': 'ai-gateway',
+  gateway: 'gateway',
   cerebras: 'cerebras'
 } as const satisfies Record<SystemProviderId, SystemProviderId>

View File

@@ -27,5 +27,10 @@ export const getLowerBaseModelName = (id: string, delimiter: string = '/'): stri
   if (baseModelName.endsWith(':free')) {
     return baseModelName.replace(':free', '')
   }
+  // for cherryin
+  if (baseModelName.endsWith('(free)')) {
+    return baseModelName.replace('(free)', '')
+  }
   return baseModelName
 }
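Illustrative behavior of the new suffix handling, assuming getLowerBaseModelName lowercases the segment after the last delimiter (the model ids are examples):

    getLowerBaseModelName('cherryin/some-model(free)')  // -> 'some-model'
    getLowerBaseModelName('openrouter/some-model:free') // -> 'some-model' (existing path)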

View File

@@ -91,23 +91,6 @@ function createIssueCard(issueData) {
   return {
     elements: [
-      {
-        tag: 'div',
-        text: {
-          tag: 'lark_md',
-          content: `**🐛 New GitHub Issue #${issueNumber}**`
-        }
-      },
-      {
-        tag: 'hr'
-      },
-      {
-        tag: 'div',
-        text: {
-          tag: 'lark_md',
-          content: `**📝 Title:** ${issueTitle}`
-        }
-      },
       {
         tag: 'div',
         text: {
@@ -158,7 +141,7 @@ function createIssueCard(issueData) {
       template: 'blue',
       title: {
         tag: 'plain_text',
-        content: '🆕 Cherry Studio - New Issue'
+        content: `#${issueNumber} - ${issueTitle}`
       }
     }
   }

View File

@@ -1,3 +1,4 @@
+import { API_SERVER_DEFAULTS } from '@shared/config/constant'
 import type { ApiServerConfig } from '@types'
 import { v4 as uuidv4 } from 'uuid'
@@ -6,9 +7,6 @@ import { reduxService } from '../services/ReduxService'
 const logger = loggerService.withContext('ApiServerConfig')
-const defaultHost = 'localhost'
-const defaultPort = 23333
-
 class ConfigManager {
   private _config: ApiServerConfig | null = null
@@ -30,8 +28,8 @@
       }
       this._config = {
         enabled: serverSettings?.enabled ?? false,
-        port: serverSettings?.port ?? defaultPort,
-        host: defaultHost,
+        port: serverSettings?.port ?? API_SERVER_DEFAULTS.PORT,
+        host: serverSettings?.host ?? API_SERVER_DEFAULTS.HOST,
         apiKey: apiKey
       }
       return this._config
@@ -39,8 +37,8 @@
       logger.warn('Failed to load config from Redux, using defaults', { error })
       this._config = {
         enabled: false,
-        port: defaultPort,
-        host: defaultHost,
+        port: API_SERVER_DEFAULTS.PORT,
+        host: API_SERVER_DEFAULTS.HOST,
         apiKey: this.generateApiKey()
       }
       return this._config

View File

@@ -20,8 +20,8 @@ const swaggerOptions: swaggerJSDoc.Options = {
   },
   servers: [
     {
-      url: 'http://localhost:23333',
-      description: 'Local development server'
+      url: '/',
+      description: 'Current server'
     }
   ],
   components: {

View File

@@ -19,19 +19,9 @@ export default class EmbeddingsFactory {
       })
     }
     if (provider === 'ollama') {
-      if (baseURL.includes('v1/')) {
-        return new OllamaEmbeddings({
-          model: model,
-          baseUrl: baseURL.replace('v1/', ''),
-          requestOptions: {
-            // @ts-ignore expected
-            'encoding-format': 'float'
-          }
-        })
-      }
       return new OllamaEmbeddings({
         model: model,
-        baseUrl: baseURL,
+        baseUrl: baseURL.replace(/\/api$/, ''),
         requestOptions: {
           // @ts-ignore expected
           'encoding-format': 'float'
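Why strip /api here: Ollama provider hosts are now normalized to end in '/api' (see formatOllamaApiHost above), while OllamaEmbeddings expects the bare origin. Illustrative:

    'http://localhost:11434/api'.replace(/\/api$/, '') // -> 'http://localhost:11434'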

View File

@@ -189,7 +189,7 @@ export default class ModernAiProvider {
     config: ModernAiProviderConfig
   ): Promise<CompletionsResult> {
     // ai-gateway is not an image/generation endpoint, so skip the legacy path for now
-    if (config.isImageGenerationEndpoint && this.getActualProvider().id !== SystemProviderIds['ai-gateway']) {
+    if (config.isImageGenerationEndpoint && this.getActualProvider().id !== SystemProviderIds.gateway) {
       // Use the legacy implementation for image generation (supports image editing and other advanced features)
       if (!config.uiMessages) {
         throw new Error('uiMessages is required for image generation endpoint')
@@ -480,7 +480,7 @@
   // Proxy other methods to the original implementation
   public async models() {
-    if (this.actualProvider.id === SystemProviderIds['ai-gateway']) {
+    if (this.actualProvider.id === SystemProviderIds.gateway) {
       const formatModel = function (models: GatewayLanguageModelEntry[]): Model[] {
         return models.map((m) => ({
           id: m.id,

View File

@@ -9,6 +9,7 @@ import {
 import { REFERENCE_PROMPT } from '@renderer/config/prompts'
 import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
 import { getAssistantSettings } from '@renderer/services/AssistantService'
+import type { RootState } from '@renderer/store'
 import type {
   Assistant,
   GenerateImageParams,
@@ -245,23 +246,20 @@ export abstract class BaseApiClient<
   protected getVerbosity(model?: Model): OpenAIVerbosity {
     try {
-      const state = window.store?.getState()
+      const state = window.store?.getState() as RootState
       const verbosity = state?.settings?.openAI?.verbosity
-      if (verbosity && ['low', 'medium', 'high'].includes(verbosity)) {
-        // If model is provided, check if the verbosity is supported by the model
-        if (model) {
-          const supportedVerbosity = getModelSupportedVerbosity(model)
-          // Use user's verbosity if supported, otherwise use the first supported option
-          return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
-        }
-        return verbosity
+      // If model is provided, check if the verbosity is supported by the model
+      if (model) {
+        const supportedVerbosity = getModelSupportedVerbosity(model)
+        // Use user's verbosity if supported, otherwise use the first supported option
+        return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
       }
+      return verbosity
     } catch (error) {
-      logger.warn('Failed to get verbosity from state:', error as Error)
+      logger.warn('Failed to get verbosity from state. Fallback to undefined.', error as Error)
+      return undefined
     }
-    return 'medium'
   }
   protected getTimeout(model: Model) {

View File

@@ -32,7 +32,6 @@ import {
   isSupportedThinkingTokenModel,
   isSupportedThinkingTokenQwenModel,
   isSupportedThinkingTokenZhipuModel,
-  isSupportVerbosityModel,
   isVisionModel,
   MODEL_SUPPORTED_REASONING_EFFORT,
   ZHIPU_RESULT_TOKENS
@@ -714,13 +713,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
         ...modalities,
         // groq has a different service tier configuration that does not match the OpenAI interface type
         service_tier: this.getServiceTier(model) as OpenAIServiceTier,
-        ...(isSupportVerbosityModel(model)
-          ? {
-              text: {
-                verbosity: this.getVerbosity(model)
-              }
-            }
-          : {}),
+        // verbosity. getVerbosity ensures the returned value is valid.
+        verbosity: this.getVerbosity(model),
         ...this.getProviderSpecificParameters(assistant, model),
         ...reasoningEffort,
         // ...getOpenAIWebSearchParams(model, enableWebSearch),

View File

@@ -11,7 +11,7 @@ import { getStoreSetting } from '@renderer/hooks/useSettings'
 import { getAssistantSettings } from '@renderer/services/AssistantService'
 import store from '@renderer/store'
 import type { SettingsState } from '@renderer/store/settings'
-import type { Assistant, GenerateImageParams, Model, Provider } from '@renderer/types'
+import { type Assistant, type GenerateImageParams, type Model, type Provider } from '@renderer/types'
 import type {
   OpenAIResponseSdkMessageParam,
   OpenAIResponseSdkParams,
@@ -25,7 +25,8 @@ import type {
   OpenAISdkRawOutput,
   ReasoningEffortOptionalParams
 } from '@renderer/types/sdk'
-import { formatApiHost } from '@renderer/utils/api'
+import { formatApiHost, withoutTrailingSlash } from '@renderer/utils/api'
+import { isOllamaProvider } from '@renderer/utils/provider'
 import { BaseApiClient } from '../BaseApiClient'
@@ -115,6 +116,34 @@
         }))
         .filter(isSupportedModel)
     }
+    if (isOllamaProvider(this.provider)) {
+      const baseUrl = withoutTrailingSlash(this.getBaseURL(false))
+        .replace(/\/v1$/, '')
+        .replace(/\/api$/, '')
+      const response = await fetch(`${baseUrl}/api/tags`, {
+        headers: {
+          Authorization: `Bearer ${this.apiKey}`,
+          ...this.defaultHeaders(),
+          ...this.provider.extra_headers
+        }
+      })
+      if (!response.ok) {
+        throw new Error(`Ollama server returned ${response.status} ${response.statusText}`)
+      }
+      const data = await response.json()
+      if (!data?.models || !Array.isArray(data.models)) {
+        throw new Error('Invalid response from Ollama API: missing models array')
+      }
+      return data.models.map((model) => ({
+        id: model.name,
+        object: 'model',
+        owned_by: 'ollama'
+      }))
+    }
     const response = await sdk.models.list()
     if (this.provider.id === 'together') {
       // @ts-ignore key is not typed
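For reference, an abbreviated sketch of the /api/tags payload this branch consumes; only the name field is used, other fields of the Ollama response are omitted here:

    const data = { models: [{ name: 'llama3.2:3b' }, { name: 'qwen2.5:7b' }] }
    data.models.map((model) => ({ id: model.name, object: 'model', owned_by: 'ollama' }))
    // -> [{ id: 'llama3.2:3b', ... }, { id: 'qwen2.5:7b', ... }]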

View File

@@ -4,7 +4,7 @@ import { isGemini3Model, isSupportedThinkingTokenQwenModel } from '@renderer/con
 import type { MCPTool } from '@renderer/types'
 import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types'
 import type { Chunk } from '@renderer/types/chunk'
-import { isSupportEnableThinkingProvider } from '@renderer/utils/provider'
+import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider'
 import { openrouterReasoningMiddleware, skipGeminiThoughtSignatureMiddleware } from '@shared/middleware'
 import type { LanguageModelMiddleware } from 'ai'
 import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
@@ -239,6 +239,7 @@ function addModelSpecificMiddlewares(builder: AiSdkMiddlewareBuilder, config: Ai
   // Use /think or /no_think suffix to control thinking mode
   if (
     config.provider &&
+    !isOllamaProvider(config.provider) &&
     isSupportedThinkingTokenQwenModel(config.model) &&
     !isSupportEnableThinkingProvider(config.provider)
   ) {

View File

@@ -11,12 +11,16 @@ import { vertex } from '@ai-sdk/google-vertex/edge'
 import { combineHeaders } from '@ai-sdk/provider-utils'
 import type { AnthropicSearchConfig, WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins'
 import { isBaseProvider } from '@cherrystudio/ai-core/core/providers/schemas'
+import type { BaseProviderId } from '@cherrystudio/ai-core/provider'
 import { loggerService } from '@logger'
 import {
   isAnthropicModel,
+  isFixedReasoningModel,
+  isGeminiModel,
   isGenerateImageModel,
+  isGrokModel,
+  isOpenAIModel,
   isOpenRouterBuiltInWebSearchModel,
-  isReasoningModel,
   isSupportedReasoningEffortModel,
   isSupportedThinkingTokenModel,
   isWebSearchModel
@@ -24,11 +28,12 @@ import {
 import { getDefaultModel } from '@renderer/services/AssistantService'
 import store from '@renderer/store'
 import type { CherryWebSearchConfig } from '@renderer/store/websearch'
-import { type Assistant, type MCPTool, type Provider } from '@renderer/types'
+import type { Model } from '@renderer/types'
+import { type Assistant, type MCPTool, type Provider, SystemProviderIds } from '@renderer/types'
 import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
 import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
 import { replacePromptVariables } from '@renderer/utils/prompt'
-import { isAwsBedrockProvider } from '@renderer/utils/provider'
+import { isAIGatewayProvider, isAwsBedrockProvider } from '@renderer/utils/provider'
 import type { ModelMessage, Tool } from 'ai'
 import { stepCountIs } from 'ai'
@@ -43,6 +48,25 @@ const logger = loggerService.withContext('parameterBuilder')
 type ProviderDefinedTool = Extract<Tool<any, any>, { type: 'provider-defined' }>
+function mapVertexAIGatewayModelToProviderId(model: Model): BaseProviderId | undefined {
+  if (isAnthropicModel(model)) {
+    return 'anthropic'
+  }
+  if (isGeminiModel(model)) {
+    return 'google'
+  }
+  if (isGrokModel(model)) {
+    return 'xai'
+  }
+  if (isOpenAIModel(model)) {
+    return 'openai'
+  }
+  logger.warn(
+    `[mapVertexAIGatewayModelToProviderId] Unknown model type for AI Gateway: ${model.id}. Web search will not be enabled.`
+  )
+  return undefined
+}
+
 /**
  * AI SDK
  *
@@ -83,7 +107,7 @@ export async function buildStreamTextParams(
   const enableReasoning =
     ((isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model)) &&
       assistant.settings?.reasoning_effort !== undefined) ||
-    (isReasoningModel(model) && (!isSupportedThinkingTokenModel(model) || !isSupportedReasoningEffortModel(model)))
+    isFixedReasoningModel(model)
   // Decide whether to use built-in web search
   // Condition: no external search provider && (user enabled built-in search || the model forces built-in search)
@@ -117,6 +141,11 @@
   if (enableWebSearch) {
     if (isBaseProvider(aiSdkProviderId)) {
       webSearchPluginConfig = buildProviderBuiltinWebSearchConfig(aiSdkProviderId, webSearchConfig, model)
+    } else if (isAIGatewayProvider(provider) || SystemProviderIds.gateway === provider.id) {
+      const aiSdkProviderId = mapVertexAIGatewayModelToProviderId(model)
+      if (aiSdkProviderId) {
+        webSearchPluginConfig = buildProviderBuiltinWebSearchConfig(aiSdkProviderId, webSearchConfig, model)
+      }
     }
     if (!tools) {
       tools = {}
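Illustrative mapping for gateway-style model ids, assuming the is*Model helpers match on the vendor prefix (the ids are examples):

    mapVertexAIGatewayModelToProviderId({ id: 'anthropic/claude-3-5-sonnet' } as Model) // -> 'anthropic'
    mapVertexAIGatewayModelToProviderId({ id: 'google/gemini-2.0-flash' } as Model)     // -> 'google'
    mapVertexAIGatewayModelToProviderId({ id: 'acme/mystery-model' } as Model)          // -> undefined (logs a warning)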

View File

@@ -218,7 +218,6 @@ export async function prepareSpecialProviderConfig(
       ...(config.options.headers ? config.options.headers : {}),
       'Content-Type': 'application/json',
       'anthropic-version': '2023-06-01',
-      'anthropic-beta': 'oauth-2025-04-20',
       Authorization: `Bearer ${oauthToken}`
     },
     baseURL: 'https://api.anthropic.com/v1',

View File

@@ -27,7 +27,8 @@ vi.mock('@cherrystudio/ai-core/provider', async (importOriginal) => {
         'xai',
         'deepseek',
         'openrouter',
-        'openai-compatible'
+        'openai-compatible',
+        'cherryin'
       ]
       if (baseProviders.includes(id)) {
         return { success: true, data: id }
@@ -37,7 +38,15 @@
     },
     customProviderIdSchema: {
       safeParse: vi.fn((id) => {
-        const customProviders = ['google-vertex', 'google-vertex-anthropic', 'bedrock']
+        const customProviders = [
+          'google-vertex',
+          'google-vertex-anthropic',
+          'bedrock',
+          'gateway',
+          'aihubmix',
+          'newapi',
+          'ollama'
+        ]
         if (customProviders.includes(id)) {
           return { success: true, data: id }
         }
@@ -47,20 +56,7 @@
   }
 })
-vi.mock('../provider/factory', () => ({
-  getAiSdkProviderId: vi.fn((provider) => {
-    // Simulate the provider ID mapping
-    const mapping: Record<string, string> = {
-      [SystemProviderIds.gemini]: 'google',
-      [SystemProviderIds.openai]: 'openai',
-      [SystemProviderIds.anthropic]: 'anthropic',
-      [SystemProviderIds.grok]: 'xai',
-      [SystemProviderIds.deepseek]: 'deepseek',
-      [SystemProviderIds.openrouter]: 'openrouter'
-    }
-    return mapping[provider.id] || provider.id
-  })
-}))
+// Don't mock getAiSdkProviderId - use real implementation for more accurate tests
 vi.mock('@renderer/config/models', async (importOriginal) => ({
   ...(await importOriginal()),
@@ -179,8 +175,11 @@ describe('options utils', () => {
     provider: SystemProviderIds.openai
   } as Model
-  beforeEach(() => {
+  beforeEach(async () => {
     vi.clearAllMocks()
+    // Reset getCustomParameters to return empty object by default
+    const { getCustomParameters } = await import('../reasoning')
+    vi.mocked(getCustomParameters).mockReturnValue({})
   })
   describe('buildProviderOptions', () => {
@@ -391,7 +390,6 @@
       enableWebSearch: false,
       enableGenerateImage: false
     })
-
     expect(result.providerOptions).toHaveProperty('deepseek')
     expect(result.providerOptions.deepseek).toBeDefined()
   })
@@ -461,10 +459,14 @@
       }
     )
-    expect(result.providerOptions.openai).toHaveProperty('custom_param')
-    expect(result.providerOptions.openai.custom_param).toBe('custom_value')
-    expect(result.providerOptions.openai).toHaveProperty('another_param')
-    expect(result.providerOptions.openai.another_param).toBe(123)
+    expect(result.providerOptions).toStrictEqual({
+      openai: {
+        custom_param: 'custom_value',
+        another_param: 123,
+        serviceTier: undefined,
+        textVerbosity: undefined
+      }
+    })
   })
   it('should extract AI SDK standard params from custom parameters', async () => {
@@ -696,5 +698,459 @@
     })
   })
 })
describe('AI Gateway provider', () => {
const gatewayProvider: Provider = {
id: SystemProviderIds.gateway,
name: 'Vercel AI Gateway',
type: 'gateway',
apiKey: 'test-key',
apiHost: 'https://gateway.vercel.com',
isSystem: true
} as Provider
it('should build OpenAI options for OpenAI models through gateway', () => {
const openaiModel: Model = {
id: 'openai/gpt-4',
name: 'GPT-4',
provider: SystemProviderIds.gateway
} as Model
const result = buildProviderOptions(mockAssistant, openaiModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions.openai).toBeDefined()
})
it('should build Anthropic options for Anthropic models through gateway', () => {
const anthropicModel: Model = {
id: 'anthropic/claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
provider: SystemProviderIds.gateway
} as Model
const result = buildProviderOptions(mockAssistant, anthropicModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('anthropic')
expect(result.providerOptions.anthropic).toBeDefined()
})
it('should build Google options for Gemini models through gateway', () => {
const geminiModel: Model = {
id: 'google/gemini-2.0-flash-exp',
name: 'Gemini 2.0 Flash',
provider: SystemProviderIds.gateway
} as Model
const result = buildProviderOptions(mockAssistant, geminiModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('google')
expect(result.providerOptions.google).toBeDefined()
})
it('should build xAI options for Grok models through gateway', () => {
const grokModel: Model = {
id: 'xai/grok-2-latest',
name: 'Grok 2',
provider: SystemProviderIds.gateway
} as Model
const result = buildProviderOptions(mockAssistant, grokModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('xai')
expect(result.providerOptions.xai).toBeDefined()
})
it('should include reasoning parameters for Anthropic models when enabled', () => {
const anthropicModel: Model = {
id: 'anthropic/claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
provider: SystemProviderIds.gateway
} as Model
const result = buildProviderOptions(mockAssistant, anthropicModel, gatewayProvider, {
enableReasoning: true,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions.anthropic).toHaveProperty('thinking')
expect(result.providerOptions.anthropic.thinking).toEqual({
type: 'enabled',
budgetTokens: 5000
})
})
it('should merge gateway routing options from custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
gateway: {
order: ['vertex', 'anthropic'],
only: ['vertex', 'anthropic']
}
})
const anthropicModel: Model = {
id: 'anthropic/claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
provider: SystemProviderIds.gateway
} as Model
const result = buildProviderOptions(mockAssistant, anthropicModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Should have both anthropic provider options and gateway routing options
expect(result.providerOptions).toHaveProperty('anthropic')
expect(result.providerOptions).toHaveProperty('gateway')
expect(result.providerOptions.gateway).toEqual({
order: ['vertex', 'anthropic'],
only: ['vertex', 'anthropic']
})
})
it('should combine provider-specific options with gateway routing options', async () => {
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({
gateway: {
order: ['openai', 'anthropic']
}
})
const openaiModel: Model = {
id: 'openai/gpt-4',
name: 'GPT-4',
provider: SystemProviderIds.gateway
} as Model
const result = buildProviderOptions(mockAssistant, openaiModel, gatewayProvider, {
enableReasoning: true,
enableWebSearch: false,
enableGenerateImage: false
})
// Should have OpenAI provider options with reasoning
expect(result.providerOptions.openai).toBeDefined()
expect(result.providerOptions.openai).toHaveProperty('reasoningEffort')
// Should also have gateway routing options
expect(result.providerOptions.gateway).toBeDefined()
expect(result.providerOptions.gateway.order).toEqual(['openai', 'anthropic'])
})
it('should build generic options for unknown model types through gateway', () => {
const unknownModel: Model = {
id: 'unknown-provider/model-name',
name: 'Unknown Model',
provider: SystemProviderIds.gateway
} as Model
const result = buildProviderOptions(mockAssistant, unknownModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
expect(result.providerOptions).toHaveProperty('openai-compatible')
expect(result.providerOptions['openai-compatible']).toBeDefined()
})
})
describe('Proxy provider custom parameters mapping', () => {
it('should map cherryin provider ID to actual AI SDK provider ID (Google)', async () => {
const { getCustomParameters } = await import('../reasoning')
// Mock Cherry In provider that uses Google SDK
const cherryinProvider = {
id: 'cherryin',
name: 'Cherry In',
type: 'gemini', // Using Google SDK
apiKey: 'test-key',
apiHost: 'https://cherryin.com',
models: [] as Model[]
} as Provider
const geminiModel: Model = {
id: 'gemini-2.0-flash-exp',
name: 'Gemini 2.0 Flash',
provider: 'cherryin'
} as Model
// User provides custom parameters with Cherry Studio provider ID
vi.mocked(getCustomParameters).mockReturnValue({
cherryin: {
customOption1: 'value1',
customOption2: 'value2'
}
})
const result = buildProviderOptions(mockAssistant, geminiModel, cherryinProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Should map to 'google' AI SDK provider, not 'cherryin'
expect(result.providerOptions).toHaveProperty('google')
expect(result.providerOptions).not.toHaveProperty('cherryin')
expect(result.providerOptions.google).toMatchObject({
customOption1: 'value1',
customOption2: 'value2'
})
})
it('should map cherryin provider ID to actual AI SDK provider ID (OpenAI)', async () => {
const { getCustomParameters } = await import('../reasoning')
// Mock Cherry In provider that uses OpenAI SDK
const cherryinProvider = {
id: 'cherryin',
name: 'Cherry In',
type: 'openai-response', // Using OpenAI SDK
apiKey: 'test-key',
apiHost: 'https://cherryin.com',
models: [] as Model[]
} as Provider
const openaiModel: Model = {
id: 'gpt-4',
name: 'GPT-4',
provider: 'cherryin'
} as Model
// User provides custom parameters with Cherry Studio provider ID
vi.mocked(getCustomParameters).mockReturnValue({
cherryin: {
customOpenAIOption: 'openai_value'
}
})
const result = buildProviderOptions(mockAssistant, openaiModel, cherryinProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Should map to 'openai' AI SDK provider, not 'cherryin'
expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions).not.toHaveProperty('cherryin')
expect(result.providerOptions.openai).toMatchObject({
customOpenAIOption: 'openai_value'
})
})
it('should allow direct AI SDK provider ID in custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
const geminiProvider = {
id: SystemProviderIds.gemini,
name: 'Google',
type: 'gemini',
apiKey: 'test-key',
apiHost: 'https://generativelanguage.googleapis.com',
models: [] as Model[]
} as Provider
const geminiModel: Model = {
id: 'gemini-2.0-flash-exp',
name: 'Gemini 2.0 Flash',
provider: SystemProviderIds.gemini
} as Model
// User provides custom parameters directly with AI SDK provider ID
vi.mocked(getCustomParameters).mockReturnValue({
google: {
directGoogleOption: 'google_value'
}
})
const result = buildProviderOptions(mockAssistant, geminiModel, geminiProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Should merge directly to 'google' provider
expect(result.providerOptions.google).toMatchObject({
directGoogleOption: 'google_value'
})
})
it('should map gateway provider custom parameters to actual AI SDK provider', async () => {
const { getCustomParameters } = await import('../reasoning')
const gatewayProvider: Provider = {
id: SystemProviderIds.gateway,
name: 'Vercel AI Gateway',
type: 'gateway',
apiKey: 'test-key',
apiHost: 'https://gateway.vercel.com',
isSystem: true
} as Provider
const anthropicModel: Model = {
id: 'anthropic/claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
provider: SystemProviderIds.gateway
} as Model
// User provides both gateway routing options and gateway-scoped custom parameters
vi.mocked(getCustomParameters).mockReturnValue({
gateway: {
order: ['vertex', 'anthropic'],
only: ['vertex']
},
customParam: 'should_go_to_anthropic'
})
const result = buildProviderOptions(mockAssistant, anthropicModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Gateway routing options should be preserved
expect(result.providerOptions.gateway).toEqual({
order: ['vertex', 'anthropic'],
only: ['vertex']
})
// Custom parameters should go to the actual AI SDK provider (anthropic)
expect(result.providerOptions.anthropic).toMatchObject({
customParam: 'should_go_to_anthropic'
})
})
it('should handle mixed custom parameters (AI SDK provider ID + custom params)', async () => {
const { getCustomParameters } = await import('../reasoning')
const openaiProvider: Provider = {
id: SystemProviderIds.openai,
name: 'OpenAI',
type: 'openai-response',
apiKey: 'test-key',
apiHost: 'https://api.openai.com/v1',
isSystem: true
} as Provider
// User provides both direct AI SDK provider params and custom params
vi.mocked(getCustomParameters).mockReturnValue({
openai: {
providerSpecific: 'value1'
},
customParam1: 'value2',
customParam2: 123
})
const result = buildProviderOptions(mockAssistant, mockModel, openaiProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Should merge both into 'openai' provider options
expect(result.providerOptions.openai).toMatchObject({
providerSpecific: 'value1',
customParam1: 'value2',
customParam2: 123
})
})
// Note: For proxy providers like aihubmix/newapi, users should write AI SDK provider ID (google/anthropic)
// instead of the Cherry Studio provider ID for custom parameters to work correctly
it('should handle cherryin fallback to openai-compatible with custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')
// Mock cherryin provider that falls back to openai-compatible (default case)
const cherryinProvider = {
id: 'cherryin',
name: 'Cherry In',
type: 'openai',
apiKey: 'test-key',
apiHost: 'https://cherryin.com',
models: [] as Model[]
} as Provider
const testModel: Model = {
id: 'some-model',
name: 'Some Model',
provider: 'cherryin'
} as Model
// User provides custom parameters with cherryin provider ID
vi.mocked(getCustomParameters).mockReturnValue({
customCherryinOption: 'cherryin_value'
})
const result = buildProviderOptions(mockAssistant, testModel, cherryinProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// When cherryin falls back to default case, it should use rawProviderId (cherryin)
// User's cherryin params should merge with the provider options
expect(result.providerOptions).toHaveProperty('cherryin')
expect(result.providerOptions.cherryin).toMatchObject({
customCherryinOption: 'cherryin_value'
})
})
it('should handle cross-provider configurations', async () => {
const { getCustomParameters } = await import('../reasoning')
const openaiProvider: Provider = {
id: SystemProviderIds.openai,
name: 'OpenAI',
type: 'openai-response',
apiKey: 'test-key',
apiHost: 'https://api.openai.com/v1',
isSystem: true
} as Provider
// User provides parameters for multiple providers
// In real usage, anthropic/google params would be treated as regular params for openai provider
vi.mocked(getCustomParameters).mockReturnValue({
openai: {
openaiSpecific: 'openai_value'
},
customParam: 'value'
})
const result = buildProviderOptions(mockAssistant, mockModel, openaiProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})
// Should have openai provider options with both scoped and custom params
expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions.openai).toMatchObject({
openaiSpecific: 'openai_value',
customParam: 'value'
})
})
})
  })
})

View File

@ -1,5 +1,5 @@
import type { BedrockProviderOptions } from '@ai-sdk/amazon-bedrock' import type { BedrockProviderOptions } from '@ai-sdk/amazon-bedrock'
import type { AnthropicProviderOptions } from '@ai-sdk/anthropic' import { type AnthropicProviderOptions } from '@ai-sdk/anthropic'
import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google' import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai' import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
import type { XaiProviderOptions } from '@ai-sdk/xai' import type { XaiProviderOptions } from '@ai-sdk/xai'
@ -7,6 +7,9 @@ import { baseProviderIdSchema, customProviderIdSchema } from '@cherrystudio/ai-c
import { loggerService } from '@logger' import { loggerService } from '@logger'
import { import {
getModelSupportedVerbosity, getModelSupportedVerbosity,
isAnthropicModel,
isGeminiModel,
isGrokModel,
isOpenAIModel, isOpenAIModel,
isQwenMTModel, isQwenMTModel,
isSupportFlexServiceTierModel, isSupportFlexServiceTierModel,
@ -29,12 +32,14 @@ import {
type OpenAIServiceTier, type OpenAIServiceTier,
OpenAIServiceTiers, OpenAIServiceTiers,
type Provider, type Provider,
type ServiceTier type ServiceTier,
SystemProviderIds
} from '@renderer/types' } from '@renderer/types'
import { type AiSdkParam, isAiSdkParam, type OpenAIVerbosity } from '@renderer/types/aiCoreTypes' import { type AiSdkParam, isAiSdkParam, type OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { isSupportServiceTierProvider, isSupportVerbosityProvider } from '@renderer/utils/provider' import { isSupportServiceTierProvider, isSupportVerbosityProvider } from '@renderer/utils/provider'
import type { JSONValue } from 'ai' import type { JSONValue } from 'ai'
import { t } from 'i18next' import { t } from 'i18next'
import type { OllamaCompletionProviderOptions } from 'ollama-ai-provider-v2'
import { addAnthropicHeaders } from '../prepareParams/header' import { addAnthropicHeaders } from '../prepareParams/header'
import { getAiSdkProviderId } from '../provider/factory' import { getAiSdkProviderId } from '../provider/factory'
@ -156,8 +161,8 @@ export function buildProviderOptions(
providerOptions: Record<string, Record<string, JSONValue>> providerOptions: Record<string, Record<string, JSONValue>>
standardParams: Partial<Record<AiSdkParam, any>> standardParams: Partial<Record<AiSdkParam, any>>
} { } {
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
const rawProviderId = getAiSdkProviderId(actualProvider) const rawProviderId = getAiSdkProviderId(actualProvider)
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities, rawProviderId })
// 构建 provider 特定的选项 // 构建 provider 特定的选项
let providerSpecificOptions: Record<string, any> = {} let providerSpecificOptions: Record<string, any> = {}
const serviceTier = getServiceTier(model, actualProvider) const serviceTier = getServiceTier(model, actualProvider)
@ -172,14 +177,13 @@ export function buildProviderOptions(
case 'azure': case 'azure':
case 'azure-responses': case 'azure-responses':
{ {
const options: OpenAIResponsesProviderOptions = buildOpenAIProviderOptions( providerSpecificOptions = buildOpenAIProviderOptions(
assistant, assistant,
model, model,
capabilities, capabilities,
serviceTier, serviceTier,
textVerbosity textVerbosity
) )
providerSpecificOptions = options
} }
break break
case 'anthropic': case 'anthropic':
@ -197,10 +201,13 @@ export function buildProviderOptions(
case 'openrouter': case 'openrouter':
case 'openai-compatible': { case 'openai-compatible': {
// 对于其他 provider使用通用的构建逻辑 // 对于其他 provider使用通用的构建逻辑
const genericOptions = buildGenericProviderOptions(rawProviderId, assistant, model, capabilities)
providerSpecificOptions = { providerSpecificOptions = {
...buildGenericProviderOptions(assistant, model, capabilities), [rawProviderId]: {
serviceTier, ...genericOptions[rawProviderId],
textVerbosity serviceTier,
textVerbosity
}
} }
break break
} }
@ -236,48 +243,108 @@ export function buildProviderOptions(
case 'huggingface': case 'huggingface':
providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier) providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
break break
case SystemProviderIds.ollama:
providerSpecificOptions = buildOllamaProviderOptions(assistant, capabilities)
break
case SystemProviderIds.gateway:
providerSpecificOptions = buildAIGatewayOptions(assistant, model, capabilities, serviceTier, textVerbosity)
break
default: default:
// 对于其他 provider使用通用的构建逻辑 // 对于其他 provider使用通用的构建逻辑
providerSpecificOptions = buildGenericProviderOptions(rawProviderId, assistant, model, capabilities)
// Merge serviceTier and textVerbosity
providerSpecificOptions = { providerSpecificOptions = {
...buildGenericProviderOptions(assistant, model, capabilities), ...providerSpecificOptions,
serviceTier, [rawProviderId]: {
textVerbosity ...providerSpecificOptions[rawProviderId],
serviceTier,
textVerbosity
}
} }
} }
} else { } else {
throw error throw error
} }
} }
logger.debug('Built providerSpecificOptions', { providerSpecificOptions })
// 获取自定义参数并分离标准参数和 provider 特定参数 /**
* Retrieve custom parameters and separate standard parameters from provider-specific parameters.
*/
const customParams = getCustomParameters(assistant) const customParams = getCustomParameters(assistant)
const { standardParams, providerParams } = extractAiSdkStandardParams(customParams) const { standardParams, providerParams } = extractAiSdkStandardParams(customParams)
logger.debug('Extracted standardParams and providerParams', { standardParams, providerParams })
// 合并 provider 特定的自定义参数到 providerSpecificOptions /**
providerSpecificOptions = { * Get the actual AI SDK provider ID(s) from the already-built providerSpecificOptions.
...providerSpecificOptions, * For proxy providers (cherryin, aihubmix, newapi), this will be the actual SDK provider (e.g., 'google', 'openai', 'anthropic')
...providerParams * For regular providers, this will be the provider itself
} */
const actualAiSdkProviderIds = Object.keys(providerSpecificOptions)
let rawProviderKey = const primaryAiSdkProviderId = actualAiSdkProviderIds[0] // Use the first one as primary for non-scoped params
{
'google-vertex': 'google', /**
'google-vertex-anthropic': 'anthropic', * Merge custom parameters into providerSpecificOptions.
'azure-anthropic': 'anthropic', * Simple logic:
'ai-gateway': 'gateway', * 1. If key is in actualAiSdkProviderIds merge directly (user knows the actual AI SDK provider ID)
azure: 'openai', * 2. If key == rawProviderId:
'azure-responses': 'openai' * - If it's gateway/ollama preserve (they need their own config for routing/options)
}[rawProviderId] || rawProviderId * - Otherwise map to primary (this is a proxy provider like cherryin)
* 3. Otherwise treat as regular parameter, merge to primary provider
if (rawProviderKey === 'cherryin') { *
rawProviderKey = { gemini: 'google', ['openai-response']: 'openai' }[actualProvider.type] || actualProvider.type * Example:
* - User writes `cherryin: { opt: 'val' }` mapped to `google: { opt: 'val' }` (case 2, proxy)
* - User writes `gateway: { order: [...] }` stays as `gateway: { order: [...] }` (case 2, routing config)
* - User writes `google: { opt: 'val' }` stays as `google: { opt: 'val' }` (case 1)
* - User writes `customKey: 'val'` merged to `google: { customKey: 'val' }` (case 3)
*/
for (const key of Object.keys(providerParams)) {
if (actualAiSdkProviderIds.includes(key)) {
// Case 1: Key is an actual AI SDK provider ID - merge directly
providerSpecificOptions = {
...providerSpecificOptions,
[key]: {
...providerSpecificOptions[key],
...providerParams[key]
}
}
} else if (key === rawProviderId && !actualAiSdkProviderIds.includes(rawProviderId)) {
// Case 2: Key is the current provider (not in actualAiSdkProviderIds, so it's a proxy or special provider)
// Gateway is special: it needs routing config preserved
if (key === SystemProviderIds.gateway) {
// Preserve gateway config for routing
providerSpecificOptions = {
...providerSpecificOptions,
[key]: {
...providerSpecificOptions[key],
...providerParams[key]
}
}
} else {
// Proxy provider (cherryin, etc.) - map to actual AI SDK provider
providerSpecificOptions = {
...providerSpecificOptions,
[primaryAiSdkProviderId]: {
...providerSpecificOptions[primaryAiSdkProviderId],
...providerParams[key]
}
}
}
} else {
// Case 3: Regular parameter - merge to primary provider
providerSpecificOptions = {
...providerSpecificOptions,
[primaryAiSdkProviderId]: {
...providerSpecificOptions[primaryAiSdkProviderId],
[key]: providerParams[key]
}
}
}
} }
logger.debug('Final providerSpecificOptions after merging providerParams', { providerSpecificOptions })
// 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions } 以及提取的标准参数 // 返回 AI Core SDK 要求的格式:{ 'providerId': providerOptions } 以及提取的标准参数
return { return {
providerOptions: { providerOptions: providerSpecificOptions,
[rawProviderKey]: providerSpecificOptions
},
standardParams standardParams
} }
} }
@@ -295,7 +362,7 @@ function buildOpenAIProviderOptions(
   },
   serviceTier: OpenAIServiceTier,
   textVerbosity?: OpenAIVerbosity
-): OpenAIResponsesProviderOptions {
+): Record<string, OpenAIResponsesProviderOptions> {
   const { enableReasoning } = capabilities
   let providerOptions: OpenAIResponsesProviderOptions = {}
   // OpenAI reasoning parameters
@@ -334,7 +401,9 @@
     textVerbosity
   }

-  return providerOptions
+  return {
+    openai: providerOptions
+  }
 }

 /**
@@ -348,7 +417,7 @@ function buildAnthropicProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   }
-): AnthropicProviderOptions {
+): Record<string, AnthropicProviderOptions> {
   const { enableReasoning } = capabilities
   let providerOptions: AnthropicProviderOptions = {}
@@ -361,7 +430,11 @@ function buildAnthropicProviderOptions(
     }
   }

-  return providerOptions
+  return {
+    anthropic: {
+      ...providerOptions
+    }
+  }
 }

 /**
@@ -375,7 +448,7 @@ function buildGeminiProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   }
-): GoogleGenerativeAIProviderOptions {
+): Record<string, GoogleGenerativeAIProviderOptions> {
   const { enableReasoning, enableGenerateImage } = capabilities
   let providerOptions: GoogleGenerativeAIProviderOptions = {}
@@ -395,7 +468,11 @@ function buildGeminiProviderOptions(
     }
   }

-  return providerOptions
+  return {
+    google: {
+      ...providerOptions
+    }
+  }
 }

 function buildXAIProviderOptions(
@@ -406,7 +483,7 @@ function buildXAIProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   }
-): XaiProviderOptions {
+): Record<string, XaiProviderOptions> {
   const { enableReasoning } = capabilities
   let providerOptions: Record<string, any> = {}
@@ -418,7 +495,11 @@ function buildXAIProviderOptions(
     }
   }

-  return providerOptions
+  return {
+    xai: {
+      ...providerOptions
+    }
+  }
 }

 function buildCherryInProviderOptions(
@@ -432,19 +513,20 @@ function buildCherryInProviderOptions(
   actualProvider: Provider,
   serviceTier: OpenAIServiceTier,
   textVerbosity: OpenAIVerbosity
-): OpenAIResponsesProviderOptions | AnthropicProviderOptions | GoogleGenerativeAIProviderOptions {
+): Record<string, OpenAIResponsesProviderOptions | AnthropicProviderOptions | GoogleGenerativeAIProviderOptions> {
   switch (actualProvider.type) {
     case 'openai':
+      return buildGenericProviderOptions('cherryin', assistant, model, capabilities)
     case 'openai-response':
       return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier, textVerbosity)
     case 'anthropic':
       return buildAnthropicProviderOptions(assistant, model, capabilities)
     case 'gemini':
       return buildGeminiProviderOptions(assistant, model, capabilities)
-    default:
-      return buildGenericProviderOptions('cherryin', assistant, model, capabilities)
   }
+  return {}
 }

 /**
@@ -458,7 +540,7 @@ function buildBedrockProviderOptions(
     enableWebSearch: boolean
     enableGenerateImage: boolean
   }
-): BedrockProviderOptions {
+): Record<string, BedrockProviderOptions> {
   const { enableReasoning } = capabilities
   let providerOptions: BedrockProviderOptions = {}
@@ -475,13 +557,35 @@ function buildBedrockProviderOptions(
     providerOptions.anthropicBeta = betaHeaders
   }

-  return providerOptions
+  return {
+    bedrock: providerOptions
+  }
+}
+
+function buildOllamaProviderOptions(
+  assistant: Assistant,
+  capabilities: {
+    enableReasoning: boolean
+    enableWebSearch: boolean
+    enableGenerateImage: boolean
+  }
+): Record<string, OllamaCompletionProviderOptions> {
+  const { enableReasoning } = capabilities
+  const providerOptions: OllamaCompletionProviderOptions = {}
+  const reasoningEffort = assistant.settings?.reasoning_effort
+  if (enableReasoning) {
+    providerOptions.think = !['none', undefined].includes(reasoningEffort)
+  }
+  return {
+    ollama: providerOptions
+  }
 }
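Note: a usage sketch of the new Ollama branch, assuming reasoning_effort carries values like 'low'/'high' or 'none':

  const options = buildOllamaProviderOptions(assistant, {
    enableReasoning: true,
    enableWebSearch: false,
    enableGenerateImage: false
  })
  // reasoning_effort = 'high'          → { ollama: { think: true } }
  // reasoning_effort = 'none' or unset → { ollama: { think: false } }
  // enableReasoning = false            → { ollama: {} } (think left undefined)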
 /**
  * Build generic providerOptions for other providers
  */
 function buildGenericProviderOptions(
+  providerId: string,
   assistant: Assistant,
   model: Model,
   capabilities: {
@@ -524,5 +628,37 @@ function buildGenericProviderOptions(
     }
   }

-  return providerOptions
+  return {
+    [providerId]: providerOptions
+  }
+}
+
+function buildAIGatewayOptions(
+  assistant: Assistant,
+  model: Model,
+  capabilities: {
+    enableReasoning: boolean
+    enableWebSearch: boolean
+    enableGenerateImage: boolean
+  },
+  serviceTier: OpenAIServiceTier,
+  textVerbosity?: OpenAIVerbosity
+): Record<
+  string,
+  | OpenAIResponsesProviderOptions
+  | AnthropicProviderOptions
+  | GoogleGenerativeAIProviderOptions
+  | Record<string, unknown>
+> {
+  if (isAnthropicModel(model)) {
+    return buildAnthropicProviderOptions(assistant, model, capabilities)
+  } else if (isOpenAIModel(model)) {
+    return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier, textVerbosity)
+  } else if (isGeminiModel(model)) {
+    return buildGeminiProviderOptions(assistant, model, capabilities)
+  } else if (isGrokModel(model)) {
+    return buildXAIProviderOptions(assistant, model, capabilities)
+  } else {
+    return buildGenericProviderOptions('openai-compatible', assistant, model, capabilities)
+  }
 }
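Note: buildAIGatewayOptions keys the options off the model family rather than the provider, so a Claude model routed through the Vercel AI Gateway still receives Anthropic-shaped options. A sketch of the dispatch (model id illustrative):

  // e.g. for a claude-* model routed through the gateway:
  const options = buildAIGatewayOptions(assistant, claudeModel, capabilities, serviceTier)
  // isAnthropicModel → { anthropic: { ... } }, isOpenAIModel → { openai: { ... } },
  // isGeminiModel → { google: { ... } }, isGrokModel → { xai: { ... } },
  // anything else → { 'openai-compatible': { ... } }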

View File

@@ -250,9 +250,25 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
         enable_thinking: true,
         incremental_output: true
       }
+    // TODO: support the new-api type
+    case SystemProviderIds['new-api']:
+    case SystemProviderIds.cherryin: {
+      return {
+        extra_body: {
+          thinking: {
+            type: 'enabled' // auto is invalid
+          }
+        }
+      }
+    }
     case SystemProviderIds.hunyuan:
     case SystemProviderIds['tencent-cloud-ti']:
     case SystemProviderIds.doubao:
+    case SystemProviderIds.deepseek:
+    case SystemProviderIds.aihubmix:
+    case SystemProviderIds.sophnet:
+    case SystemProviderIds.ppio:
+    case SystemProviderIds.dmxapi:
       return {
         thinking: {
           type: 'enabled' // auto is invalid
@@ -274,8 +290,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
       logger.warn(
         `Skipping thinking options for provider ${provider.name} as DeepSeek v3.1 thinking control method is unknown`
       )
-    case SystemProviderIds.silicon:
-      // specially handled before
     }
   }
} }
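Note: the two branches differ only in where the flag sits in the request body. Roughly, assuming an OpenAI-compatible chat payload:

  // new-api / cherryin:
  // { model, messages, extra_body: { thinking: { type: 'enabled' } } }
  // hunyuan / tencent-cloud-ti / doubao / deepseek / aihubmix / sophnet / ppio / dmxapi:
  // { model, messages, thinking: { type: 'enabled' } }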

View File

@@ -264,9 +264,10 @@ export const CodeBlockView: React.FC<Props> = memo(({ children, language, onSave
         expanded={shouldExpand}
         wrapped={shouldWrap}
         maxHeight={`${MAX_COLLAPSED_CODE_HEIGHT}px`}
+        onRequestExpand={codeCollapsible ? () => setExpandOverride(true) : undefined}
       />
     ),
-    [children, codeEditor.enabled, handleHeightChange, language, onSave, shouldExpand, shouldWrap]
+    [children, codeCollapsible, codeEditor.enabled, handleHeightChange, language, onSave, shouldExpand, shouldWrap]
   )

   // Mapping of special view components

View File

@@ -64,7 +64,11 @@ exports[`CodeToolbar > basic rendering > should match snapshot with mixed tools
   data-title="code_block.more"
 >
   <div
+    aria-expanded="false"
+    aria-label="code_block.more"
     class="c2"
+    role="button"
+    tabindex="0"
   >
     <div
       class="tool-icon"

View File

@@ -1,6 +1,6 @@
 import type { ActionTool } from '@renderer/components/ActionTools'
 import { Dropdown, Tooltip } from 'antd'
-import { memo, useMemo } from 'react'
+import { memo, useCallback, useMemo } from 'react'

 import { ToolWrapper } from './styles'
@@ -9,13 +9,30 @@
 }

 const CodeToolButton = ({ tool }: CodeToolButtonProps) => {
+  const handleKeyDown = useCallback(
+    (e: React.KeyboardEvent<HTMLDivElement>) => {
+      if (e.key === 'Enter' || e.key === ' ') {
+        e.preventDefault()
+        tool.onClick?.()
+      }
+    },
+    [tool]
+  )
+
   const mainTool = useMemo(
     () => (
       <Tooltip key={tool.id} title={tool.tooltip} mouseEnterDelay={0.5} mouseLeaveDelay={0}>
-        <ToolWrapper onClick={tool.onClick}>{tool.icon}</ToolWrapper>
+        <ToolWrapper
+          onClick={tool.onClick}
+          onKeyDown={handleKeyDown}
+          role="button"
+          aria-label={tool.tooltip}
+          tabIndex={0}>
+          {tool.icon}
+        </ToolWrapper>
       </Tooltip>
     ),
-    [tool]
+    [tool, handleKeyDown]
   )

   if (tool.children?.length && tool.children.length > 0) {

View File

@@ -40,7 +40,19 @@ const CodeToolbar = ({ tools }: { tools: ActionTool[] }) => {
           {quickToolButtons}
           {quickTools.length > 1 && (
             <Tooltip title={t('code_block.more')} mouseEnterDelay={0.5}>
-              <ToolWrapper onClick={() => setShowQuickTools(!showQuickTools)} className={showQuickTools ? 'active' : ''}>
+              <ToolWrapper
+                onClick={() => setShowQuickTools(!showQuickTools)}
+                onKeyDown={(e) => {
+                  if (e.key === 'Enter' || e.key === ' ') {
+                    e.preventDefault()
+                    setShowQuickTools(!showQuickTools)
+                  }
+                }}
+                className={showQuickTools ? 'active' : ''}
+                role="button"
+                aria-label={t('code_block.more')}
+                aria-expanded={showQuickTools}
+                tabIndex={0}>
                 <EllipsisVertical className="tool-icon" />
               </ToolWrapper>
             </Tooltip>

View File

@@ -1,3 +1,4 @@
+import { loggerService } from '@logger'
 import { useCodeStyle } from '@renderer/context/CodeStyleProvider'
 import { useCodeHighlight } from '@renderer/hooks/useCodeHighlight'
 import { useSettings } from '@renderer/hooks/useSettings'
@@ -9,6 +10,15 @@ import React, { memo, useCallback, useEffect, useLayoutEffect, useMemo, useRef }
 import type { ThemedToken } from 'shiki/core'
 import styled from 'styled-components'

+const logger = loggerService.withContext('CodeViewer')
+
+interface SavedSelection {
+  startLine: number
+  startOffset: number
+  endLine: number
+  endOffset: number
+}
+
 interface CodeViewerProps {
   /** Code string value. */
   value: string
@@ -52,6 +62,10 @@ interface CodeViewerProps {
    * @default true
    */
   wrapped?: boolean
+  /**
+   * Callback to request expansion when multi-line selection is detected.
+   */
+  onRequestExpand?: () => void
 }

 /**
@@ -70,13 +84,24 @@ const CodeViewer = ({
   fontSize: customFontSize,
   className,
   expanded = true,
-  wrapped = true
+  wrapped = true,
+  onRequestExpand
 }: CodeViewerProps) => {
   const { codeShowLineNumbers: _lineNumbers, fontSize: _fontSize } = useSettings()
   const { getShikiPreProperties, isShikiThemeDark } = useCodeStyle()
   const shikiThemeRef = useRef<HTMLDivElement>(null)
   const scrollerRef = useRef<HTMLDivElement>(null)
   const callerId = useRef(`${Date.now()}-${uuid()}`).current
+  const savedSelectionRef = useRef<SavedSelection | null>(null)
+
+  // Ensure the active selection actually belongs to this CodeViewer instance
+  const selectionBelongsToViewer = useCallback((sel: Selection | null) => {
+    const scroller = scrollerRef.current
+    if (!scroller || !sel || sel.rangeCount === 0) return false
+    // Check if selection intersects with scroller
+    const range = sel.getRangeAt(0)
+    return scroller.contains(range.commonAncestorContainer)
+  }, [])

   const fontSize = useMemo(() => customFontSize ?? _fontSize - 1, [customFontSize, _fontSize])
   const lineNumbers = useMemo(() => options?.lineNumbers ?? _lineNumbers, [options?.lineNumbers, _lineNumbers])
@@ -112,6 +137,204 @@ const CodeViewer = ({
     }
   }, [language, getShikiPreProperties, isShikiThemeDark, className])

+  // Save the logical position of the current selection
+  const saveSelection = useCallback((): SavedSelection | null => {
+    const selection = window.getSelection()
+    if (!selection || selection.rangeCount === 0 || selection.isCollapsed) {
+      return null
+    }
+    // Only capture selections within this viewer's scroller
+    if (!selectionBelongsToViewer(selection)) {
+      return null
+    }
+    const range = selection.getRangeAt(0)
+    const scroller = scrollerRef.current
+    if (!scroller) return null
+
+    // Find the line numbers for the selection's start and end positions
+    const findLineAndOffset = (node: Node, offset: number): { line: number; offset: number } | null => {
+      // Walk up to find the element carrying the data-index attribute
+      let element = node.nodeType === Node.ELEMENT_NODE ? (node as Element) : node.parentElement
+
+      // Skip line-number elements and locate the actual line content
+      while (element) {
+        if (element.classList?.contains('line-number')) {
+          // If on a line number, move to the sibling line-content
+          const lineContainer = element.parentElement
+          const lineContent = lineContainer?.querySelector('.line-content')
+          if (lineContent) {
+            element = lineContent as Element
+            break
+          }
+        }
+        if (element.hasAttribute('data-index')) {
+          break
+        }
+        element = element.parentElement
+      }
+
+      if (!element || !element.hasAttribute('data-index')) {
+        logger.warn('Could not find data-index element', {
+          nodeName: node.nodeName,
+          nodeType: node.nodeType
+        })
+        return null
+      }
+
+      const lineIndex = parseInt(element.getAttribute('data-index') || '0', 10)
+      const lineContent = element.querySelector('.line-content') || element
+
+      // Calculate character offset within the line
+      let charOffset = 0
+      if (node.nodeType === Node.TEXT_NODE) {
+        // Walk all text nodes in the line to find the current node's position
+        const walker = document.createTreeWalker(lineContent as Node, NodeFilter.SHOW_TEXT)
+        let currentNode: Node | null
+        while ((currentNode = walker.nextNode())) {
+          if (currentNode === node) {
+            charOffset += offset
+            break
+          }
+          charOffset += currentNode.textContent?.length || 0
+        }
+      } else if (node.nodeType === Node.ELEMENT_NODE) {
+        // For element nodes, count the length of all preceding text
+        const textBefore = (node as Element).textContent?.slice(0, offset) || ''
+        charOffset = textBefore.length
+      }
+
+      logger.debug('findLineAndOffset result', {
+        lineIndex,
+        charOffset
+      })
+
+      return { line: lineIndex, offset: charOffset }
+    }
+
+    const start = findLineAndOffset(range.startContainer, range.startOffset)
+    const end = findLineAndOffset(range.endContainer, range.endOffset)
+
+    if (!start || !end) {
+      logger.warn('saveSelection failed', {
+        hasStart: !!start,
+        hasEnd: !!end
+      })
+      return null
+    }
+
+    logger.debug('saveSelection success', {
+      startLine: start.line,
+      startOffset: start.offset,
+      endLine: end.line,
+      endOffset: end.offset
+    })
+
+    return {
+      startLine: start.line,
+      startOffset: start.offset,
+      endLine: end.line,
+      endOffset: end.offset
+    }
+  }, [selectionBelongsToViewer])
+  // Scroll handler: save the selection for copying, but don't restore it (avoids selection-highlight issues)
+  const handleScroll = useCallback(() => {
+    // Only save selection state for copying; don't restore the selection on scroll
+    const saved = saveSelection()
+    if (saved) {
+      savedSelectionRef.current = saved
+      logger.debug('Selection saved for copy', {
+        startLine: saved.startLine,
+        endLine: saved.endLine
+      })
+    }
+  }, [saveSelection])
+
+  // Handle copy events so copies spanning the virtual scroll get the full content
+  const handleCopy = useCallback(
+    (event: ClipboardEvent) => {
+      const selection = window.getSelection()
+
+      // Ignore copies for selections outside this viewer
+      if (!selectionBelongsToViewer(selection)) {
+        return
+      }
+
+      if (!selection || selection.rangeCount === 0 || selection.isCollapsed) {
+        return
+      }
+
+      // Prefer saved selection from scroll, otherwise get it in real-time
+      let saved = savedSelectionRef.current
+      if (!saved) {
+        saved = saveSelection()
+      }
+
+      if (!saved) {
+        logger.warn('Cannot get selection, using browser default')
+        return
+      }
+
+      const { startLine, startOffset, endLine, endOffset } = saved
+
+      // Always use custom copy in collapsed state to handle virtual scroll edge cases
+      const needsCustomCopy = !expanded
+
+      logger.debug('Copy event', {
+        startLine,
+        endLine,
+        startOffset,
+        endOffset,
+        expanded,
+        needsCustomCopy,
+        usedSavedSelection: !!savedSelectionRef.current
+      })
+
+      if (needsCustomCopy) {
+        try {
+          const selectedLines: string[] = []
+          for (let i = startLine; i <= endLine; i++) {
+            const line = rawLines[i] || ''
+            if (i === startLine && i === endLine) {
+              // Single-line selection
+              selectedLines.push(line.slice(startOffset, endOffset))
+            } else if (i === startLine) {
+              // First line: from startOffset to end of line
+              selectedLines.push(line.slice(startOffset))
+            } else if (i === endLine) {
+              // Last line: from start of line to endOffset
+              selectedLines.push(line.slice(0, endOffset))
+            } else {
+              // Full middle lines
+              selectedLines.push(line)
+            }
+          }
+          const fullText = selectedLines.join('\n')
+          logger.debug('Custom copy success', {
+            linesCount: selectedLines.length,
+            totalLength: fullText.length,
+            firstLine: selectedLines[0]?.slice(0, 30),
+            lastLine: selectedLines[selectedLines.length - 1]?.slice(0, 30)
+          })
+          if (!event.clipboardData) {
+            logger.warn('clipboardData unavailable, using browser default copy')
+            return
+          }
+          event.clipboardData.setData('text/plain', fullText)
+          event.preventDefault()
+        } catch (error) {
+          logger.error('Custom copy failed', { error })
+        }
+      }
+    },
+    [selectionBelongsToViewer, expanded, saveSelection, rawLines]
+  )
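Note: the slicing in handleCopy is a standard start/middle/end split over a logical line range. A self-contained sketch of the same extraction (names hypothetical):

  interface Pos {
    line: number
    offset: number
  }

  // Extract the selected text from an array of raw lines, given logical start/end positions
  function extractSelection(lines: string[], start: Pos, end: Pos): string {
    const out: string[] = []
    for (let i = start.line; i <= end.line; i++) {
      const line = lines[i] ?? ''
      if (i === start.line && i === end.line) {
        out.push(line.slice(start.offset, end.offset)) // single-line selection
      } else if (i === start.line) {
        out.push(line.slice(start.offset)) // first line: offset to end
      } else if (i === end.line) {
        out.push(line.slice(0, end.offset)) // last line: start to offset
      } else {
        out.push(line) // full middle line
      }
    }
    return out.join('\n')
  }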
   // Virtualizer configuration
   const getScrollElement = useCallback(() => scrollerRef.current, [])
   const getItemKey = useCallback((index: number) => `${callerId}-${index}`, [callerId])
@@ -147,6 +370,58 @@ const CodeViewer = ({
     }
   }, [virtualItems, debouncedHighlightLines])

+  // Monitor selection changes, clear stale selection state, and auto-expand in collapsed state
+  const handleSelectionChange = useMemo(
+    () =>
+      debounce(() => {
+        const selection = window.getSelection()
+        // No valid selection: clear and return
+        if (!selection || selection.rangeCount === 0 || selection.isCollapsed) {
+          savedSelectionRef.current = null
+          return
+        }
+        // Only handle selections within this CodeViewer
+        if (!selectionBelongsToViewer(selection)) {
+          savedSelectionRef.current = null
+          return
+        }
+        // In collapsed state, detect multi-line selection and request expand
+        if (!expanded && onRequestExpand) {
+          const saved = saveSelection()
+          if (saved && saved.endLine > saved.startLine) {
+            logger.debug('Multi-line selection detected in collapsed state, requesting expand', {
+              startLine: saved.startLine,
+              endLine: saved.endLine
+            })
+            onRequestExpand()
+          }
+        }
+      }, 100),
+    [expanded, onRequestExpand, saveSelection, selectionBelongsToViewer]
+  )
+
+  useEffect(() => {
+    document.addEventListener('selectionchange', handleSelectionChange)
+    return () => {
+      document.removeEventListener('selectionchange', handleSelectionChange)
+      handleSelectionChange.cancel()
+    }
+  }, [handleSelectionChange])
+
+  // Listen for copy events
+  useEffect(() => {
+    const scroller = scrollerRef.current
+    if (!scroller) return
+
+    scroller.addEventListener('copy', handleCopy as EventListener)
+    return () => {
+      scroller.removeEventListener('copy', handleCopy as EventListener)
+    }
+  }, [handleCopy])
+
 // Report scrollHeight when it might change
   useLayoutEffect(() => {
     onHeightChange?.(scrollerRef.current?.scrollHeight ?? 0)
@@ -160,6 +435,7 @@ const CodeViewer = ({
       $wrap={wrapped}
       $expand={expanded}
       $lineHeight={estimateSize()}
+      onScroll={handleScroll}
       style={
         {
           '--gutter-width': `${gutterDigits}ch`,

View File

@@ -1,6 +1,6 @@
 import { Tooltip } from 'antd'
 import { Copy } from 'lucide-react'
-import type { FC } from 'react'
+import type { FC, KeyboardEvent } from 'react'
 import { useTranslation } from 'react-i18next'
 import styled from 'styled-components'
@@ -39,8 +39,24 @@
     })
   }

+  const handleKeyDown = (e: KeyboardEvent<HTMLDivElement>) => {
+    if (e.key === 'Enter' || e.key === ' ') {
+      e.preventDefault()
+      handleCopy()
+    }
+  }
+
+  const ariaLabel = tooltip || t('common.copy')
+
   const button = (
-    <ButtonContainer $color={color} $hoverColor={hoverColor} onClick={handleCopy}>
+    <ButtonContainer
+      $color={color}
+      $hoverColor={hoverColor}
+      onClick={handleCopy}
+      onKeyDown={handleKeyDown}
+      role="button"
+      aria-label={ariaLabel}
+      tabIndex={0}>
       <Copy size={size} className="copy-icon" />
       {label && <RightText size={size}>{label}</RightText>}
     </ButtonContainer>

View File

@@ -171,7 +171,9 @@ export const Toolbar: React.FC<ToolbarProps> = ({ editor, formattingState, onCom
         data-active={isActive}
         disabled={isDisabled}
         onClick={() => handleCommand(command)}
-        data-testid={`toolbar-${command}`}>
+        data-testid={`toolbar-${command}`}
+        aria-label={tooltipText}
+        aria-pressed={isActive}>
         <Icon color={isActive ? 'var(--color-primary)' : 'var(--color-text)'} />
       </ToolbarButton>
     )

View File

@@ -86,7 +86,7 @@ const WindowControls: React.FC = () => {
   return (
     <WindowControlsContainer>
       <Tooltip title={t('navbar.window.minimize')} placement="bottom" mouseEnterDelay={DEFAULT_DELAY}>
-        <ControlButton onClick={handleMinimize} aria-label="Minimize">
+        <ControlButton onClick={handleMinimize} aria-label={t('navbar.window.minimize')}>
           <Minus size={14} />
         </ControlButton>
       </Tooltip>
@@ -94,12 +94,14 @@ const WindowControls: React.FC = () => {
         title={isMaximized ? t('navbar.window.restore') : t('navbar.window.maximize')}
         placement="bottom"
         mouseEnterDelay={DEFAULT_DELAY}>
-        <ControlButton onClick={handleMaximize} aria-label={isMaximized ? 'Restore' : 'Maximize'}>
+        <ControlButton
+          onClick={handleMaximize}
+          aria-label={isMaximized ? t('navbar.window.restore') : t('navbar.window.maximize')}>
           {isMaximized ? <WindowRestoreIcon size={14} /> : <Square size={14} />}
         </ControlButton>
       </Tooltip>
       <Tooltip title={t('navbar.window.close')} placement="bottom" mouseEnterDelay={DEFAULT_DELAY}>
-        <ControlButton $isClose onClick={handleClose} aria-label="Close">
+        <ControlButton $isClose onClick={handleClose} aria-label={t('navbar.window.close')}>
           <X size={17} />
         </ControlButton>
       </Tooltip>

View File

@@ -12,6 +12,7 @@ import {
   isDeepSeekHybridInferenceModel,
   isDoubaoSeedAfter251015,
   isDoubaoThinkingAutoModel,
+  isFixedReasoningModel,
   isGeminiReasoningModel,
   isGrok4FastReasoningModel,
   isHunyuanReasoningModel,
@@ -356,6 +357,10 @@ describe('DeepSeek & Thinking Tokens', () => {
       )
     ).toBe(true)
     expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v2' }))).toBe(false)
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.2' }))).toBe(true)
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2' }))).toBe(true)
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-chat' }))).toBe(true)
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.2-speciale' }))).toBe(false)

     const allowed = createModel({ id: 'deepseek-v3.1', provider: 'doubao' })
     expect(isSupportedThinkingTokenModel(allowed)).toBe(true)
@@ -364,6 +369,37 @@ describe('DeepSeek & Thinking Tokens', () => {
     expect(isSupportedThinkingTokenModel(disallowed)).toBe(false)
   })

+  it('supports DeepSeek v3.1+ models from newly added providers', () => {
+    // Test newly added providers for DeepSeek thinking token support
+    const newProviders = ['deepseek', 'cherryin', 'new-api', 'aihubmix', 'sophnet', 'dmxapi']
+    newProviders.forEach((provider) => {
+      const model = createModel({ id: 'deepseek-v3.1', provider })
+      expect(
+        isSupportedThinkingTokenModel(model),
+        `Provider ${provider} should support thinking tokens for deepseek-v3.1`
+      ).toBe(true)
+    })
+  })
+
+  it('tests various prefix patterns for isDeepSeekHybridInferenceModel', () => {
+    // Test with custom prefixes
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'custom-deepseek-v3.2' }))).toBe(true)
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'prefix-deepseek-v3.1' }))).toBe(true)
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2' }))).toBe(true)
+
+    // Test that speciale is properly excluded
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'custom-deepseek-v3.2-speciale' }))).toBe(false)
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2-speciale' }))).toBe(false)
+
+    // Test basic deepseek-chat
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-chat' }))).toBe(true)
+
+    // Test version variations
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.1.2' }))).toBe(true)
+    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3-1' }))).toBe(true)
+  })
+
   it('supports Gemini thinking models while filtering image variants', () => {
     expect(isSupportedThinkingTokenModel(createModel({ id: 'gemini-2.5-flash-latest' }))).toBe(true)
     expect(isSupportedThinkingTokenModel(createModel({ id: 'gemini-2.5-flash-image' }))).toBe(false)
@@ -535,6 +571,41 @@ describe('isReasoningModel', () => {
     const magistral = createModel({ id: 'magistral-reasoning' })
     expect(isReasoningModel(magistral)).toBe(true)
   })
+
+  it('identifies fixed reasoning models', () => {
+    const models = [
+      'deepseek-reasoner',
+      'o1-preview',
+      'o1-mini',
+      'qwq-32b-preview',
+      'step-3-minimax',
+      'generic-reasoning-model',
+      'some-random-model-thinking',
+      'some-random-model-think',
+      'deepseek-v3.2-speciale'
+    ]
+    models.forEach((id) => {
+      const model = createModel({ id })
+      expect(isFixedReasoningModel(model), `Model ${id} should be reasoning`).toBe(true)
+    })
+  })
+
+  it('excludes non-fixed reasoning models from isFixedReasoningModel', () => {
+    // Models that support thinking tokens or reasoning effort should NOT be fixed reasoning models
+    const nonFixedModels = [
+      { id: 'deepseek-v3.2', provider: 'deepseek' }, // Supports thinking tokens
+      { id: 'deepseek-chat', provider: 'deepseek' }, // Supports thinking tokens
+      { id: 'claude-3-opus-20240229', provider: 'anthropic' }, // Supports thinking tokens via extended_thinking
+      { id: 'gpt-4o', provider: 'openai' }, // Not a reasoning model at all
+      { id: 'gpt-4', provider: 'openai' } // Not a reasoning model at all
+    ]
+    nonFixedModels.forEach(({ id, provider }) => {
+      const model = createModel({ id, provider })
+      expect(isFixedReasoningModel(model), `Model ${id} should NOT be fixed reasoning`).toBe(false)
+    })
+  })
 })

 describe('Thinking model classification', () => {

View File

@@ -140,12 +140,8 @@ describe('isFunctionCallingModel', () => {
   it('excludes explicitly blocked ids', () => {
     expect(isFunctionCallingModel(createModel({ id: 'gemini-1.5-flash' }))).toBe(false)
-  })
-
-  it('forces support for trusted providers', () => {
-    for (const provider of ['deepseek', 'anthropic', 'kimi', 'moonshot']) {
-      expect(isFunctionCallingModel(createModel({ provider }))).toBe(true)
-    }
+    expect(isFunctionCallingModel(createModel({ id: 'deepseek-v3.2-speciale' }))).toBe(false)
+    expect(isFunctionCallingModel(createModel({ id: 'deepseek/deepseek-v3.2-speciale' }))).toBe(false)
   })

   it('returns true when identified as deepseek hybrid inference model', () => {
@@ -157,4 +153,19 @@ describe('isFunctionCallingModel', () => {
     deepSeekHybridMock.mockReturnValueOnce(true)
     expect(isFunctionCallingModel(createModel({ id: 'deepseek-v3-1', provider: 'dashscope' }))).toBe(false)
   })
+
+  it('supports anthropic models through claude regex match', () => {
+    expect(isFunctionCallingModel(createModel({ id: 'claude-3-5-sonnet', provider: 'anthropic' }))).toBe(true)
+    expect(isFunctionCallingModel(createModel({ id: 'claude-3-opus', provider: 'anthropic' }))).toBe(true)
+  })
+
+  it('supports kimi models through kimi-k2 regex match', () => {
+    expect(isFunctionCallingModel(createModel({ id: 'kimi-k2-0711-preview', provider: 'moonshot' }))).toBe(true)
+    expect(isFunctionCallingModel(createModel({ id: 'kimi-k2', provider: 'kimi' }))).toBe(true)
+  })
+
+  it('supports deepseek models through deepseek regex match', () => {
+    expect(isFunctionCallingModel(createModel({ id: 'deepseek-chat', provider: 'deepseek' }))).toBe(true)
+    expect(isFunctionCallingModel(createModel({ id: 'deepseek-coder', provider: 'deepseek' }))).toBe(true)
+  })
 })

View File

@@ -252,18 +252,22 @@ describe('model utils', () => {
 describe('getModelSupportedVerbosity', () => {
   it('returns only "high" for GPT-5 Pro models', () => {
-    expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high'])
-    expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high'])
+    expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, null, 'high'])
+    expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([
+      undefined,
+      null,
+      'high'
+    ])
   })

   it('returns all levels for non-Pro GPT-5 models', () => {
     const previewModel = createModel({ id: 'gpt-5-preview' })
-    expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high'])
+    expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, null, 'low', 'medium', 'high'])
   })

   it('returns all levels for GPT-5.1 models', () => {
     const gpt51Model = createModel({ id: 'gpt-5.1-preview' })
-    expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high'])
+    expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, null, 'low', 'medium', 'high'])
   })

   it('returns only undefined for non-GPT-5 models', () => {

View File

@@ -1853,7 +1853,7 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
     }
   ],
   huggingface: [],
-  'ai-gateway': [],
+  gateway: [],
   cerebras: [
     {
       id: 'gpt-oss-120b',

View File

@@ -21,7 +21,7 @@ import { isTextToImageModel } from './vision'
 // Reasoning models
 export const REASONING_REGEX =
-  /^(?!.*-non-reasoning\b)(o\d+(?:-[\w-]+)?|.*\b(?:reasoning|reasoner|thinking)\b.*|.*-[rR]\d+.*|.*\bqwq(?:-[\w-]+)?\b.*|.*\bhunyuan-t1(?:-[\w-]+)?\b.*|.*\bglm-zero-preview\b.*|.*\bgrok-(?:3-mini|4|4-fast)(?:-[\w-]+)?\b.*)$/i
+  /^(?!.*-non-reasoning\b)(o\d+(?:-[\w-]+)?|.*\b(?:reasoning|reasoner|thinking|think)\b.*|.*-[rR]\d+.*|.*\bqwq(?:-[\w-]+)?\b.*|.*\bhunyuan-t1(?:-[\w-]+)?\b.*|.*\bglm-zero-preview\b.*|.*\bgrok-(?:3-mini|4|4-fast)(?:-[\w-]+)?\b.*)$/i

 // Map from model type to the reasoning_effort values it supports
 // TODO: refactor this. too many identical options
@@ -161,7 +161,13 @@ function _isSupportedThinkingTokenModel(model: Model): boolean {
       'nvidia',
       'ppio',
       'hunyuan',
-      'tencent-cloud-ti'
+      'tencent-cloud-ti',
+      'deepseek',
+      'cherryin',
+      'new-api',
+      'aihubmix',
+      'sophnet',
+      'dmxapi'
     ] satisfies SystemProviderId[]
   ).some((id) => id === model.provider)
 }
@@ -462,15 +468,19 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
 export const isDeepSeekHybridInferenceModel = (model: Model) => {
   const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
     const modelId = getLowerBaseModelName(model.id)
-    // DeepSeek itself uses chat and reasoner for reasoning control; other providers need separate checks since their ids may differ
     // openrouter: deepseek/deepseek-chat-v3.1 — other providers might copy DeepSeek and ship a same-id model as the non-thinking variant, which is a risk here
+    // Assume here that every deepseek-chat is deepseek-v3.2
     // Matches: "deepseek-v3" followed by ".digit" or "-digit".
     // Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
     // until the end of the string.
     // Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
     // Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
     // TODO: move to utils and add test cases
-    return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
+    return (
+      /(\w+-)?deepseek-v3(?:\.\d|-\d)(?:(\.|-)(?!speciale$)\w+)?$/.test(modelId) ||
+      modelId.includes('deepseek-chat-v3.1') ||
+      modelId.includes('deepseek-chat')
+    )
   })
   return idResult || nameResult
 }
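Note: the tightened regex allows an optional word prefix and uses a negative lookahead to reject the '-speciale' suffix; plain deepseek-chat is caught by the separate includes() check. A quick behavior check (ids taken from the tests above):

  const re = /(\w+-)?deepseek-v3(?:\.\d|-\d)(?:(\.|-)(?!speciale$)\w+)?$/
  re.test('deepseek-v3.2')           // true
  re.test('custom-deepseek-v3.2')    // true
  re.test('deepseek-v3.1-alpha')     // true
  re.test('deepseek-v3-1')           // true
  re.test('deepseek-v3.2-speciale')  // false — the lookahead rejects the suffix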
@@ -545,7 +555,8 @@ export function isReasoningModel(model?: Model): boolean {
     isMiniMaxReasoningModel(model) ||
     modelId.includes('magistral') ||
     modelId.includes('pangu-pro-moe') ||
-    modelId.includes('seed-oss')
+    modelId.includes('seed-oss') ||
+    modelId.includes('deepseek-v3.2-speciale')
   ) {
     return true
   }
@@ -596,3 +607,17 @@ export const findTokenLimit = (modelId: string): { min: number; max: number } |
   }
   return undefined
 }
+
+/**
+ * Determines if a model is a fixed reasoning model.
+ *
+ * A model is considered a fixed reasoning model if it meets all of the following criteria:
+ * - It is a reasoning model
+ * - It does NOT support thinking tokens
+ * - It does NOT support reasoning effort
+ *
+ * @param model - The model to check
+ * @returns `true` if the model is a fixed reasoning model, `false` otherwise
+ */
+export const isFixedReasoningModel = (model: Model) =>
+  isReasoningModel(model) && !isSupportedThinkingTokenModel(model) && !isSupportedReasoningEffortModel(model)
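Note: in other words, a fixed reasoning model always thinks and exposes no control knob. Using the createModel helper from the specs above:

  isFixedReasoningModel(createModel({ id: 'deepseek-reasoner' }))                    // true — always reasons, no control
  isFixedReasoningModel(createModel({ id: 'deepseek-v3.2', provider: 'deepseek' })) // false — thinking tokens are controllable
  isFixedReasoningModel(createModel({ id: 'gpt-4o', provider: 'openai' }))          // false — not a reasoning model at all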

View File

@@ -46,7 +46,8 @@ const FUNCTION_CALLING_EXCLUDED_MODELS = [
   'glm-4\\.5v',
   'gemini-2.5-flash-image(?:-[\\w-]+)?',
   'gemini-2.0-flash-preview-image-generation',
-  'gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?'
+  'gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?',
+  'deepseek-v3.2-speciale'
 ]

 export const FUNCTION_CALLING_REGEX = new RegExp(
@@ -85,10 +86,6 @@ export function isFunctionCallingModel(model?: Model): boolean {
     }
   }

-  if (['deepseek', 'anthropic', 'kimi', 'moonshot'].includes(model.provider)) {
-    return true
-  }
-
   // 2025/08/26: Neither Bailian nor Volcengine supports function calling for v3.1
   // Default to supported for now
   if (isDeepSeekHybridInferenceModel(model)) {

View File

@@ -11,7 +11,8 @@ import {
   isGPT51SeriesModel,
   isOpenAIChatCompletionOnlyModel,
   isOpenAIOpenWeightModel,
-  isOpenAIReasoningModel
+  isOpenAIReasoningModel,
+  isSupportVerbosityModel
 } from './openai'
 import { isQwenMTModel } from './qwen'
 import { isFunctionCallingModel } from './tooluse'
@@ -156,10 +157,10 @@ const MODEL_SUPPORTED_VERBOSITY: readonly {
  * For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported.
  * For GPT-5.1 series models, 'low', 'medium', and 'high' are supported.
  * @param model - The model to check
- * @returns An array of supported verbosity levels, always including `undefined` as the first element
+ * @returns An array of supported verbosity levels, always including `undefined` as the first element and `null` when applicable
  */
 export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => {
-  if (!model) {
+  if (!model || !isSupportVerbosityModel(model)) {
     return [undefined]
   }
@@ -167,7 +168,7 @@ export const getModelSupportedVerbosity = (model: Model | undefined | null): Ope
   for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) {
     if (validator(model)) {
-      supportedValues = [...values]
+      supportedValues = [null, ...values]
       break
     }
   }
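Note: combined with the isSupportVerbosityModel gate, the returned arrays now look like this (values per the updated tests):

  getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))     // [undefined, null, 'high']
  getModelSupportedVerbosity(createModel({ id: 'gpt-5-preview' })) // [undefined, null, 'low', 'medium', 'high']
  getModelSupportedVerbosity(createModel({ id: 'gpt-4o' }))        // [undefined] — fails the verbosity gate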
@@ -180,6 +181,11 @@ export const isGeminiModel = (model: Model) => {
   return modelId.includes('gemini')
 }

+export const isGrokModel = (model: Model) => {
+  const modelId = getLowerBaseModelName(model.id)
+  return modelId.includes('grok')
+}
+
 // Zhipu vision reasoning models mark their reasoning output with this pair of special tokens
 export const ZHIPU_RESULT_TOKENS = ['<|begin_of_box|>', '<|end_of_box|>'] as const

View File

@@ -53,7 +53,10 @@ const visionAllowedModels = [
   'llama-4(?:-[\\w-]+)?',
   'step-1o(?:.*vision)?',
   'step-1v(?:-[\\w-]+)?',
-  'qwen-omni(?:-[\\w-]+)?'
+  'qwen-omni(?:-[\\w-]+)?',
+  'mistral-large-(2512|latest)',
+  'mistral-medium-(2508|latest)',
+  'mistral-small-(2506|latest)'
 ]

 const visionExcludedModels = [

View File

@@ -676,10 +676,10 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
     isSystem: true,
     enabled: false
   },
-  'ai-gateway': {
-    id: 'ai-gateway',
-    name: 'AI Gateway',
-    type: 'ai-gateway',
+  gateway: {
+    id: 'gateway',
+    name: 'Vercel AI Gateway',
+    type: 'gateway',
     apiKey: '',
     apiHost: 'https://ai-gateway.vercel.sh/v1/ai',
     models: [],
@@ -762,7 +762,7 @@ export const PROVIDER_LOGO_MAP: AtLeast<SystemProviderId, string> = {
   longcat: LongCatProviderLogo,
   huggingface: HuggingfaceProviderLogo,
   sophnet: SophnetProviderLogo,
-  'ai-gateway': AIGatewayProviderLogo,
+  gateway: AIGatewayProviderLogo,
   cerebras: CerebrasProviderLogo
 } as const
@@ -927,7 +927,7 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
     websites: {
       official: 'https://www.dmxapi.cn/register?aff=bwwY',
       apiKey: 'https://www.dmxapi.cn/register?aff=bwwY',
-      docs: 'https://dmxapi.cn/models.html#code-block',
+      docs: 'https://doc.dmxapi.cn/',
       models: 'https://www.dmxapi.cn/pricing'
     }
   },
@@ -1413,7 +1413,7 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
       models: 'https://huggingface.co/models'
     }
   },
-  'ai-gateway': {
+  gateway: {
     api: {
       url: 'https://ai-gateway.vercel.sh/v1/ai'
     },

View File

@@ -51,7 +51,7 @@ export function useTextareaResize(options: UseTextareaResizeOptions = {}): UseTe
   const { maxHeight = 400, minHeight = 30, autoResize = true } = options

   const textareaRef = useRef<TextAreaRef>(null)
-  const [customHeight, setCustomHeight] = useState<number>()
+  const [customHeight, setCustomHeight] = useState<number | undefined>(undefined)
   const [isExpanded, setIsExpanded] = useState(false)

   const resize = useCallback(

View File

@@ -87,7 +87,7 @@ const providerKeyMap = {
   longcat: 'provider.longcat',
   huggingface: 'provider.huggingface',
   sophnet: 'provider.sophnet',
-  'ai-gateway': 'provider.ai-gateway',
+  gateway: 'provider.ai-gateway',
   cerebras: 'provider.cerebras'
 } as const

View File

@@ -2531,7 +2531,7 @@
   },
   "provider": {
     "302ai": "302.AI",
-    "ai-gateway": "AI Gateway",
+    "ai-gateway": "Vercel AI Gateway",
     "aihubmix": "AiHubMix",
     "aionly": "AiOnly",
     "alayanew": "Alaya NeW",

View File

@@ -2531,7 +2531,7 @@
   },
   "provider": {
     "302ai": "302.AI",
-    "ai-gateway": "AI Gateway",
+    "ai-gateway": "Vercel AI Gateway",
     "aihubmix": "AiHubMix",
     "aionly": "唯一AI (AiOnly)",
     "alayanew": "Alaya NeW",

View File

@@ -177,8 +177,10 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
     resize: resizeTextArea,
     focus: focusTextarea,
     setExpanded,
-    isExpanded: textareaIsExpanded
-  } = useTextareaResize({ maxHeight: 400, minHeight: 30 })
+    isExpanded: textareaIsExpanded,
+    customHeight,
+    setCustomHeight
+  } = useTextareaResize({ maxHeight: 500, minHeight: 30 })

   const { sendMessageShortcut, apiServer } = useSettings()
   const { t } = useTranslation()
@@ -474,6 +476,8 @@
         text={text}
         onTextChange={setText}
         textareaRef={textareaRef}
+        height={customHeight}
+        onHeightChange={setCustomHeight}
         resizeTextArea={resizeTextArea}
         focusTextarea={focusTextarea}
         placeholder={placeholderText}

View File

@@ -143,9 +143,11 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
     resize: resizeTextArea,
     focus: focusTextarea,
     setExpanded,
-    isExpanded: textareaIsExpanded
+    isExpanded: textareaIsExpanded,
+    customHeight,
+    setCustomHeight
   } = useTextareaResize({
-    maxHeight: 400,
+    maxHeight: 500,
     minHeight: 30
   })
@@ -257,7 +259,7 @@
       setText('')
       setFiles([])
       setTimeoutTimer('sendMessage_1', () => setText(''), 500)
-      setTimeoutTimer('sendMessage_2', () => resizeTextArea(true), 0)
+      setTimeoutTimer('sendMessage_2', () => resizeTextArea(), 0)
     } catch (error) {
       logger.warn('Failed to send message:', error as Error)
       parent?.recordException(error as Error)
@@ -478,6 +480,8 @@
         text={text}
         onTextChange={setText}
         textareaRef={textareaRef}
+        height={customHeight}
+        onHeightChange={setCustomHeight}
         resizeTextArea={resizeTextArea}
         focusTextarea={focusTextarea}
         isLoading={loading}

View File

@@ -1,4 +1,5 @@
-import type { FC } from 'react'
+import type { FC, KeyboardEvent } from 'react'
+import { useTranslation } from 'react-i18next'

 interface Props {
   disabled: boolean
@@ -6,10 +7,24 @@ interface Props {
 }

 const SendMessageButton: FC<Props> = ({ disabled, sendMessage }) => {
+  const { t } = useTranslation()
+
+  const handleKeyDown = (e: KeyboardEvent<HTMLElement>) => {
+    if (!disabled && (e.key === 'Enter' || e.key === ' ')) {
+      e.preventDefault()
+      sendMessage()
+    }
+  }
+
   return (
     <i
       className="iconfont icon-ic_send"
-      onClick={sendMessage}
+      onClick={disabled ? undefined : sendMessage}
+      onKeyDown={handleKeyDown}
+      role="button"
+      aria-label={t('chat.input.send')}
+      aria-disabled={disabled}
+      tabIndex={disabled ? -1 : 0}
       style={{
         cursor: disabled ? 'not-allowed' : 'pointer',
         color: disabled ? 'var(--color-text-3)' : 'var(--color-primary)',

View File

@@ -50,6 +50,9 @@ export interface InputbarCoreProps {
   resizeTextArea: (force?: boolean) => void
   focusTextarea: () => void

+  height: number | undefined
+  onHeightChange: (height: number) => void
+
   supportedExts: string[]

   isLoading: boolean
@@ -104,6 +107,8 @@
   textareaRef,
   resizeTextArea,
   focusTextarea,
+  height,
+  onHeightChange,
   supportedExts,
   isLoading,
   onPause,
@@ -131,8 +136,6 @@
   } = useSettings()
   const quickPanelTriggersEnabled = forceEnableQuickPanelTriggers ?? enableQuickPanelTriggers

-  const [textareaHeight, setTextareaHeight] = useState<number>()
-
   const { t } = useTranslation()
   const [isTranslating, setIsTranslating] = useState(false)
   const { getLanguageByLangcode } = useTranslate()
@@ -178,8 +181,10 @@
     enabled: config.enableDragDrop,
     t
   })
-  // Whether sending is possible: text is non-empty or there are files
-  const cannotSend = isEmpty && files.length === 0
+  // Whether there is content: text is non-empty or there are files
+  const noContent = isEmpty && files.length === 0
+  // Unified disable condition for the send entry point: empty content, generating, or global search state
+  const isSendDisabled = noContent || isLoading || searching

   useEffect(() => {
     setExtensions(supportedExts)
@@ -310,7 +315,7 @@
       const isEnterPressed = event.key === 'Enter' && !event.nativeEvent.isComposing
       if (isEnterPressed) {
-        if (isSendMessageKeyPressed(event, sendMessageShortcut) && !cannotSend) {
+        if (isSendMessageKeyPressed(event, sendMessageShortcut) && !isSendDisabled) {
           handleSendMessage()
           event.preventDefault()
           return
@@ -356,7 +361,7 @@
       translate,
       handleToggleExpanded,
       sendMessageShortcut,
-      cannotSend,
+      isSendDisabled,
       handleSendMessage,
       setText,
       setTimeoutTimer,
@@ -538,8 +543,8 @@
       const handleMouseMove = (e: MouseEvent) => {
         const deltaY = startDragY.current - e.clientY
-        const newHeight = Math.max(40, Math.min(400, startHeight.current + deltaY))
-        setTextareaHeight(newHeight)
+        const newHeight = Math.max(40, Math.min(500, startHeight.current + deltaY))
+        onHeightChange(newHeight)
       }

       const handleMouseUp = () => {
@@ -550,7 +555,7 @@
       document.addEventListener('mousemove', handleMouseMove)
       document.addEventListener('mouseup', handleMouseUp)
     },
-    [config.enableDragDrop, setTextareaHeight, textareaRef]
+    [config.enableDragDrop, onHeightChange, textareaRef]
   )

   const onQuote = useCallback(
@@ -617,7 +622,7 @@
   const rightSectionExtras = useMemo(() => {
     const extras: React.ReactNode[] = []
     extras.push(<TranslateButton key="translate" text={text} onTranslated={onTranslated} isLoading={isTranslating} />)
-    extras.push(<SendMessageButton sendMessage={handleSendMessage} disabled={cannotSend || isLoading || searching} />)
+    extras.push(<SendMessageButton sendMessage={handleSendMessage} disabled={isSendDisabled} />)

     if (isLoading) {
       extras.push(
@@ -630,7 +635,7 @@
     }

     return <>{extras}</>
-  }, [text, onTranslated, isTranslating, handleSendMessage, cannotSend, isLoading, searching, t, onPause])
+  }, [text, onTranslated, isTranslating, handleSendMessage, isSendDisabled, isLoading, t, onPause])

   const quickPanelElement = config.enableQuickPanel ? <QuickPanelView setInputText={setText} /> : null
@@ -667,11 +672,11 @@
             variant="borderless"
             spellCheck={enableSpellCheck}
             rows={2}
-            autoSize={textareaHeight ? false : { minRows: 2, maxRows: 20 }}
+            autoSize={height ? false : { minRows: 2, maxRows: 20 }}
             styles={{ textarea: TextareaStyle }}
             style={{
               fontSize,
-              height: textareaHeight,
+              height: height,
               minHeight: '30px'
             }}
             disabled={isTranslating || searching}

View File

@@ -31,7 +31,7 @@ const ActivityDirectoryButton: FC<Props> = ({ quickPanel, quickPanelController,
   return (
     <Tooltip placement="top" title={t('chat.input.activity_directory.title')} mouseLeaveDelay={0} arrow>
-      <ActionIconButton onClick={handleOpenQuickPanel}>
+      <ActionIconButton onClick={handleOpenQuickPanel} aria-label={t('chat.input.activity_directory.title')}>
        <FolderOpen size={18} />
       </ActionIconButton>
     </Tooltip>

View File

@@ -152,13 +152,15 @@ const AttachmentButton: FC<Props> = ({ quickPanel, couldAddImageFile, extensions
     }
   }, [couldAddImageFile, openQuickPanel, quickPanel, t])

+  const ariaLabel = couldAddImageFile ? t('chat.input.upload.image_or_document') : t('chat.input.upload.document')
+
   return (
-    <Tooltip
-      placement="top"
-      title={couldAddImageFile ? t('chat.input.upload.image_or_document') : t('chat.input.upload.document')}
-      mouseLeaveDelay={0}
-      arrow>
-      <ActionIconButton onClick={openFileSelectDialog} active={files.length > 0} disabled={disabled}>
+    <Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
+      <ActionIconButton
+        onClick={openFileSelectDialog}
+        active={files.length > 0}
+        disabled={disabled}
+        aria-label={ariaLabel}>
         <Paperclip size={18} />
       </ActionIconButton>
     </Tooltip>

View File

@@ -15,18 +15,18 @@ interface Props {
 const GenerateImageButton: FC<Props> = ({ model, assistant, onEnableGenerateImage }) => {
   const { t } = useTranslation()

+  const ariaLabel = isGenerateImageModel(model)
+    ? t('chat.input.generate_image')
+    : t('chat.input.generate_image_not_supported')
+
   return (
-    <Tooltip
-      placement="top"
-      title={
-        isGenerateImageModel(model) ? t('chat.input.generate_image') : t('chat.input.generate_image_not_supported')
-      }
-      mouseLeaveDelay={0}
-      arrow>
+    <Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
       <ActionIconButton
         onClick={onEnableGenerateImage}
         active={assistant.enableGenerateImage}
-        disabled={!isGenerateImageModel(model)}>
+        disabled={!isGenerateImageModel(model)}
+        aria-label={ariaLabel}
+        aria-pressed={assistant.enableGenerateImage}>
         <Image size={18} />
       </ActionIconButton>
     </Tooltip>

View File

@@ -124,7 +124,8 @@ const KnowledgeBaseButton: FC<Props> = ({ quickPanel, selectedBases, onSelect, d
       <ActionIconButton
         onClick={handleOpenQuickPanel}
         active={selectedBases && selectedBases.length > 0}
-        disabled={disabled}>
+        disabled={disabled}
+        aria-label={t('chat.input.knowledge_base')}>
         <FileSearch size={18} />
       </ActionIconButton>
     </Tooltip>

View File

@@ -516,7 +516,10 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
   return (
     <Tooltip placement="top" title={t('settings.mcp.title')} mouseLeaveDelay={0} arrow>
-      <ActionIconButton onClick={handleOpenQuickPanel} active={assistant.mcpServers && assistant.mcpServers.length > 0}>
+      <ActionIconButton
+        onClick={handleOpenQuickPanel}
+        active={assistant.mcpServers && assistant.mcpServers.length > 0}
+        aria-label={t('settings.mcp.title')}>
         <Hammer size={18} />
       </ActionIconButton>
     </Tooltip>
View File
@ -46,7 +46,10 @@ const MentionModelsButton: FC<Props> = ({
return ( return (
<Tooltip placement="top" title={t('assistants.presets.edit.model.select.title')} mouseLeaveDelay={0} arrow> <Tooltip placement="top" title={t('assistants.presets.edit.model.select.title')} mouseLeaveDelay={0} arrow>
<ActionIconButton onClick={handleOpenQuickPanel} active={mentionedModels.length > 0}> <ActionIconButton
onClick={handleOpenQuickPanel}
active={mentionedModels.length > 0}
aria-label={t('assistants.presets.edit.model.select.title')}>
<AtSign size={18} /> <AtSign size={18} />
</ActionIconButton> </ActionIconButton>
</Tooltip> </Tooltip>
View File
@ -20,7 +20,9 @@ const NewContextButton: FC<Props> = ({ onNewContext }) => {
title={t('chat.input.new.context', { Command: newContextShortcut })} title={t('chat.input.new.context', { Command: newContextShortcut })}
mouseLeaveDelay={0} mouseLeaveDelay={0}
arrow> arrow>
<ActionIconButton onClick={onNewContext}> <ActionIconButton
onClick={onNewContext}
aria-label={t('chat.input.new.context', { Command: newContextShortcut })}>
<Eraser size={18} /> <Eraser size={18} />
</ActionIconButton> </ActionIconButton>
</Tooltip> </Tooltip>
View File
@ -250,7 +250,7 @@ const QuickPhrasesButton = ({ quickPanel, setInputValue, resizeTextArea, assista
return ( return (
<> <>
<Tooltip placement="top" title={t('settings.quickPhrase.title')} mouseLeaveDelay={0} arrow> <Tooltip placement="top" title={t('settings.quickPhrase.title')} mouseLeaveDelay={0} arrow>
<ActionIconButton onClick={handleOpenQuickPanel}> <ActionIconButton onClick={handleOpenQuickPanel} aria-label={t('settings.quickPhrase.title')}>
<Zap size={18} /> <Zap size={18} />
</ActionIconButton> </ActionIconButton>
</Tooltip> </Tooltip>
View File
@ -37,7 +37,11 @@ const SlashCommandsButton: FC<Props> = ({ quickPanelController, session, openPan
return ( return (
<Tooltip placement="top" title={t('chat.input.slash_commands.title')} mouseLeaveDelay={0} arrow> <Tooltip placement="top" title={t('chat.input.slash_commands.title')} mouseLeaveDelay={0} arrow>
<ActionIconButton onClick={handleOpenQuickPanel} active={isActive} disabled={!hasCommands}> <ActionIconButton
onClick={handleOpenQuickPanel}
active={isActive}
disabled={!hasCommands}
aria-label={t('chat.input.slash_commands.title')}>
<Terminal size={18} /> <Terminal size={18} />
</ActionIconButton> </ActionIconButton>
</Tooltip> </Tooltip>
View File
@ -11,6 +11,7 @@ import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/Qu
import { import {
getThinkModelType, getThinkModelType,
isDoubaoThinkingAutoModel, isDoubaoThinkingAutoModel,
isFixedReasoningModel,
isGPT5SeriesReasoningModel, isGPT5SeriesReasoningModel,
isOpenAIWebSearchModel, isOpenAIWebSearchModel,
MODEL_SUPPORTED_OPTIONS MODEL_SUPPORTED_OPTIONS
@ -42,6 +43,8 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
// Determine which option type the current model supports // Determine which option type the current model supports
const modelType = useMemo(() => getThinkModelType(model), [model]) const modelType = useMemo(() => getThinkModelType(model), [model])
const isFixedReasoning = isFixedReasoningModel(model)
// Get the options supported by the current model // Get the options supported by the current model
const supportedOptions: ThinkingOption[] = useMemo(() => { const supportedOptions: ThinkingOption[] = useMemo(() => {
if (modelType === 'doubao') { if (modelType === 'doubao') {
@ -111,6 +114,8 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
}, [quickPanelHook, panelItems, t]) }, [quickPanelHook, panelItems, t])
const handleOpenQuickPanel = useCallback(() => { const handleOpenQuickPanel = useCallback(() => {
if (isFixedReasoning) return
if (quickPanelHook.isVisible && quickPanelHook.symbol === QuickPanelReservedSymbol.Thinking) { if (quickPanelHook.isVisible && quickPanelHook.symbol === QuickPanelReservedSymbol.Thinking) {
quickPanelHook.close() quickPanelHook.close()
return return
@ -121,9 +126,11 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
return return
} }
openQuickPanel() openQuickPanel()
}, [openQuickPanel, quickPanelHook, isThinkingEnabled, supportedOptions, disableThinking]) }, [openQuickPanel, quickPanelHook, isThinkingEnabled, supportedOptions, disableThinking, isFixedReasoning])
useEffect(() => { useEffect(() => {
if (isFixedReasoning) return
const disposeMenu = quickPanel.registerRootMenu([ const disposeMenu = quickPanel.registerRootMenu([
{ {
label: t('assistants.settings.reasoning_effort.label'), label: t('assistants.settings.reasoning_effort.label'),
@ -140,19 +147,22 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
disposeMenu() disposeMenu()
disposeTrigger() disposeTrigger()
} }
}, [currentReasoningEffort, openQuickPanel, quickPanel, t]) }, [currentReasoningEffort, openQuickPanel, quickPanel, t, isFixedReasoning])
const ariaLabel = isFixedReasoning
? t('chat.input.thinking.label')
: isThinkingEnabled && supportedOptions.includes('none')
? t('common.close')
: t('assistants.settings.reasoning_effort.label')
return ( return (
<Tooltip <Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
placement="top" <ActionIconButton
title={ onClick={handleOpenQuickPanel}
isThinkingEnabled && supportedOptions.includes('none') active={isFixedReasoning || currentReasoningEffort !== 'none'}
? t('common.close') aria-label={ariaLabel}
: t('assistants.settings.reasoning_effort.label') aria-pressed={currentReasoningEffort !== 'none'}
} style={isFixedReasoning ? { cursor: 'default' } : undefined}>
mouseLeaveDelay={0}
arrow>
<ActionIconButton onClick={handleOpenQuickPanel} active={currentReasoningEffort !== 'none'}>
{ThinkingIcon(currentReasoningEffort)} {ThinkingIcon(currentReasoningEffort)}
</ActionIconButton> </ActionIconButton>
</Tooltip> </Tooltip>
View File
@ -48,7 +48,11 @@ const UrlContextButton: FC<Props> = ({ assistantId }) => {
return ( return (
<Tooltip placement="top" title={t('chat.input.url_context')} arrow> <Tooltip placement="top" title={t('chat.input.url_context')} arrow>
<ActionIconButton onClick={handleToggle} active={assistant.enableUrlContext}> <ActionIconButton
onClick={handleToggle}
active={assistant.enableUrlContext}
aria-label={t('chat.input.url_context')}
aria-pressed={assistant.enableUrlContext}>
<Link size={18} /> <Link size={18} />
</ActionIconButton> </ActionIconButton>
</Tooltip> </Tooltip>
View File
@ -25,13 +25,15 @@ const WebSearchButton: FC<Props> = ({ quickPanelController, assistantId }) => {
} }
}, [enableWebSearch, toggleQuickPanel, updateWebSearchProvider]) }, [enableWebSearch, toggleQuickPanel, updateWebSearchProvider])
const ariaLabel = enableWebSearch ? t('common.close') : t('chat.input.web_search.label')
return ( return (
<Tooltip <Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
placement="top" <ActionIconButton
title={enableWebSearch ? t('common.close') : t('chat.input.web_search.label')} onClick={onClick}
mouseLeaveDelay={0} active={!!enableWebSearch}
arrow> aria-label={ariaLabel}
<ActionIconButton onClick={onClick} active={!!enableWebSearch}> aria-pressed={!!enableWebSearch}>
<WebSearchProviderIcon pid={selectedProviderId} /> <WebSearchProviderIcon pid={selectedProviderId} />
</ActionIconButton> </ActionIconButton>
</Tooltip> </Tooltip>
View File
@ -1,4 +1,4 @@
import { isSupportedReasoningEffortModel, isSupportedThinkingTokenModel } from '@renderer/config/models' import { isReasoningModel } from '@renderer/config/models'
import ThinkingButton from '@renderer/pages/home/Inputbar/tools/components/ThinkingButton' import ThinkingButton from '@renderer/pages/home/Inputbar/tools/components/ThinkingButton'
import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types' import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'
@ -6,7 +6,7 @@ const thinkingTool = defineTool({
key: 'thinking', key: 'thinking',
label: (t) => t('chat.input.thinking.label'), label: (t) => t('chat.input.thinking.label'),
visibleInScopes: [TopicType.Chat], visibleInScopes: [TopicType.Chat],
condition: ({ model }) => isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model), condition: ({ model }) => isReasoningModel(model),
render: ({ assistant, model, quickPanel }) => ( render: ({ assistant, model, quickPanel }) => (
<ThinkingButton quickPanel={quickPanel} model={model} assistantId={assistant.id} /> <ThinkingButton quickPanel={quickPanel} model={model} assistantId={assistant.id} />
) )
View File
@ -24,12 +24,12 @@ import { useTranslation } from 'react-i18next'
import { useSelector } from 'react-redux' import { useSelector } from 'react-redux'
type VerbosityOption = { type VerbosityOption = {
value: NonNullable<OpenAIVerbosity> | 'undefined' value: NonNullable<OpenAIVerbosity> | 'undefined' | 'null'
label: string label: string
} }
type SummaryTextOption = { type SummaryTextOption = {
value: NonNullable<OpenAISummaryText> | 'undefined' value: NonNullable<OpenAISummaryText> | 'undefined' | 'null'
label: string label: string
} }
@ -85,6 +85,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'undefined', value: 'undefined',
label: t('common.ignore') label: t('common.ignore')
}, },
{
value: 'null',
label: t('common.off')
},
{ {
value: 'auto', value: 'auto',
label: t('settings.openai.summary_text_mode.auto') label: t('settings.openai.summary_text_mode.auto')
@ -105,6 +109,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'undefined', value: 'undefined',
label: t('common.ignore') label: t('common.ignore')
}, },
{
value: 'null',
label: t('common.off')
},
{ {
value: 'low', value: 'low',
label: t('settings.openai.verbosity.low') label: t('settings.openai.verbosity.low')
@ -203,9 +211,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
</Tooltip> </Tooltip>
</SettingRowTitleSmall> </SettingRowTitleSmall>
<Selector <Selector
value={summaryText} value={toOptionValue(summaryText)}
onChange={(value) => { onChange={(value) => {
setSummaryText(value as OpenAISummaryText) setSummaryText(toRealValue(value))
}} }}
options={summaryTextOptions} options={summaryTextOptions}
/> />
@ -222,9 +230,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
</Tooltip> </Tooltip>
</SettingRowTitleSmall> </SettingRowTitleSmall>
<Selector <Selector
value={verbosity} value={toOptionValue(verbosity)}
onChange={(value) => { onChange={(value) => {
setVerbosity(value as OpenAIVerbosity) setVerbosity(toRealValue(value))
}} }}
options={verbosityOptions} options={verbosityOptions}
/> />
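Note: the toOptionValue / toRealValue helpers called above are not shown in this diff. A minimal sketch of what they would look like, assuming the Selector can only carry string sentinels for the two falsy states ('undefined' = omit the parameter, 'null' = explicitly off):

type OptionValue<T> = NonNullable<T> | 'undefined' | 'null'

// Map the real setting value onto the sentinel strings used by the options.
function toOptionValue<T>(value: T | null | undefined): OptionValue<T> {
  if (value === undefined) return 'undefined'
  if (value === null) return 'null'
  return value as NonNullable<T>
}

// Inverse: turn a selected option back into the real value to persist.
function toRealValue<T>(option: OptionValue<T>): T | null | undefined {
  if (option === 'undefined') return undefined
  if (option === 'null') return null
  return option as T
}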
View File
@ -4,6 +4,7 @@ import ObsidianExportPopup from '@renderer/components/Popups/ObsidianExportPopup
import PromptPopup from '@renderer/components/Popups/PromptPopup' import PromptPopup from '@renderer/components/Popups/PromptPopup'
import SaveToKnowledgePopup from '@renderer/components/Popups/SaveToKnowledgePopup' import SaveToKnowledgePopup from '@renderer/components/Popups/SaveToKnowledgePopup'
import { isMac } from '@renderer/config/constant' import { isMac } from '@renderer/config/constant'
import { db } from '@renderer/databases'
import { useAssistant, useAssistants } from '@renderer/hooks/useAssistant' import { useAssistant, useAssistants } from '@renderer/hooks/useAssistant'
import { useInPlaceEdit } from '@renderer/hooks/useInPlaceEdit' import { useInPlaceEdit } from '@renderer/hooks/useInPlaceEdit'
import { useNotesSettings } from '@renderer/hooks/useNotesSettings' import { useNotesSettings } from '@renderer/hooks/useNotesSettings'
@ -11,6 +12,7 @@ import { modelGenerating } from '@renderer/hooks/useRuntime'
import { useSettings } from '@renderer/hooks/useSettings' import { useSettings } from '@renderer/hooks/useSettings'
import { finishTopicRenaming, startTopicRenaming, TopicManager } from '@renderer/hooks/useTopic' import { finishTopicRenaming, startTopicRenaming, TopicManager } from '@renderer/hooks/useTopic'
import { fetchMessagesSummary } from '@renderer/services/ApiService' import { fetchMessagesSummary } from '@renderer/services/ApiService'
import { getDefaultTopic } from '@renderer/services/AssistantService'
import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService' import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
import type { RootState } from '@renderer/store' import type { RootState } from '@renderer/store'
import store from '@renderer/store' import store from '@renderer/store'
@ -65,7 +67,7 @@ export const Topics: React.FC<Props> = ({ assistant: _assistant, activeTopic, se
const { t } = useTranslation() const { t } = useTranslation()
const { notesPath } = useNotesSettings() const { notesPath } = useNotesSettings()
const { assistants } = useAssistants() const { assistants } = useAssistants()
const { assistant, removeTopic, moveTopic, updateTopic, updateTopics } = useAssistant(_assistant.id) const { assistant, addTopic, removeTopic, moveTopic, updateTopic, updateTopics } = useAssistant(_assistant.id)
const { showTopicTime, pinTopicsToTop, setTopicPosition, topicPosition } = useSettings() const { showTopicTime, pinTopicsToTop, setTopicPosition, topicPosition } = useSettings()
const renamingTopics = useSelector((state: RootState) => state.runtime.chat.renamingTopics) const renamingTopics = useSelector((state: RootState) => state.runtime.chat.renamingTopics)
@ -138,17 +140,21 @@ export const Topics: React.FC<Props> = ({ assistant: _assistant, activeTopic, se
async (topic: Topic, e: React.MouseEvent) => { async (topic: Topic, e: React.MouseEvent) => {
e.stopPropagation() e.stopPropagation()
if (assistant.topics.length === 1) { if (assistant.topics.length === 1) {
return onClearMessages(topic) const newTopic = getDefaultTopic(assistant.id)
await db.topics.add({ id: newTopic.id, messages: [] })
addTopic(newTopic)
setActiveTopic(newTopic)
} else {
const index = findIndex(assistant.topics, (t) => t.id === topic.id)
if (topic.id === activeTopic.id) {
setActiveTopic(assistant.topics[index + 1 === assistant.topics.length ? index - 1 : index + 1])
}
} }
await modelGenerating() await modelGenerating()
const index = findIndex(assistant.topics, (t) => t.id === topic.id)
if (topic.id === activeTopic.id) {
setActiveTopic(assistant.topics[index + 1 === assistant.topics.length ? index - 1 : index + 1])
}
removeTopic(topic) removeTopic(topic)
setDeletingTopicId(null) setDeletingTopicId(null)
}, },
[activeTopic.id, assistant.topics, onClearMessages, removeTopic, setActiveTopic] [activeTopic.id, addTopic, assistant.id, assistant.topics, removeTopic, setActiveTopic]
) )
const onPinTopic = useCallback( const onPinTopic = useCallback(
View File
@ -238,19 +238,27 @@ const MinimalToolbar: FC<Props> = ({ app, webviewRef, currentUrl, onReload, onOp
<LeftSection> <LeftSection>
<ButtonGroup> <ButtonGroup>
<Tooltip title={t('minapp.popup.goBack')} placement="bottom"> <Tooltip title={t('minapp.popup.goBack')} placement="bottom">
<ToolbarButton onClick={handleGoBack} $disabled={!canGoBack}> <ToolbarButton
onClick={handleGoBack}
$disabled={!canGoBack}
aria-label={t('minapp.popup.goBack')}
aria-disabled={!canGoBack}>
<ArrowLeftOutlined /> <ArrowLeftOutlined />
</ToolbarButton> </ToolbarButton>
</Tooltip> </Tooltip>
<Tooltip title={t('minapp.popup.goForward')} placement="bottom"> <Tooltip title={t('minapp.popup.goForward')} placement="bottom">
<ToolbarButton onClick={handleGoForward} $disabled={!canGoForward}> <ToolbarButton
onClick={handleGoForward}
$disabled={!canGoForward}
aria-label={t('minapp.popup.goForward')}
aria-disabled={!canGoForward}>
<ArrowRightOutlined /> <ArrowRightOutlined />
</ToolbarButton> </ToolbarButton>
</Tooltip> </Tooltip>
<Tooltip title={t('minapp.popup.refresh')} placement="bottom"> <Tooltip title={t('minapp.popup.refresh')} placement="bottom">
<ToolbarButton onClick={onReload}> <ToolbarButton onClick={onReload} aria-label={t('minapp.popup.refresh')}>
<ReloadOutlined /> <ReloadOutlined />
</ToolbarButton> </ToolbarButton>
</Tooltip> </Tooltip>
@ -261,7 +269,7 @@ const MinimalToolbar: FC<Props> = ({ app, webviewRef, currentUrl, onReload, onOp
<ButtonGroup> <ButtonGroup>
{canOpenExternalLink && ( {canOpenExternalLink && (
<Tooltip title={t('minapp.popup.openExternal')} placement="bottom"> <Tooltip title={t('minapp.popup.openExternal')} placement="bottom">
<ToolbarButton onClick={handleOpenLink}> <ToolbarButton onClick={handleOpenLink} aria-label={t('minapp.popup.openExternal')}>
<ExportOutlined /> <ExportOutlined />
</ToolbarButton> </ToolbarButton>
</Tooltip> </Tooltip>
@ -271,7 +279,11 @@ const MinimalToolbar: FC<Props> = ({ app, webviewRef, currentUrl, onReload, onOp
<Tooltip <Tooltip
title={isPinned ? t('minapp.remove_from_launchpad') : t('minapp.add_to_launchpad')} title={isPinned ? t('minapp.remove_from_launchpad') : t('minapp.add_to_launchpad')}
placement="bottom"> placement="bottom">
<ToolbarButton onClick={handleTogglePin} $active={isPinned}> <ToolbarButton
onClick={handleTogglePin}
$active={isPinned}
aria-label={isPinned ? t('minapp.remove_from_launchpad') : t('minapp.add_to_launchpad')}
aria-pressed={isPinned}>
<PushpinOutlined /> <PushpinOutlined />
</ToolbarButton> </ToolbarButton>
</Tooltip> </Tooltip>
@ -284,21 +296,29 @@ const MinimalToolbar: FC<Props> = ({ app, webviewRef, currentUrl, onReload, onOp
: t('minapp.popup.open_link_external_off') : t('minapp.popup.open_link_external_off')
} }
placement="bottom"> placement="bottom">
<ToolbarButton onClick={handleToggleOpenExternal} $active={minappsOpenLinkExternal}> <ToolbarButton
onClick={handleToggleOpenExternal}
$active={minappsOpenLinkExternal}
aria-label={
minappsOpenLinkExternal
? t('minapp.popup.open_link_external_on')
: t('minapp.popup.open_link_external_off')
}
aria-pressed={minappsOpenLinkExternal}>
<LinkOutlined /> <LinkOutlined />
</ToolbarButton> </ToolbarButton>
</Tooltip> </Tooltip>
{isDev && ( {isDev && (
<Tooltip title={t('minapp.popup.devtools')} placement="bottom"> <Tooltip title={t('minapp.popup.devtools')} placement="bottom">
<ToolbarButton onClick={onOpenDevTools}> <ToolbarButton onClick={onOpenDevTools} aria-label={t('minapp.popup.devtools')}>
<CodeOutlined /> <CodeOutlined />
</ToolbarButton> </ToolbarButton>
</Tooltip> </Tooltip>
)} )}
<Tooltip title={t('minapp.popup.minimize')} placement="bottom"> <Tooltip title={t('minapp.popup.minimize')} placement="bottom">
<ToolbarButton onClick={handleMinimize}> <ToolbarButton onClick={handleMinimize} aria-label={t('minapp.popup.minimize')}>
<MinusOutlined /> <MinusOutlined />
</ToolbarButton> </ToolbarButton>
</Tooltip> </Tooltip>
View File
@ -259,7 +259,8 @@ const PopupContainer: React.FC<Props> = ({ provider, resolve }) => {
{ label: 'Anthropic', value: 'anthropic' }, { label: 'Anthropic', value: 'anthropic' },
{ label: 'Azure OpenAI', value: 'azure-openai' }, { label: 'Azure OpenAI', value: 'azure-openai' },
{ label: 'New API', value: 'new-api' }, { label: 'New API', value: 'new-api' },
{ label: 'CherryIN', value: 'cherryin-type' } { label: 'CherryIN', value: 'cherryin-type' },
{ label: 'Ollama', value: 'ollama' }
]} ]}
/> />
</Form.Item> </Form.Item>
View File
@ -29,6 +29,7 @@ import {
isAzureOpenAIProvider, isAzureOpenAIProvider,
isGeminiProvider, isGeminiProvider,
isNewApiProvider, isNewApiProvider,
isOllamaProvider,
isOpenAICompatibleProvider, isOpenAICompatibleProvider,
isOpenAIProvider, isOpenAIProvider,
isVertexProvider isVertexProvider
@ -278,6 +279,10 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
const hostPreview = () => { const hostPreview = () => {
const formattedApiHost = adaptProvider({ provider: { ...provider, apiHost } }).apiHost const formattedApiHost = adaptProvider({ provider: { ...provider, apiHost } }).apiHost
if (isOllamaProvider(provider)) {
return formattedApiHost + '/chat'
}
if (isOpenAICompatibleProvider(provider)) { if (isOpenAICompatibleProvider(provider)) {
return formattedApiHost + '/chat/completions' return formattedApiHost + '/chat/completions'
} }
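Example (assumption): for an Ollama provider configured with apiHost 'http://localhost:11434', adaptProvider yields 'http://localhost:11434/api', so the preview reads 'http://localhost:11434/api/chat', matching Ollama's native /api/chat endpoint.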
View File
@ -4,6 +4,7 @@ import type { RootState } from '@renderer/store'
import { useAppDispatch } from '@renderer/store' import { useAppDispatch } from '@renderer/store'
import { setApiServerApiKey, setApiServerPort } from '@renderer/store/settings' import { setApiServerApiKey, setApiServerPort } from '@renderer/store/settings'
import { formatErrorMessage } from '@renderer/utils/error' import { formatErrorMessage } from '@renderer/utils/error'
import { API_SERVER_DEFAULTS } from '@shared/config/constant'
import { Alert, Button, Input, InputNumber, Tooltip, Typography } from 'antd' import { Alert, Button, Input, InputNumber, Tooltip, Typography } from 'antd'
import { Copy, ExternalLink, Play, RotateCcw, Square } from 'lucide-react' import { Copy, ExternalLink, Play, RotateCcw, Square } from 'lucide-react'
import type { FC } from 'react' import type { FC } from 'react'
@ -56,7 +57,7 @@ const ApiServerSettings: FC = () => {
} }
const handlePortChange = (value: string) => { const handlePortChange = (value: string) => {
const port = parseInt(value) || 23333 const port = parseInt(value) || API_SERVER_DEFAULTS.PORT
if (port >= 1000 && port <= 65535) { if (port >= 1000 && port <= 65535) {
dispatch(setApiServerPort(port)) dispatch(setApiServerPort(port))
} }
@ -64,7 +65,9 @@ const ApiServerSettings: FC = () => {
const openApiDocs = () => { const openApiDocs = () => {
if (apiServerRunning) { if (apiServerRunning) {
window.open(`http://localhost:${apiServerConfig.port}/api-docs`, '_blank') const host = apiServerConfig.host || API_SERVER_DEFAULTS.HOST
const port = apiServerConfig.port || API_SERVER_DEFAULTS.PORT
window.open(`http://${host}:${port}/api-docs`, '_blank')
} }
} }
@ -98,7 +101,9 @@ const ApiServerSettings: FC = () => {
{apiServerRunning ? t('apiServer.status.running') : t('apiServer.status.stopped')} {apiServerRunning ? t('apiServer.status.running') : t('apiServer.status.stopped')}
</StatusText> </StatusText>
<StatusSubtext> <StatusSubtext>
{apiServerRunning ? `http://localhost:${apiServerConfig.port}` : t('apiServer.fields.port.description')} {apiServerRunning
? `http://${apiServerConfig.host || API_SERVER_DEFAULTS.HOST}:${apiServerConfig.port || API_SERVER_DEFAULTS.PORT}`
: t('apiServer.fields.port.description')}
</StatusSubtext> </StatusSubtext>
</StatusContent> </StatusContent>
</StatusSection> </StatusSection>
@ -119,11 +124,11 @@ const ApiServerSettings: FC = () => {
{!apiServerRunning && ( {!apiServerRunning && (
<StyledInputNumber <StyledInputNumber
value={apiServerConfig.port} value={apiServerConfig.port}
onChange={(value) => handlePortChange(String(value || 23333))} onChange={(value) => handlePortChange(String(value || API_SERVER_DEFAULTS.PORT))}
min={1000} min={1000}
max={65535} max={65535}
disabled={apiServerRunning} disabled={apiServerRunning}
placeholder="23333" placeholder={String(API_SERVER_DEFAULTS.PORT)}
size="middle" size="middle"
/> />
)} )}
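For reference, a sketch of the shared constant this file now imports; the exact values are assumptions inferred from the old literal 23333 and the localhost-to-127.0.0.1 changes elsewhere in this commit:

// @shared/config/constant (assumed shape)
export const API_SERVER_DEFAULTS = {
  HOST: '127.0.0.1',
  PORT: 23333
} as const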
View File
@ -6,12 +6,13 @@ import { DEFAULT_KNOWLEDGE_DOCUMENT_COUNT, DEFAULT_KNOWLEDGE_THRESHOLD } from '@
import { getEmbeddingMaxContext } from '@renderer/config/embedings' import { getEmbeddingMaxContext } from '@renderer/config/embedings'
import { addSpan, endSpan } from '@renderer/services/SpanManagerService' import { addSpan, endSpan } from '@renderer/services/SpanManagerService'
import store from '@renderer/store' import store from '@renderer/store'
import type { import {
FileMetadata, type FileMetadata,
KnowledgeBase, type KnowledgeBase,
KnowledgeBaseParams, type KnowledgeBaseParams,
KnowledgeReference, type KnowledgeReference,
KnowledgeSearchResult type KnowledgeSearchResult,
SystemProviderIds
} from '@renderer/types' } from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk' import type { Chunk } from '@renderer/types/chunk'
import { ChunkType } from '@renderer/types/chunk' import { ChunkType } from '@renderer/types/chunk'
@ -50,6 +51,9 @@ export const getKnowledgeBaseParams = (base: KnowledgeBase): KnowledgeBaseParams
baseURL = baseURL + '/openai' baseURL = baseURL + '/openai'
} else if (isAzureOpenAIProvider(actualProvider)) { } else if (isAzureOpenAIProvider(actualProvider)) {
baseURL = baseURL + '/v1' baseURL = baseURL + '/v1'
} else if (actualProvider.id === SystemProviderIds.ollama) {
// The LangChain ecosystem does not want the /api-suffixed URL
baseURL = baseURL.replace(/\/api$/, '')
} }
logger.info(`Knowledge base ${base.name} using baseURL: ${baseURL}`) logger.info(`Knowledge base ${base.name} using baseURL: ${baseURL}`)
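Example: with this rewrite, 'http://localhost:11434/api' becomes 'http://localhost:11434', since the LangChain Ollama integrations build the /api/... paths themselves.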
View File
@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
{ {
key: 'cherry-studio', key: 'cherry-studio',
storage, storage,
version: 180, version: 182,
blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'], blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
migrate migrate
}, },
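Note: createMigrate only runs migrations whose numeric key is greater than the persisted version and at most the version configured here, so this value has to reach 182 for the new '182' migration below to ever run. A minimal sketch of that behavior, assuming the migration map is named migrateConfig as in the settings store:

import { createMigrate } from 'redux-persist'

// On rehydrate from a store persisted at version 180, this applies
// '181' and then '182', in ascending key order.
const migrate = createMigrate(migrateConfig as any, { debug: false })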
View File
@ -32,6 +32,7 @@ import {
isSupportDeveloperRoleProvider, isSupportDeveloperRoleProvider,
isSupportStreamOptionsProvider isSupportStreamOptionsProvider
} from '@renderer/utils/provider' } from '@renderer/utils/provider'
import { API_SERVER_DEFAULTS } from '@shared/config/constant'
import { defaultByPassRules, UpgradeChannel } from '@shared/config/constant' import { defaultByPassRules, UpgradeChannel } from '@shared/config/constant'
import { isEmpty } from 'lodash' import { isEmpty } from 'lodash'
import { createMigrate } from 'redux-persist' import { createMigrate } from 'redux-persist'
@ -2032,8 +2033,8 @@ const migrateConfig = {
if (!state.settings.apiServer) { if (!state.settings.apiServer) {
state.settings.apiServer = { state.settings.apiServer = {
enabled: false, enabled: false,
host: 'localhost', host: API_SERVER_DEFAULTS.HOST,
port: 23333, port: API_SERVER_DEFAULTS.PORT,
apiKey: `cs-sk-${uuid()}` apiKey: `cs-sk-${uuid()}`
} }
} }
@ -2809,7 +2810,7 @@ const migrateConfig = {
try { try {
addProvider(state, SystemProviderIds.longcat) addProvider(state, SystemProviderIds.longcat)
addProvider(state, SystemProviderIds['ai-gateway']) addProvider(state, 'gateway')
addProvider(state, 'cerebras') addProvider(state, 'cerebras')
state.llm.providers.forEach((provider) => { state.llm.providers.forEach((provider) => {
if (provider.id === SystemProviderIds.minimax) { if (provider.id === SystemProviderIds.minimax) {
@ -2909,9 +2910,20 @@ const migrateConfig = {
}, },
'180': (state: RootState) => { '180': (state: RootState) => {
try { try {
if (state.settings.apiServer) {
state.settings.apiServer.host = API_SERVER_DEFAULTS.HOST
}
// @ts-expect-error
if (state.settings.openAI.summaryText === 'undefined') {
state.settings.openAI.summaryText = undefined
}
// @ts-expect-error
if (state.settings.openAI.verbosity === 'undefined') {
state.settings.openAI.verbosity = undefined
}
state.llm.providers.forEach((provider) => { state.llm.providers.forEach((provider) => {
if (provider.id === SystemProviderIds.ppio) { if (provider.id === SystemProviderIds.ollama) {
provider.anthropicApiHost = 'https://api.ppinfra.com/anthropic' provider.type = 'ollama'
} }
}) })
logger.info('migrate 180 success') logger.info('migrate 180 success')
@ -2920,6 +2932,40 @@ const migrateConfig = {
logger.error('migrate 180 error', error as Error) logger.error('migrate 180 error', error as Error)
return state return state
} }
},
'181': (state: RootState) => {
try {
state.llm.providers.forEach((provider) => {
if (provider.id === 'ai-gateway') {
provider.id = SystemProviderIds.gateway
}
// Also update model.provider references to avoid orphaned models
provider.models?.forEach((model) => {
if (model.provider === 'ai-gateway') {
model.provider = SystemProviderIds.gateway
}
})
})
logger.info('migrate 181 success')
return state
} catch (error) {
logger.error('migrate 181 error', error as Error)
return state
}
},
'182': (state: RootState) => {
try {
state.llm.providers.forEach((provider) => {
if (provider.id === SystemProviderIds.ppio) {
provider.anthropicApiHost = 'https://api.ppinfra.com/anthropic'
}
})
logger.info('migrate 182 success')
return state
} catch (error) {
logger.error('migrate 182 error', error as Error)
return state
}
} }
} }
View File
@ -18,7 +18,7 @@ import type {
import { ThemeMode } from '@renderer/types' import { ThemeMode } from '@renderer/types'
import type { OpenAISummaryText, OpenAIVerbosity } from '@renderer/types/aiCoreTypes' import type { OpenAISummaryText, OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { uuid } from '@renderer/utils' import { uuid } from '@renderer/utils'
import { UpgradeChannel } from '@shared/config/constant' import { API_SERVER_DEFAULTS, UpgradeChannel } from '@shared/config/constant'
import type { RemoteSyncState } from './backup' import type { RemoteSyncState } from './backup'
@ -410,8 +410,8 @@ export const initialState: SettingsState = {
// API Server // API Server
apiServer: { apiServer: {
enabled: false, enabled: false,
host: 'localhost', host: API_SERVER_DEFAULTS.HOST,
port: 23333, port: API_SERVER_DEFAULTS.PORT,
apiKey: `cs-sk-${uuid()}` apiKey: `cs-sk-${uuid()}`
}, },
showMessageOutline: false showMessageOutline: false
View File
@ -1,5 +1,5 @@
import type OpenAI from '@cherrystudio/openai' import type OpenAI from '@cherrystudio/openai'
import type { NotNull, NotUndefined } from '@types' import type { NotUndefined } from '@types'
import type { ImageModel, LanguageModel } from 'ai' import type { ImageModel, LanguageModel } from 'ai'
import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai' import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai'
import * as z from 'zod' import * as z from 'zod'
@ -31,18 +31,26 @@ export type GenerateObjectParams = Omit<Parameters<typeof generateObject>[0], 'm
export type AiSdkModel = LanguageModel | ImageModel export type AiSdkModel = LanguageModel | ImageModel
// The original type unite both undefined and null. /**
// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs. * Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses.
// Parameter would not be passed into request if it's undefined. *
export type OpenAIVerbosity = NotNull<OpenAI.Responses.ResponseTextConfig['verbosity']> * The original type unites both undefined and null.
* When undefined, the parameter is omitted from the request.
* When null, verbosity is explicitly disabled.
*/
export type OpenAIVerbosity = OpenAI.Responses.ResponseTextConfig['verbosity']
export type ValidOpenAIVerbosity = NotUndefined<OpenAIVerbosity> export type ValidOpenAIVerbosity = NotUndefined<OpenAIVerbosity>
export type OpenAIReasoningEffort = OpenAI.ReasoningEffort export type OpenAIReasoningEffort = OpenAI.ReasoningEffort
// The original type unite both undefined and null. /**
// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs. * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process.
// Parameter would not be passed into request if it's undefined. *
export type OpenAISummaryText = NotNull<OpenAI.Reasoning['summary']> * The original type unites both undefined and null.
* When undefined, the parameter is omitted from the request.
* When null, the summary is explicitly disabled.
*/
export type OpenAISummaryText = OpenAI.Reasoning['summary']
const AiSdkParamsSchema = z.enum([ const AiSdkParamsSchema = z.enum([
'maxOutputTokens', 'maxOutputTokens',
View File
@ -96,6 +96,9 @@ export type ReasoningEffortOptionalParams = {
include_thoughts?: boolean include_thoughts?: boolean
} }
} }
thinking?: {
type: 'enabled' | 'disabled'
}
thinking_budget?: number thinking_budget?: number
reasoning_effort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'auto' reasoning_effort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'auto'
} }
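A hypothetical example of how these optional reasoning switches compose; the field names come from the type above, the values are purely illustrative:

const reasoningParams: ReasoningEffortOptionalParams = {
  // provider-side on/off switch added in this change
  thinking: { type: 'enabled' },
  // token budget for providers that meter thinking separately
  thinking_budget: 1024,
  // OpenAI-compatible effort level
  reasoning_effort: 'medium'
}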
@ -128,10 +131,6 @@ export type OpenAIExtraBody = {
source_lang: 'auto' source_lang: 'auto'
target_lang: string target_lang: string
} }
// for gpt-5 series models verbosity control
text?: {
verbosity?: 'low' | 'medium' | 'high'
}
} }
// image is for openrouter. audio is ignored for now // image is for openrouter. audio is ignored for now
export type OpenAIModality = OpenAI.ChatCompletionModality | 'image' export type OpenAIModality = OpenAI.ChatCompletionModality | 'image'
View File
@ -6,6 +6,7 @@ import {
formatApiHost, formatApiHost,
formatApiKeys, formatApiKeys,
formatAzureOpenAIApiHost, formatAzureOpenAIApiHost,
formatOllamaApiHost,
formatVertexApiHost, formatVertexApiHost,
getTrailingApiVersion, getTrailingApiVersion,
hasAPIVersion, hasAPIVersion,
@ -330,6 +331,73 @@ describe('api', () => {
}) })
}) })
describe('formatOllamaApiHost', () => {
it('removes trailing slash and appends /api for basic hosts', () => {
expect(formatOllamaApiHost('https://api.ollama.com/')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434/')).toBe('http://localhost:11434/api')
})
it('appends /api when no suffix is present', () => {
expect(formatOllamaApiHost('https://api.ollama.com')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434')).toBe('http://localhost:11434/api')
})
it('removes /v1 suffix and appends /api', () => {
expect(formatOllamaApiHost('https://api.ollama.com/v1')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434/v1/')).toBe('http://localhost:11434/api')
})
it('removes /api suffix and keeps /api', () => {
expect(formatOllamaApiHost('https://api.ollama.com/api')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434/api/')).toBe('http://localhost:11434/api')
})
it('removes /chat suffix and appends /api', () => {
expect(formatOllamaApiHost('https://api.ollama.com/chat')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434/chat/')).toBe('http://localhost:11434/api')
})
it('handles multiple suffix combinations correctly', () => {
expect(formatOllamaApiHost('https://api.ollama.com/v1/chat')).toBe('https://api.ollama.com/v1/api')
expect(formatOllamaApiHost('https://api.ollama.com/chat/v1')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('https://api.ollama.com/api/chat')).toBe('https://api.ollama.com/api/api')
})
it('preserves complex paths while handling suffixes', () => {
expect(formatOllamaApiHost('https://api.ollama.com/custom/path')).toBe('https://api.ollama.com/custom/path/api')
expect(formatOllamaApiHost('https://api.ollama.com/custom/path/')).toBe('https://api.ollama.com/custom/path/api')
expect(formatOllamaApiHost('https://api.ollama.com/custom/path/v1')).toBe(
'https://api.ollama.com/custom/path/api'
)
})
it('handles edge cases with multiple slashes', () => {
expect(formatOllamaApiHost('https://api.ollama.com//')).toBe('https://api.ollama.com//api')
expect(formatOllamaApiHost('https://api.ollama.com///v1///')).toBe('https://api.ollama.com///v1///api')
})
it('handles localhost with different ports', () => {
expect(formatOllamaApiHost('http://localhost:3000')).toBe('http://localhost:3000/api')
expect(formatOllamaApiHost('http://127.0.0.1:11434/')).toBe('http://127.0.0.1:11434/api')
expect(formatOllamaApiHost('https://localhost:8080/v1')).toBe('https://localhost:8080/api')
})
it('handles IP addresses', () => {
expect(formatOllamaApiHost('http://192.168.1.100:11434')).toBe('http://192.168.1.100:11434/api')
expect(formatOllamaApiHost('https://10.0.0.1:8080/v1/')).toBe('https://10.0.0.1:8080/api')
})
it('handles empty strings and edge cases', () => {
expect(formatOllamaApiHost('')).toBe('/api')
expect(formatOllamaApiHost('/')).toBe('/api')
})
it('preserves protocol and handles mixed case', () => {
expect(formatOllamaApiHost('HTTPS://API.OLLAMA.COM')).toBe('HTTPS://API.OLLAMA.COM/api')
expect(formatOllamaApiHost('HTTP://localhost:11434/V1/')).toBe('HTTP://localhost:11434/V1/api')
})
})
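For context, one implementation consistent with every expectation above; the real helper lives in the renderer's api utils, so this reconstruction is an assumption:

// Strips one trailing slash, then the suffixes /api, /v1 and /chat
// (each at most once, checked in that order, case-sensitively),
// then appends /api.
export function formatOllamaApiHost(host: string): string {
  return (
    host
      .replace(/\/$/, '')
      .replace(/\/api$/, '')
      .replace(/\/v1$/, '')
      .replace(/\/chat$/, '') + '/api'
  )
}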
describe('getTrailingApiVersion', () => { describe('getTrailingApiVersion', () => {
it('extracts trailing API version from URL', () => { it('extracts trailing API version from URL', () => {
expect(getTrailingApiVersion('https://api.example.com/v1')).toBe('v1') expect(getTrailingApiVersion('https://api.example.com/v1')).toBe('v1')
View File
@ -222,6 +222,9 @@ describe('naming', () => {
it('should remove trailing :free', () => { it('should remove trailing :free', () => {
expect(getLowerBaseModelName('gpt-4:free')).toBe('gpt-4') expect(getLowerBaseModelName('gpt-4:free')).toBe('gpt-4')
}) })
it('should remove trailing (free)', () => {
expect(getLowerBaseModelName('agent/gpt-4(free)')).toBe('gpt-4')
})
}) })
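A reconstruction consistent with the two expectations above (an assumption — the real helper may normalize more variants): lower-case the id, keep the last path segment, and drop a trailing ':free' or '(free)' marker.

function getLowerBaseModelName(id: string): string {
  const base = id.toLowerCase().split('/').pop() ?? ''
  return base.replace(/:free$/, '').replace(/\(free\)$/, '')
}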
describe('getFirstCharacter', () => { describe('getFirstCharacter', () => {
View File
@ -189,7 +189,7 @@ describe('provider utils', () => {
expect(isAnthropicProvider(createProvider({ type: 'anthropic' }))).toBe(true) expect(isAnthropicProvider(createProvider({ type: 'anthropic' }))).toBe(true)
expect(isGeminiProvider(createProvider({ type: 'gemini' }))).toBe(true) expect(isGeminiProvider(createProvider({ type: 'gemini' }))).toBe(true)
expect(isAIGatewayProvider(createProvider({ type: 'ai-gateway' }))).toBe(true) expect(isAIGatewayProvider(createProvider({ type: 'gateway' }))).toBe(true)
}) })
it('computes API version support', () => { it('computes API version support', () => {
View File
@ -10,6 +10,7 @@ export {
isCherryAIProvider, isCherryAIProvider,
isGeminiProvider, isGeminiProvider,
isNewApiProvider, isNewApiProvider,
isOllamaProvider,
isOpenAICompatibleProvider, isOpenAICompatibleProvider,
isOpenAIProvider, isOpenAIProvider,
isPerplexityProvider, isPerplexityProvider,
View File
@ -72,8 +72,22 @@ const ActionIcons: FC<{
(action: ActionItem) => { (action: ActionItem) => {
const displayName = action.isBuiltIn ? t(action.name) : action.name const displayName = action.isBuiltIn ? t(action.name) : action.name
const handleKeyDown = (e: React.KeyboardEvent<HTMLDivElement>) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault()
handleAction(action)
}
}
return ( return (
<ActionButton key={action.id} onClick={() => handleAction(action)} title={isCompact ? displayName : undefined}> <ActionButton
key={action.id}
onClick={() => handleAction(action)}
onKeyDown={handleKeyDown}
title={isCompact ? displayName : undefined}
role="button"
aria-label={displayName}
tabIndex={0}>
<ActionIcon> <ActionIcon>
{action.id === 'copy' ? ( {action.id === 'copy' ? (
renderCopyIcon() renderCopyIcon()
View File
@ -1,4 +1,4 @@
@host=http://localhost:23333 @host=http://127.0.0.1:23333
@token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194 @token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194
@agent_id=agent_1758092281575_tn9dxio9k @agent_id=agent_1758092281575_tn9dxio9k
@ -56,4 +56,3 @@ Content-Type: application/json
"max_turns": 5 "max_turns": 5
} }
} }
View File
@ -1,5 +1,5 @@
@host=http://localhost:23333 @host=http://127.0.0.1:23333
@token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194 @token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194
@agent_id=agent_1758092281575_tn9dxio9k @agent_id=agent_1758092281575_tn9dxio9k
@session_id=session_1758278828236_mqj91e7c0 @session_id=session_1758278828236_mqj91e7c0
View File
@ -1,4 +1,4 @@
@host=http://localhost:23333 @host=http://127.0.0.1:23333
@token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194 @token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194
@agent_id=agent_1758092281575_tn9dxio9k @agent_id=agent_1758092281575_tn9dxio9k
View File
@ -128,16 +128,15 @@ __metadata:
languageName: node languageName: node
linkType: hard linkType: hard
"@ai-sdk/deepseek@npm:^1.0.29": "@ai-sdk/deepseek@npm:^1.0.31":
version: 1.0.29 version: 1.0.31
resolution: "@ai-sdk/deepseek@npm:1.0.29" resolution: "@ai-sdk/deepseek@npm:1.0.31"
dependencies: dependencies:
"@ai-sdk/openai-compatible": "npm:1.0.27"
"@ai-sdk/provider": "npm:2.0.0" "@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17" "@ai-sdk/provider-utils": "npm:3.0.18"
peerDependencies: peerDependencies:
zod: ^3.25.76 || ^4.1.8 zod: ^3.25.76 || ^4.1.8
checksum: 10c0/f43fba5c72e3f2d8ddc79d68c656cb4fc5fcd488c97b0a5371ad728e2d5c7a8c61fe9125a2a471b7648d99646cd2c78aad2d462c1469942bb4046763c5f13f38 checksum: 10c0/851965392ce03c85ffacf74900ec159bccef491b9bf6142ac08bc25f4d2bbf4df1d754e76fe9793403dee4a8da76fb6b7a9ded84491ec309bdea9aa478e6f542
languageName: node languageName: node
linkType: hard linkType: hard
@ -243,6 +242,18 @@ __metadata:
languageName: node languageName: node
linkType: hard linkType: hard
"@ai-sdk/openai-compatible@npm:^1.0.28":
version: 1.0.28
resolution: "@ai-sdk/openai-compatible@npm:1.0.28"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.18"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/f484774e0094a12674f392d925038a296191723b4c76bd833eabf1b334cf3c84fe77a2e2c5fbac974ec5e18340e113c6a81c86d957c9529a7a60e87cd390ada8
languageName: node
linkType: hard
"@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch": "@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch":
version: 1.0.27 version: 1.0.27
resolution: "@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch::version=1.0.27&hash=c44b76" resolution: "@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch::version=1.0.27&hash=c44b76"
@ -304,6 +315,19 @@ __metadata:
languageName: node languageName: node
linkType: hard linkType: hard
"@ai-sdk/provider-utils@npm:3.0.18":
version: 3.0.18
resolution: "@ai-sdk/provider-utils@npm:3.0.18"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@standard-schema/spec": "npm:^1.0.0"
eventsource-parser: "npm:^3.0.6"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/209c15b0dceef0ba95a7d3de544be0a417ad4a0bd5143496b3966a35fedf144156d93a42ff8c3d7db56781b9836bafc8c132c98978c49240e55bc1a36e18a67f
languageName: node
linkType: hard
"@ai-sdk/provider@npm:2.0.0, @ai-sdk/provider@npm:^2.0.0": "@ai-sdk/provider@npm:2.0.0, @ai-sdk/provider@npm:^2.0.0":
version: 2.0.0 version: 2.0.0
resolution: "@ai-sdk/provider@npm:2.0.0" resolution: "@ai-sdk/provider@npm:2.0.0"
@ -1830,7 +1854,7 @@ __metadata:
dependencies: dependencies:
"@ai-sdk/anthropic": "npm:^2.0.49" "@ai-sdk/anthropic": "npm:^2.0.49"
"@ai-sdk/azure": "npm:^2.0.74" "@ai-sdk/azure": "npm:^2.0.74"
"@ai-sdk/deepseek": "npm:^1.0.29" "@ai-sdk/deepseek": "npm:^1.0.31"
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch" "@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
"@ai-sdk/provider": "npm:^2.0.0" "@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17" "@ai-sdk/provider-utils": "npm:^3.0.17"
@ -1851,6 +1875,7 @@ __metadata:
version: 0.0.0-use.local version: 0.0.0-use.local
resolution: "@cherrystudio/ai-sdk-provider@workspace:packages/ai-sdk-provider" resolution: "@cherrystudio/ai-sdk-provider@workspace:packages/ai-sdk-provider"
dependencies: dependencies:
"@ai-sdk/openai-compatible": "npm:^1.0.28"
"@ai-sdk/provider": "npm:^2.0.0" "@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17" "@ai-sdk/provider-utils": "npm:^3.0.17"
tsdown: "npm:^0.13.3" tsdown: "npm:^0.13.3"
@ -10232,6 +10257,7 @@ __metadata:
notion-helper: "npm:^1.3.22" notion-helper: "npm:^1.3.22"
npx-scope-finder: "npm:^1.2.0" npx-scope-finder: "npm:^1.2.0"
officeparser: "npm:^4.2.0" officeparser: "npm:^4.2.0"
ollama-ai-provider-v2: "npm:^1.5.5"
os-proxy-config: "npm:^1.1.2" os-proxy-config: "npm:^1.1.2"
oxlint: "npm:^1.22.0" oxlint: "npm:^1.22.0"
oxlint-tsgolint: "npm:^0.2.0" oxlint-tsgolint: "npm:^0.2.0"
@ -19934,6 +19960,18 @@ __metadata:
languageName: node languageName: node
linkType: hard linkType: hard
"ollama-ai-provider-v2@npm:^1.5.5":
version: 1.5.5
resolution: "ollama-ai-provider-v2@npm:1.5.5"
dependencies:
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17"
peerDependencies:
zod: ^4.0.16
checksum: 10c0/da40c8097bd8205c46eccfbd13e77c51a6ce97a29b886adfc9e1b8444460b558138d1ed4428491fcc9378d46f649dd0a9b1e5b13cf6bbc8f5385e8b321734e72
languageName: node
linkType: hard
"ollama@npm:^0.5.12": "ollama@npm:^0.5.12":
version: 0.5.16 version: 0.5.16
resolution: "ollama@npm:0.5.16" resolution: "ollama@npm:0.5.16"