mirror of https://github.com/CherryHQ/cherry-studio.git
synced 2026-01-01 09:49:03 +08:00

Merge remote-tracking branch 'origin/main' into feat/proxy-api-server

This commit is contained in: commit 0e6830b1bf

.github/workflows/dispatch-docs-update.yml (vendored, 2 changes)
@ -19,7 +19,7 @@ jobs:
echo "tag=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT

- name: Dispatch update-download-version workflow to cherry-studio-docs
uses: peter-evans/repository-dispatch@v3
uses: peter-evans/repository-dispatch@v4
with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: CherryHQ/cherry-studio-docs
@ -318,6 +318,7 @@
"motion": "^12.10.5",
"notion-helper": "^1.3.22",
"npx-scope-finder": "^1.2.0",
"ollama-ai-provider-v2": "^1.5.5",
"oxlint": "^1.22.0",
"oxlint-tsgolint": "^0.2.0",
"p-queue": "^8.1.0",
@ -41,6 +41,7 @@
"ai": "^5.0.26"
},
"dependencies": {
"@ai-sdk/openai-compatible": "^1.0.28",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.17"
},
@ -2,7 +2,6 @@ import { AnthropicMessagesLanguageModel } from '@ai-sdk/anthropic/internal'
import { GoogleGenerativeAILanguageModel } from '@ai-sdk/google/internal'
import type { OpenAIProviderSettings } from '@ai-sdk/openai'
import {
OpenAIChatLanguageModel,
OpenAICompletionLanguageModel,
OpenAIEmbeddingModel,
OpenAIImageModel,
@ -10,6 +9,7 @@ import {
OpenAISpeechModel,
OpenAITranscriptionModel
} from '@ai-sdk/openai/internal'
import { OpenAICompatibleChatLanguageModel } from '@ai-sdk/openai-compatible'
import {
type EmbeddingModelV2,
type ImageModelV2,
@ -118,7 +118,7 @@ const createCustomFetch = (originalFetch?: any) => {
return originalFetch ? originalFetch(url, options) : fetch(url, options)
}
}
class CherryInOpenAIChatLanguageModel extends OpenAIChatLanguageModel {
class CherryInOpenAIChatLanguageModel extends OpenAICompatibleChatLanguageModel {
constructor(modelId: string, settings: any) {
super(modelId, {
...settings,
@ -41,7 +41,7 @@
"dependencies": {
"@ai-sdk/anthropic": "^2.0.49",
"@ai-sdk/azure": "^2.0.74",
"@ai-sdk/deepseek": "^1.0.29",
"@ai-sdk/deepseek": "^1.0.31",
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/provider": "^2.0.0",
"@ai-sdk/provider-utils": "^3.0.17",
@ -35,7 +35,6 @@ export interface WebSearchPluginConfig {
anthropic?: AnthropicSearchConfig
xai?: ProviderOptionsMap['xai']['searchParameters']
google?: GoogleSearchConfig
'google-vertex'?: GoogleSearchConfig
openrouter?: OpenRouterSearchConfig
}

@ -44,7 +43,6 @@ export interface WebSearchPluginConfig {
*/
export const DEFAULT_WEB_SEARCH_CONFIG: WebSearchPluginConfig = {
google: {},
'google-vertex': {},
openai: {},
'openai-chat': {},
xai: {
@ -97,55 +95,28 @@ export type WebSearchToolInputSchema = {
'openai-chat': InferToolInput<OpenAIChatWebSearchTool>
}

export const switchWebSearchTool = (providerId: string, config: WebSearchPluginConfig, params: any) => {
switch (providerId) {
case 'openai': {
if (config.openai) {
if (!params.tools) params.tools = {}
params.tools.web_search = openai.tools.webSearch(config.openai)
}
break
}
case 'openai-chat': {
if (config['openai-chat']) {
if (!params.tools) params.tools = {}
params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
}
break
}

case 'anthropic': {
if (config.anthropic) {
if (!params.tools) params.tools = {}
params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
}
break
}

case 'google': {
// case 'google-vertex':
if (!params.tools) params.tools = {}
params.tools.web_search = google.tools.googleSearch(config.google || {})
break
}

case 'xai': {
if (config.xai) {
const searchOptions = createXaiOptions({
searchParameters: { ...config.xai, mode: 'on' }
})
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
}
break
}

case 'openrouter': {
if (config.openrouter) {
const searchOptions = createOpenRouterOptions(config.openrouter)
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
}
break
}
export const switchWebSearchTool = (config: WebSearchPluginConfig, params: any) => {
if (config.openai) {
if (!params.tools) params.tools = {}
params.tools.web_search = openai.tools.webSearch(config.openai)
} else if (config['openai-chat']) {
if (!params.tools) params.tools = {}
params.tools.web_search_preview = openai.tools.webSearchPreview(config['openai-chat'])
} else if (config.anthropic) {
if (!params.tools) params.tools = {}
params.tools.web_search = anthropic.tools.webSearch_20250305(config.anthropic)
} else if (config.google) {
// case 'google-vertex':
if (!params.tools) params.tools = {}
params.tools.web_search = google.tools.googleSearch(config.google || {})
} else if (config.xai) {
const searchOptions = createXaiOptions({
searchParameters: { ...config.xai, mode: 'on' }
})
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
} else if (config.openrouter) {
const searchOptions = createOpenRouterOptions(config.openrouter)
params.providerOptions = mergeProviderOptions(params.providerOptions, searchOptions)
}
return params
}
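The refactored switchWebSearchTool is config-driven rather than providerId-driven: the branches form an else-if chain, so the first populated key in the plugin config wins. A minimal usage sketch (illustrative only; the `maxUses` option is an assumption about the Anthropic search config shape):

```ts
import { switchWebSearchTool } from './helper'

// Start from bare streamText params; the helper mutates and returns them.
const params: any = {}
switchWebSearchTool({ anthropic: { maxUses: 5 } }, params)
// params.tools.web_search now holds anthropic.tools.webSearch_20250305({ maxUses: 5 })
```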
@ -4,7 +4,6 @@
*/

import { definePlugin } from '../../'
import type { AiRequestContext } from '../../types'
import type { WebSearchPluginConfig } from './helper'
import { DEFAULT_WEB_SEARCH_CONFIG, switchWebSearchTool } from './helper'

@ -18,15 +17,8 @@ export const webSearchPlugin = (config: WebSearchPluginConfig = DEFAULT_WEB_SEARCH_CONFIG) =>
name: 'webSearch',
enforce: 'pre',

transformParams: async (params: any, context: AiRequestContext) => {
const { providerId } = context
switchWebSearchTool(providerId, config, params)

if (providerId === 'cherryin' || providerId === 'cherryin-chat') {
// cherryin.gemini
const _providerId = params.model.provider.split('.')[1]
switchWebSearchTool(_providerId, config, params)
}
transformParams: async (params: any) => {
switchWebSearchTool(config, params)
return params
}
})
@ -102,6 +102,17 @@ export function formatVertexApiHost(
return formatApiHost(trimmedHost)
}

/**
* Formats the Ollama API host URL.
*/
export function formatOllamaApiHost(host: string): string {
const normalizedHost = withoutTrailingSlash(host)
?.replace(/\/v1$/, '')
?.replace(/\/api$/, '')
?.replace(/\/chat$/, '')
return formatApiHost(normalizedHost + '/api', false)
}
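For reference, a sketch of the normalization this function is meant to perform (illustrative; assumes formatApiHost(host + '/api', false) appends nothing further):

```ts
formatOllamaApiHost('http://localhost:11434')      // -> 'http://localhost:11434/api'
formatOllamaApiHost('http://localhost:11434/v1')   // -> 'http://localhost:11434/api'
formatOllamaApiHost('http://localhost:11434/api/') // -> 'http://localhost:11434/api'
formatOllamaApiHost('http://localhost:11434/chat') // -> 'http://localhost:11434/api'
```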
/**
* Formats an API host URL by normalizing it and optionally appending an API version.
*
@ -7,6 +7,11 @@ export const documentExts = ['.pdf', '.doc', '.docx', '.pptx', '.xlsx', '.odt',
export const thirdPartyApplicationExts = ['.draftsExport']
export const bookExts = ['.epub']

export const API_SERVER_DEFAULTS = {
HOST: '127.0.0.1',
PORT: 23333
}

/**
* A flat array of all file extensions known by the linguist database.
* This is the primary source for identifying code files.
@ -52,11 +52,12 @@ export function isAwsBedrockProvider<P extends MinimalProvider>(provider: P): boolean {
return provider.type === 'aws-bedrock'
}

/**
* Check if provider is AI Gateway type
*/
export function isAIGatewayProvider<P extends MinimalProvider>(provider: P): boolean {
return provider.type === 'ai-gateway'
return provider.type === 'gateway'
}

export function isOllamaProvider<P extends MinimalProvider>(provider: P): boolean {
return provider.type === 'ollama'
}

/**
@ -9,6 +9,7 @@
import {
formatApiHost,
formatAzureOpenAIApiHost,
formatOllamaApiHost,
formatVertexApiHost,
routeToEndpoint,
withoutTrailingSlash
@ -18,6 +19,7 @@ import {
isAzureOpenAIProvider,
isCherryAIProvider,
isGeminiProvider,
isOllamaProvider,
isPerplexityProvider,
isVertexProvider
} from './detection'
@ -77,6 +79,8 @@ export function formatProviderApiHost<T extends MinimalProvider>(provider: T, co
}
} else if (formatted.id === SystemProviderIds.copilot || formatted.id === SystemProviderIds.github) {
formatted.apiHost = formatApiHost(formatted.apiHost, false)
} else if (isOllamaProvider(formatted)) {
formatted.apiHost = formatOllamaApiHost(formatted.apiHost)
} else if (isGeminiProvider(formatted)) {
formatted.apiHost = formatApiHost(formatted.apiHost, true, 'v1beta')
} else if (isAzureOpenAIProvider(formatted)) {

@ -19,6 +19,7 @@ export {
isCherryAIProvider,
isGeminiProvider,
isNewApiProvider,
isOllamaProvider,
isOpenAICompatibleProvider,
isOpenAIProvider,
isPerplexityProvider,
@ -79,12 +79,12 @@ export const SHARED_PROVIDER_CONFIGS: ProviderConfig[] = [
aliases: ['hf', 'hugging-face']
},
{
id: 'ai-gateway',
name: 'AI Gateway',
id: 'gateway',
name: 'Vercel AI Gateway',
import: () => import('@ai-sdk/gateway'),
creatorFunctionName: 'createGateway',
supportsImageGeneration: true,
aliases: ['gateway']
aliases: ['ai-gateway']
},
{
id: 'cerebras',
@ -92,6 +92,13 @@ export const SHARED_PROVIDER_CONFIGS: ProviderConfig[] = [
import: () => import('@ai-sdk/cerebras'),
creatorFunctionName: 'createCerebras',
supportsImageGeneration: false
},
{
id: 'ollama',
name: 'Ollama',
import: () => import('ollama-ai-provider-v2'),
creatorFunctionName: 'createOllama',
supportsImageGeneration: false
}
] as const
@ -6,8 +6,10 @@
*/

import { formatPrivateKey, hasProviderConfig, ProviderConfigFactory } from '@cherrystudio/ai-core/provider'
import { isEmpty } from 'lodash'

import { routeToEndpoint } from '../api'
import { isOllamaProvider } from './detection'
import { getAiSdkProviderId } from './mapping'
import type { MinimalProvider } from './types'
import { SystemProviderIds } from './types'
@ -157,6 +159,19 @@ export function providerToAiSdkConfig(
}
}

if (isOllamaProvider(provider)) {
return {
providerId: 'ollama',
options: {
...baseConfig,
headers: {
...provider.extra_headers,
Authorization: !isEmpty(baseConfig.apiKey) ? `Bearer ${baseConfig.apiKey}` : undefined
}
}
}
}

// Build extra options
const extraOptions: Record<string, unknown> = {}
if (endpoint) {
@ -11,7 +11,8 @@ export const ProviderTypeSchema = z.enum([
'aws-bedrock',
'vertex-anthropic',
'new-api',
'ai-gateway'
'gateway',
'ollama'
])

export type ProviderType = z.infer<typeof ProviderTypeSchema>
@ -98,7 +99,7 @@ export const SystemProviderIdSchema = z.enum([
'longcat',
'huggingface',
'sophnet',
'ai-gateway',
'gateway',
'cerebras'
])

@ -167,7 +168,7 @@ export const SystemProviderIds = {
aionly: 'aionly',
longcat: 'longcat',
huggingface: 'huggingface',
'ai-gateway': 'ai-gateway',
gateway: 'gateway',
cerebras: 'cerebras'
} as const satisfies Record<SystemProviderId, SystemProviderId>
@ -27,5 +27,10 @@ export const getLowerBaseModelName = (id: string, delimiter: string = '/'): string => {
if (baseModelName.endsWith(':free')) {
return baseModelName.replace(':free', '')
}

// for cherryin
if (baseModelName.endsWith('(free)')) {
return baseModelName.replace('(free)', '')
}
return baseModelName
}
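A few illustrative inputs for the new '(free)' handling (assumed behavior: the function lower-cases the segment after the last delimiter, as its name suggests; the model ids here are hypothetical):

```ts
getLowerBaseModelName('deepseek/deepseek-r1:free') // -> 'deepseek-r1'
getLowerBaseModelName('cherryin/GLM-4.6(free)')    // -> 'glm-4.6'
getLowerBaseModelName('Qwen/Qwen3-8B')             // -> 'qwen3-8b'
```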
@ -91,23 +91,6 @@ function createIssueCard(issueData) {

return {
elements: [
{
tag: 'div',
text: {
tag: 'lark_md',
content: `**🐛 New GitHub Issue #${issueNumber}**`
}
},
{
tag: 'hr'
},
{
tag: 'div',
text: {
tag: 'lark_md',
content: `**📝 Title:** ${issueTitle}`
}
},
{
tag: 'div',
text: {
@ -158,7 +141,7 @@ function createIssueCard(issueData) {
template: 'blue',
title: {
tag: 'plain_text',
content: '🆕 Cherry Studio - New Issue'
content: `#${issueNumber} - ${issueTitle}`
}
}
}
@ -1,3 +1,4 @@
import { API_SERVER_DEFAULTS } from '@shared/config/constant'
import type { ApiServerConfig } from '@types'
import { v4 as uuidv4 } from 'uuid'

@ -6,9 +7,6 @@ import { reduxService } from '../services/ReduxService'

const logger = loggerService.withContext('ApiServerConfig')

const defaultHost = 'localhost'
const defaultPort = 23333

class ConfigManager {
private _config: ApiServerConfig | null = null

@ -30,8 +28,8 @@ class ConfigManager {
}
this._config = {
enabled: serverSettings?.enabled ?? false,
port: serverSettings?.port ?? defaultPort,
host: defaultHost,
port: serverSettings?.port ?? API_SERVER_DEFAULTS.PORT,
host: serverSettings?.host ?? API_SERVER_DEFAULTS.HOST,
apiKey: apiKey
}
return this._config
@ -39,8 +37,8 @@ class ConfigManager {
logger.warn('Failed to load config from Redux, using defaults', { error })
this._config = {
enabled: false,
port: defaultPort,
host: defaultHost,
port: API_SERVER_DEFAULTS.PORT,
host: API_SERVER_DEFAULTS.HOST,
apiKey: this.generateApiKey()
}
return this._config
@ -20,8 +20,8 @@ const swaggerOptions: swaggerJSDoc.Options = {
},
servers: [
{
url: 'http://localhost:23333',
description: 'Local development server'
url: '/',
description: 'Current server'
}
],
components: {
@ -19,19 +19,9 @@ export default class EmbeddingsFactory {
})
}
if (provider === 'ollama') {
if (baseURL.includes('v1/')) {
return new OllamaEmbeddings({
model: model,
baseUrl: baseURL.replace('v1/', ''),
requestOptions: {
// @ts-ignore expected
'encoding-format': 'float'
}
})
}
return new OllamaEmbeddings({
model: model,
baseUrl: baseURL,
baseUrl: baseURL.replace(/\/api$/, ''),
requestOptions: {
// @ts-ignore expected
'encoding-format': 'float'
@ -189,7 +189,7 @@ export default class ModernAiProvider {
config: ModernAiProviderConfig
): Promise<CompletionsResult> {
// ai-gateway is not an image/generation endpoint, so skip the legacy path for it
if (config.isImageGenerationEndpoint && this.getActualProvider().id !== SystemProviderIds['ai-gateway']) {
if (config.isImageGenerationEndpoint && this.getActualProvider().id !== SystemProviderIds.gateway) {
// Use the legacy implementation for image generation (supports image editing and other advanced features)
if (!config.uiMessages) {
throw new Error('uiMessages is required for image generation endpoint')
@ -480,7 +480,7 @@ export default class ModernAiProvider {

// Proxy other methods to the original implementation
public async models() {
if (this.actualProvider.id === SystemProviderIds['ai-gateway']) {
if (this.actualProvider.id === SystemProviderIds.gateway) {
const formatModel = function (models: GatewayLanguageModelEntry[]): Model[] {
return models.map((m) => ({
id: m.id,
@ -9,6 +9,7 @@ import {
import { REFERENCE_PROMPT } from '@renderer/config/prompts'
import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio'
import { getAssistantSettings } from '@renderer/services/AssistantService'
import type { RootState } from '@renderer/store'
import type {
Assistant,
GenerateImageParams,
@ -245,23 +246,20 @@ export abstract class BaseApiClient<

protected getVerbosity(model?: Model): OpenAIVerbosity {
try {
const state = window.store?.getState()
const state = window.store?.getState() as RootState
const verbosity = state?.settings?.openAI?.verbosity

if (verbosity && ['low', 'medium', 'high'].includes(verbosity)) {
// If model is provided, check if the verbosity is supported by the model
if (model) {
const supportedVerbosity = getModelSupportedVerbosity(model)
// Use user's verbosity if supported, otherwise use the first supported option
return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
}
return verbosity
// If model is provided, check if the verbosity is supported by the model
if (model) {
const supportedVerbosity = getModelSupportedVerbosity(model)
// Use user's verbosity if supported, otherwise use the first supported option
return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0]
}
return verbosity
} catch (error) {
logger.warn('Failed to get verbosity from state:', error as Error)
logger.warn('Failed to get verbosity from state. Fallback to undefined.', error as Error)
return undefined
}

return 'medium'
}

protected getTimeout(model: Model) {
@ -32,7 +32,6 @@ import {
isSupportedThinkingTokenModel,
isSupportedThinkingTokenQwenModel,
isSupportedThinkingTokenZhipuModel,
isSupportVerbosityModel,
isVisionModel,
MODEL_SUPPORTED_REASONING_EFFORT,
ZHIPU_RESULT_TOKENS
@ -714,13 +713,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
...modalities,
// groq has a different service tier configuration that does not match the OpenAI interface type
service_tier: this.getServiceTier(model) as OpenAIServiceTier,
...(isSupportVerbosityModel(model)
? {
text: {
verbosity: this.getVerbosity(model)
}
}
: {}),
// verbosity. getVerbosity ensures the returned value is valid.
verbosity: this.getVerbosity(model),
...this.getProviderSpecificParameters(assistant, model),
...reasoningEffort,
// ...getOpenAIWebSearchParams(model, enableWebSearch),
@ -11,7 +11,7 @@ import { getStoreSetting } from '@renderer/hooks/useSettings'
import { getAssistantSettings } from '@renderer/services/AssistantService'
import store from '@renderer/store'
import type { SettingsState } from '@renderer/store/settings'
import type { Assistant, GenerateImageParams, Model, Provider } from '@renderer/types'
import { type Assistant, type GenerateImageParams, type Model, type Provider } from '@renderer/types'
import type {
OpenAIResponseSdkMessageParam,
OpenAIResponseSdkParams,
@ -25,7 +25,8 @@ import type {
OpenAISdkRawOutput,
ReasoningEffortOptionalParams
} from '@renderer/types/sdk'
import { formatApiHost } from '@renderer/utils/api'
import { formatApiHost, withoutTrailingSlash } from '@renderer/utils/api'
import { isOllamaProvider } from '@renderer/utils/provider'

import { BaseApiClient } from '../BaseApiClient'

@ -115,6 +116,34 @@ export abstract class OpenAIBaseClient<
}))
.filter(isSupportedModel)
}

if (isOllamaProvider(this.provider)) {
const baseUrl = withoutTrailingSlash(this.getBaseURL(false))
.replace(/\/v1$/, '')
.replace(/\/api$/, '')
const response = await fetch(`${baseUrl}/api/tags`, {
headers: {
Authorization: `Bearer ${this.apiKey}`,
...this.defaultHeaders(),
...this.provider.extra_headers
}
})

if (!response.ok) {
throw new Error(`Ollama server returned ${response.status} ${response.statusText}`)
}

const data = await response.json()
if (!data?.models || !Array.isArray(data.models)) {
throw new Error('Invalid response from Ollama API: missing models array')
}

return data.models.map((model) => ({
id: model.name,
object: 'model',
owned_by: 'ollama'
}))
}
const response = await sdk.models.list()
if (this.provider.id === 'together') {
// @ts-ignore key is not typed
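The new branch queries Ollama's native /api/tags endpoint instead of the OpenAI-compatible /v1/models route. For context, a sketch of the response shape the mapping above consumes (abbreviated; field names follow Ollama's documented API, and only `name` is actually read here):

```ts
// Abbreviated shape of a GET /api/tags response from an Ollama server.
interface OllamaTagsResponse {
  models: Array<{
    name: string         // e.g. 'llama3.2:latest' — becomes the model id
    modified_at?: string // last-modified timestamp
    size?: number        // model size in bytes
    digest?: string      // content digest of the model
  }>
}
```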
@ -4,7 +4,7 @@ import { isGemini3Model, isSupportedThinkingTokenQwenModel } from '@renderer/config/models'
import type { MCPTool } from '@renderer/types'
import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk'
import { isSupportEnableThinkingProvider } from '@renderer/utils/provider'
import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider'
import { openrouterReasoningMiddleware, skipGeminiThoughtSignatureMiddleware } from '@shared/middleware'
import type { LanguageModelMiddleware } from 'ai'
import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai'
@ -239,6 +239,7 @@ function addModelSpecificMiddlewares(builder: AiSdkMiddlewareBuilder, config: Ai
// Use /think or /no_think suffix to control thinking mode
if (
config.provider &&
!isOllamaProvider(config.provider) &&
isSupportedThinkingTokenQwenModel(config.model) &&
!isSupportEnableThinkingProvider(config.provider)
) {
@ -11,12 +11,16 @@ import { vertex } from '@ai-sdk/google-vertex/edge'
import { combineHeaders } from '@ai-sdk/provider-utils'
import type { AnthropicSearchConfig, WebSearchPluginConfig } from '@cherrystudio/ai-core/built-in/plugins'
import { isBaseProvider } from '@cherrystudio/ai-core/core/providers/schemas'
import type { BaseProviderId } from '@cherrystudio/ai-core/provider'
import { loggerService } from '@logger'
import {
isAnthropicModel,
isFixedReasoningModel,
isGeminiModel,
isGenerateImageModel,
isGrokModel,
isOpenAIModel,
isOpenRouterBuiltInWebSearchModel,
isReasoningModel,
isSupportedReasoningEffortModel,
isSupportedThinkingTokenModel,
isWebSearchModel
@ -24,11 +28,12 @@ import {
import { getDefaultModel } from '@renderer/services/AssistantService'
import store from '@renderer/store'
import type { CherryWebSearchConfig } from '@renderer/store/websearch'
import { type Assistant, type MCPTool, type Provider } from '@renderer/types'
import type { Model } from '@renderer/types'
import { type Assistant, type MCPTool, type Provider, SystemProviderIds } from '@renderer/types'
import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
import { replacePromptVariables } from '@renderer/utils/prompt'
import { isAwsBedrockProvider } from '@renderer/utils/provider'
import { isAIGatewayProvider, isAwsBedrockProvider } from '@renderer/utils/provider'
import type { ModelMessage, Tool } from 'ai'
import { stepCountIs } from 'ai'

@ -43,6 +48,25 @@ const logger = loggerService.withContext('parameterBuilder')

type ProviderDefinedTool = Extract<Tool<any, any>, { type: 'provider-defined' }>

function mapVertexAIGatewayModelToProviderId(model: Model): BaseProviderId | undefined {
if (isAnthropicModel(model)) {
return 'anthropic'
}
if (isGeminiModel(model)) {
return 'google'
}
if (isGrokModel(model)) {
return 'xai'
}
if (isOpenAIModel(model)) {
return 'openai'
}
logger.warn(
`[mapVertexAIGatewayModelToProviderId] Unknown model type for AI Gateway: ${model.id}. Web search will not be enabled.`
)
return undefined
}

/**
* Builds the AI SDK streaming parameters.
* This is the main parameter builder; it consolidates all transformation logic.
@ -83,7 +107,7 @@ export async function buildStreamTextParams(
const enableReasoning =
((isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model)) &&
assistant.settings?.reasoning_effort !== undefined) ||
(isReasoningModel(model) && (!isSupportedThinkingTokenModel(model) || !isSupportedReasoningEffortModel(model)))
isFixedReasoningModel(model)

// Decide whether to use built-in web search
// Condition: no external search provider && (user enabled built-in search || the model forces built-in search)
@ -117,6 +141,11 @@
if (enableWebSearch) {
if (isBaseProvider(aiSdkProviderId)) {
webSearchPluginConfig = buildProviderBuiltinWebSearchConfig(aiSdkProviderId, webSearchConfig, model)
} else if (isAIGatewayProvider(provider) || SystemProviderIds.gateway === provider.id) {
const aiSdkProviderId = mapVertexAIGatewayModelToProviderId(model)
if (aiSdkProviderId) {
webSearchPluginConfig = buildProviderBuiltinWebSearchConfig(aiSdkProviderId, webSearchConfig, model)
}
}
if (!tools) {
tools = {}
@ -218,7 +218,6 @@ export async function prepareSpecialProviderConfig(
...(config.options.headers ? config.options.headers : {}),
'Content-Type': 'application/json',
'anthropic-version': '2023-06-01',
'anthropic-beta': 'oauth-2025-04-20',
Authorization: `Bearer ${oauthToken}`
},
baseURL: 'https://api.anthropic.com/v1',
@ -27,7 +27,8 @@ vi.mock('@cherrystudio/ai-core/provider', async (importOriginal) => {
'xai',
'deepseek',
'openrouter',
'openai-compatible'
'openai-compatible',
'cherryin'
]
if (baseProviders.includes(id)) {
return { success: true, data: id }
@ -37,7 +38,15 @@
},
customProviderIdSchema: {
safeParse: vi.fn((id) => {
const customProviders = ['google-vertex', 'google-vertex-anthropic', 'bedrock']
const customProviders = [
'google-vertex',
'google-vertex-anthropic',
'bedrock',
'gateway',
'aihubmix',
'newapi',
'ollama'
]
if (customProviders.includes(id)) {
return { success: true, data: id }
}
@ -47,20 +56,7 @@
}
})

vi.mock('../provider/factory', () => ({
getAiSdkProviderId: vi.fn((provider) => {
// Simulate the provider ID mapping
const mapping: Record<string, string> = {
[SystemProviderIds.gemini]: 'google',
[SystemProviderIds.openai]: 'openai',
[SystemProviderIds.anthropic]: 'anthropic',
[SystemProviderIds.grok]: 'xai',
[SystemProviderIds.deepseek]: 'deepseek',
[SystemProviderIds.openrouter]: 'openrouter'
}
return mapping[provider.id] || provider.id
})
}))
// Don't mock getAiSdkProviderId - use real implementation for more accurate tests

vi.mock('@renderer/config/models', async (importOriginal) => ({
...(await importOriginal()),
@ -179,8 +175,11 @@ describe('options utils', () => {
provider: SystemProviderIds.openai
} as Model

beforeEach(() => {
beforeEach(async () => {
vi.clearAllMocks()
// Reset getCustomParameters to return empty object by default
const { getCustomParameters } = await import('../reasoning')
vi.mocked(getCustomParameters).mockReturnValue({})
})

describe('buildProviderOptions', () => {
@ -391,7 +390,6 @@ describe('options utils', () => {
enableWebSearch: false,
enableGenerateImage: false
})

expect(result.providerOptions).toHaveProperty('deepseek')
expect(result.providerOptions.deepseek).toBeDefined()
})
@ -461,10 +459,14 @@ describe('options utils', () => {
}
)

expect(result.providerOptions.openai).toHaveProperty('custom_param')
expect(result.providerOptions.openai.custom_param).toBe('custom_value')
expect(result.providerOptions.openai).toHaveProperty('another_param')
expect(result.providerOptions.openai.another_param).toBe(123)
expect(result.providerOptions).toStrictEqual({
openai: {
custom_param: 'custom_value',
another_param: 123,
serviceTier: undefined,
textVerbosity: undefined
}
})
})

it('should extract AI SDK standard params from custom parameters', async () => {
@ -696,5 +698,459 @@ describe('options utils', () => {
})
})
})

describe('AI Gateway provider', () => {
const gatewayProvider: Provider = {
id: SystemProviderIds.gateway,
name: 'Vercel AI Gateway',
type: 'gateway',
apiKey: 'test-key',
apiHost: 'https://gateway.vercel.com',
isSystem: true
} as Provider

it('should build OpenAI options for OpenAI models through gateway', () => {
const openaiModel: Model = {
id: 'openai/gpt-4',
name: 'GPT-4',
provider: SystemProviderIds.gateway
} as Model

const result = buildProviderOptions(mockAssistant, openaiModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions.openai).toBeDefined()
})

it('should build Anthropic options for Anthropic models through gateway', () => {
const anthropicModel: Model = {
id: 'anthropic/claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
provider: SystemProviderIds.gateway
} as Model

const result = buildProviderOptions(mockAssistant, anthropicModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

expect(result.providerOptions).toHaveProperty('anthropic')
expect(result.providerOptions.anthropic).toBeDefined()
})

it('should build Google options for Gemini models through gateway', () => {
const geminiModel: Model = {
id: 'google/gemini-2.0-flash-exp',
name: 'Gemini 2.0 Flash',
provider: SystemProviderIds.gateway
} as Model

const result = buildProviderOptions(mockAssistant, geminiModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

expect(result.providerOptions).toHaveProperty('google')
expect(result.providerOptions.google).toBeDefined()
})

it('should build xAI options for Grok models through gateway', () => {
const grokModel: Model = {
id: 'xai/grok-2-latest',
name: 'Grok 2',
provider: SystemProviderIds.gateway
} as Model

const result = buildProviderOptions(mockAssistant, grokModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

expect(result.providerOptions).toHaveProperty('xai')
expect(result.providerOptions.xai).toBeDefined()
})

it('should include reasoning parameters for Anthropic models when enabled', () => {
const anthropicModel: Model = {
id: 'anthropic/claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
provider: SystemProviderIds.gateway
} as Model

const result = buildProviderOptions(mockAssistant, anthropicModel, gatewayProvider, {
enableReasoning: true,
enableWebSearch: false,
enableGenerateImage: false
})

expect(result.providerOptions.anthropic).toHaveProperty('thinking')
expect(result.providerOptions.anthropic.thinking).toEqual({
type: 'enabled',
budgetTokens: 5000
})
})

it('should merge gateway routing options from custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')

vi.mocked(getCustomParameters).mockReturnValue({
gateway: {
order: ['vertex', 'anthropic'],
only: ['vertex', 'anthropic']
}
})

const anthropicModel: Model = {
id: 'anthropic/claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
provider: SystemProviderIds.gateway
} as Model

const result = buildProviderOptions(mockAssistant, anthropicModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

// Should have both anthropic provider options and gateway routing options
expect(result.providerOptions).toHaveProperty('anthropic')
expect(result.providerOptions).toHaveProperty('gateway')
expect(result.providerOptions.gateway).toEqual({
order: ['vertex', 'anthropic'],
only: ['vertex', 'anthropic']
})
})

it('should combine provider-specific options with gateway routing options', async () => {
const { getCustomParameters } = await import('../reasoning')

vi.mocked(getCustomParameters).mockReturnValue({
gateway: {
order: ['openai', 'anthropic']
}
})

const openaiModel: Model = {
id: 'openai/gpt-4',
name: 'GPT-4',
provider: SystemProviderIds.gateway
} as Model

const result = buildProviderOptions(mockAssistant, openaiModel, gatewayProvider, {
enableReasoning: true,
enableWebSearch: false,
enableGenerateImage: false
})

// Should have OpenAI provider options with reasoning
expect(result.providerOptions.openai).toBeDefined()
expect(result.providerOptions.openai).toHaveProperty('reasoningEffort')

// Should also have gateway routing options
expect(result.providerOptions.gateway).toBeDefined()
expect(result.providerOptions.gateway.order).toEqual(['openai', 'anthropic'])
})

it('should build generic options for unknown model types through gateway', () => {
const unknownModel: Model = {
id: 'unknown-provider/model-name',
name: 'Unknown Model',
provider: SystemProviderIds.gateway
} as Model

const result = buildProviderOptions(mockAssistant, unknownModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

expect(result.providerOptions).toHaveProperty('openai-compatible')
expect(result.providerOptions['openai-compatible']).toBeDefined()
})
})

describe('Proxy provider custom parameters mapping', () => {
it('should map cherryin provider ID to actual AI SDK provider ID (Google)', async () => {
const { getCustomParameters } = await import('../reasoning')

// Mock Cherry In provider that uses Google SDK
const cherryinProvider = {
id: 'cherryin',
name: 'Cherry In',
type: 'gemini', // Using Google SDK
apiKey: 'test-key',
apiHost: 'https://cherryin.com',
models: [] as Model[]
} as Provider

const geminiModel: Model = {
id: 'gemini-2.0-flash-exp',
name: 'Gemini 2.0 Flash',
provider: 'cherryin'
} as Model

// User provides custom parameters with Cherry Studio provider ID
vi.mocked(getCustomParameters).mockReturnValue({
cherryin: {
customOption1: 'value1',
customOption2: 'value2'
}
})

const result = buildProviderOptions(mockAssistant, geminiModel, cherryinProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

// Should map to 'google' AI SDK provider, not 'cherryin'
expect(result.providerOptions).toHaveProperty('google')
expect(result.providerOptions).not.toHaveProperty('cherryin')
expect(result.providerOptions.google).toMatchObject({
customOption1: 'value1',
customOption2: 'value2'
})
})

it('should map cherryin provider ID to actual AI SDK provider ID (OpenAI)', async () => {
const { getCustomParameters } = await import('../reasoning')

// Mock Cherry In provider that uses OpenAI SDK
const cherryinProvider = {
id: 'cherryin',
name: 'Cherry In',
type: 'openai-response', // Using OpenAI SDK
apiKey: 'test-key',
apiHost: 'https://cherryin.com',
models: [] as Model[]
} as Provider

const openaiModel: Model = {
id: 'gpt-4',
name: 'GPT-4',
provider: 'cherryin'
} as Model

// User provides custom parameters with Cherry Studio provider ID
vi.mocked(getCustomParameters).mockReturnValue({
cherryin: {
customOpenAIOption: 'openai_value'
}
})

const result = buildProviderOptions(mockAssistant, openaiModel, cherryinProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

// Should map to 'openai' AI SDK provider, not 'cherryin'
expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions).not.toHaveProperty('cherryin')
expect(result.providerOptions.openai).toMatchObject({
customOpenAIOption: 'openai_value'
})
})

it('should allow direct AI SDK provider ID in custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')

const geminiProvider = {
id: SystemProviderIds.gemini,
name: 'Google',
type: 'gemini',
apiKey: 'test-key',
apiHost: 'https://generativelanguage.googleapis.com',
models: [] as Model[]
} as Provider

const geminiModel: Model = {
id: 'gemini-2.0-flash-exp',
name: 'Gemini 2.0 Flash',
provider: SystemProviderIds.gemini
} as Model

// User provides custom parameters directly with AI SDK provider ID
vi.mocked(getCustomParameters).mockReturnValue({
google: {
directGoogleOption: 'google_value'
}
})

const result = buildProviderOptions(mockAssistant, geminiModel, geminiProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

// Should merge directly to 'google' provider
expect(result.providerOptions.google).toMatchObject({
directGoogleOption: 'google_value'
})
})

it('should map gateway provider custom parameters to actual AI SDK provider', async () => {
const { getCustomParameters } = await import('../reasoning')

const gatewayProvider: Provider = {
id: SystemProviderIds.gateway,
name: 'Vercel AI Gateway',
type: 'gateway',
apiKey: 'test-key',
apiHost: 'https://gateway.vercel.com',
isSystem: true
} as Provider

const anthropicModel: Model = {
id: 'anthropic/claude-3-5-sonnet-20241022',
name: 'Claude 3.5 Sonnet',
provider: SystemProviderIds.gateway
} as Model

// User provides both gateway routing options and gateway-scoped custom parameters
vi.mocked(getCustomParameters).mockReturnValue({
gateway: {
order: ['vertex', 'anthropic'],
only: ['vertex']
},
customParam: 'should_go_to_anthropic'
})

const result = buildProviderOptions(mockAssistant, anthropicModel, gatewayProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

// Gateway routing options should be preserved
expect(result.providerOptions.gateway).toEqual({
order: ['vertex', 'anthropic'],
only: ['vertex']
})

// Custom parameters should go to the actual AI SDK provider (anthropic)
expect(result.providerOptions.anthropic).toMatchObject({
customParam: 'should_go_to_anthropic'
})
})

it('should handle mixed custom parameters (AI SDK provider ID + custom params)', async () => {
const { getCustomParameters } = await import('../reasoning')

const openaiProvider: Provider = {
id: SystemProviderIds.openai,
name: 'OpenAI',
type: 'openai-response',
apiKey: 'test-key',
apiHost: 'https://api.openai.com/v1',
isSystem: true
} as Provider

// User provides both direct AI SDK provider params and custom params
vi.mocked(getCustomParameters).mockReturnValue({
openai: {
providerSpecific: 'value1'
},
customParam1: 'value2',
customParam2: 123
})

const result = buildProviderOptions(mockAssistant, mockModel, openaiProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

// Should merge both into 'openai' provider options
expect(result.providerOptions.openai).toMatchObject({
providerSpecific: 'value1',
customParam1: 'value2',
customParam2: 123
})
})

// Note: For proxy providers like aihubmix/newapi, users should write AI SDK provider ID (google/anthropic)
// instead of the Cherry Studio provider ID for custom parameters to work correctly

it('should handle cherryin fallback to openai-compatible with custom parameters', async () => {
const { getCustomParameters } = await import('../reasoning')

// Mock cherryin provider that falls back to openai-compatible (default case)
const cherryinProvider = {
id: 'cherryin',
name: 'Cherry In',
type: 'openai',
apiKey: 'test-key',
apiHost: 'https://cherryin.com',
models: [] as Model[]
} as Provider

const testModel: Model = {
id: 'some-model',
name: 'Some Model',
provider: 'cherryin'
} as Model

// User provides custom parameters with cherryin provider ID
vi.mocked(getCustomParameters).mockReturnValue({
customCherryinOption: 'cherryin_value'
})

const result = buildProviderOptions(mockAssistant, testModel, cherryinProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

// When cherryin falls back to default case, it should use rawProviderId (cherryin)
// User's cherryin params should merge with the provider options
expect(result.providerOptions).toHaveProperty('cherryin')
expect(result.providerOptions.cherryin).toMatchObject({
customCherryinOption: 'cherryin_value'
})
})

it('should handle cross-provider configurations', async () => {
const { getCustomParameters } = await import('../reasoning')

const openaiProvider: Provider = {
id: SystemProviderIds.openai,
name: 'OpenAI',
type: 'openai-response',
apiKey: 'test-key',
apiHost: 'https://api.openai.com/v1',
isSystem: true
} as Provider

// User provides parameters for multiple providers
// In real usage, anthropic/google params would be treated as regular params for openai provider
vi.mocked(getCustomParameters).mockReturnValue({
openai: {
openaiSpecific: 'openai_value'
},
customParam: 'value'
})

const result = buildProviderOptions(mockAssistant, mockModel, openaiProvider, {
enableReasoning: false,
enableWebSearch: false,
enableGenerateImage: false
})

// Should have openai provider options with both scoped and custom params
expect(result.providerOptions).toHaveProperty('openai')
expect(result.providerOptions.openai).toMatchObject({
openaiSpecific: 'openai_value',
customParam: 'value'
})
})
})
})
})
@ -1,5 +1,5 @@
import type { BedrockProviderOptions } from '@ai-sdk/amazon-bedrock'
import type { AnthropicProviderOptions } from '@ai-sdk/anthropic'
import { type AnthropicProviderOptions } from '@ai-sdk/anthropic'
import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
import type { XaiProviderOptions } from '@ai-sdk/xai'
@ -7,6 +7,9 @@ import { baseProviderIdSchema, customProviderIdSchema } from '@cherrystudio/ai-c
import { loggerService } from '@logger'
import {
getModelSupportedVerbosity,
isAnthropicModel,
isGeminiModel,
isGrokModel,
isOpenAIModel,
isQwenMTModel,
isSupportFlexServiceTierModel,
@ -29,12 +32,14 @@ import {
type OpenAIServiceTier,
OpenAIServiceTiers,
type Provider,
type ServiceTier
type ServiceTier,
SystemProviderIds
} from '@renderer/types'
import { type AiSdkParam, isAiSdkParam, type OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { isSupportServiceTierProvider, isSupportVerbosityProvider } from '@renderer/utils/provider'
import type { JSONValue } from 'ai'
import { t } from 'i18next'
import type { OllamaCompletionProviderOptions } from 'ollama-ai-provider-v2'

import { addAnthropicHeaders } from '../prepareParams/header'
import { getAiSdkProviderId } from '../provider/factory'
@ -156,8 +161,8 @@ export function buildProviderOptions(
providerOptions: Record<string, Record<string, JSONValue>>
standardParams: Partial<Record<AiSdkParam, any>>
} {
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities })
const rawProviderId = getAiSdkProviderId(actualProvider)
logger.debug('buildProviderOptions', { assistant, model, actualProvider, capabilities, rawProviderId })
// Build provider-specific options
let providerSpecificOptions: Record<string, any> = {}
const serviceTier = getServiceTier(model, actualProvider)
@ -172,14 +177,13 @@
case 'azure':
case 'azure-responses':
{
const options: OpenAIResponsesProviderOptions = buildOpenAIProviderOptions(
providerSpecificOptions = buildOpenAIProviderOptions(
assistant,
model,
capabilities,
serviceTier,
textVerbosity
)
providerSpecificOptions = options
}
break
case 'anthropic':
@ -197,10 +201,13 @@
case 'openrouter':
case 'openai-compatible': {
// For other providers, use the generic build logic
const genericOptions = buildGenericProviderOptions(rawProviderId, assistant, model, capabilities)
providerSpecificOptions = {
...buildGenericProviderOptions(assistant, model, capabilities),
serviceTier,
textVerbosity
[rawProviderId]: {
...genericOptions[rawProviderId],
serviceTier,
textVerbosity
}
}
break
}
@ -236,48 +243,108 @@
case 'huggingface':
providerSpecificOptions = buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier)
break
case SystemProviderIds.ollama:
providerSpecificOptions = buildOllamaProviderOptions(assistant, capabilities)
break
case SystemProviderIds.gateway:
providerSpecificOptions = buildAIGatewayOptions(assistant, model, capabilities, serviceTier, textVerbosity)
break
default:
// For other providers, use the generic build logic
providerSpecificOptions = buildGenericProviderOptions(rawProviderId, assistant, model, capabilities)
// Merge serviceTier and textVerbosity
providerSpecificOptions = {
...buildGenericProviderOptions(assistant, model, capabilities),
serviceTier,
textVerbosity
...providerSpecificOptions,
[rawProviderId]: {
...providerSpecificOptions[rawProviderId],
serviceTier,
textVerbosity
}
}
}
} else {
throw error
}
}

// Get custom parameters and separate standard params from provider-specific params
logger.debug('Built providerSpecificOptions', { providerSpecificOptions })
/**
* Retrieve custom parameters and separate standard parameters from provider-specific parameters.
*/
const customParams = getCustomParameters(assistant)
const { standardParams, providerParams } = extractAiSdkStandardParams(customParams)
logger.debug('Extracted standardParams and providerParams', { standardParams, providerParams })

// Merge provider-specific custom parameters into providerSpecificOptions
providerSpecificOptions = {
...providerSpecificOptions,
...providerParams
}

let rawProviderKey =
{
'google-vertex': 'google',
'google-vertex-anthropic': 'anthropic',
'azure-anthropic': 'anthropic',
'ai-gateway': 'gateway',
azure: 'openai',
'azure-responses': 'openai'
}[rawProviderId] || rawProviderId

if (rawProviderKey === 'cherryin') {
rawProviderKey = { gemini: 'google', ['openai-response']: 'openai' }[actualProvider.type] || actualProvider.type
/**
* Get the actual AI SDK provider ID(s) from the already-built providerSpecificOptions.
* For proxy providers (cherryin, aihubmix, newapi), this will be the actual SDK provider (e.g., 'google', 'openai', 'anthropic')
* For regular providers, this will be the provider itself
*/
const actualAiSdkProviderIds = Object.keys(providerSpecificOptions)
const primaryAiSdkProviderId = actualAiSdkProviderIds[0] // Use the first one as primary for non-scoped params

/**
* Merge custom parameters into providerSpecificOptions.
* Simple logic:
* 1. If key is in actualAiSdkProviderIds → merge directly (user knows the actual AI SDK provider ID)
* 2. If key == rawProviderId:
*    - If it's gateway/ollama → preserve (they need their own config for routing/options)
*    - Otherwise → map to primary (this is a proxy provider like cherryin)
* 3. Otherwise → treat as regular parameter, merge to primary provider
*
* Example:
* - User writes `cherryin: { opt: 'val' }` → mapped to `google: { opt: 'val' }` (case 2, proxy)
* - User writes `gateway: { order: [...] }` → stays as `gateway: { order: [...] }` (case 2, routing config)
* - User writes `google: { opt: 'val' }` → stays as `google: { opt: 'val' }` (case 1)
* - User writes `customKey: 'val'` → merged to `google: { customKey: 'val' }` (case 3)
*/
for (const key of Object.keys(providerParams)) {
if (actualAiSdkProviderIds.includes(key)) {
// Case 1: Key is an actual AI SDK provider ID - merge directly
providerSpecificOptions = {
...providerSpecificOptions,
[key]: {
...providerSpecificOptions[key],
...providerParams[key]
}
}
} else if (key === rawProviderId && !actualAiSdkProviderIds.includes(rawProviderId)) {
// Case 2: Key is the current provider (not in actualAiSdkProviderIds, so it's a proxy or special provider)
// Gateway is special: it needs routing config preserved
if (key === SystemProviderIds.gateway) {
// Preserve gateway config for routing
providerSpecificOptions = {
...providerSpecificOptions,
[key]: {
...providerSpecificOptions[key],
...providerParams[key]
}
}
} else {
// Proxy provider (cherryin, etc.) - map to actual AI SDK provider
providerSpecificOptions = {
...providerSpecificOptions,
[primaryAiSdkProviderId]: {
...providerSpecificOptions[primaryAiSdkProviderId],
...providerParams[key]
}
}
}
} else {
// Case 3: Regular parameter - merge to primary provider
providerSpecificOptions = {
...providerSpecificOptions,
[primaryAiSdkProviderId]: {
...providerSpecificOptions[primaryAiSdkProviderId],
[key]: providerParams[key]
}
}
}
}
logger.debug('Final providerSpecificOptions after merging providerParams', { providerSpecificOptions })

// Return the shape required by the AI Core SDK: { 'providerId': providerOptions }, plus the extracted standard params
return {
providerOptions: {
[rawProviderKey]: providerSpecificOptions
},
providerOptions: providerSpecificOptions,
standardParams
}
}
@ -295,7 +362,7 @@ function buildOpenAIProviderOptions(
|
||||
},
|
||||
serviceTier: OpenAIServiceTier,
|
||||
textVerbosity?: OpenAIVerbosity
|
||||
): OpenAIResponsesProviderOptions {
|
||||
): Record<string, OpenAIResponsesProviderOptions> {
|
||||
const { enableReasoning } = capabilities
|
||||
let providerOptions: OpenAIResponsesProviderOptions = {}
|
||||
// OpenAI 推理参数
|
||||
@ -334,7 +401,9 @@ function buildOpenAIProviderOptions(
|
||||
textVerbosity
|
||||
}
|
||||
|
||||
return providerOptions
|
||||
return {
|
||||
openai: providerOptions
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -348,7 +417,7 @@ function buildAnthropicProviderOptions(
|
||||
enableWebSearch: boolean
|
||||
enableGenerateImage: boolean
|
||||
}
|
||||
): AnthropicProviderOptions {
|
||||
): Record<string, AnthropicProviderOptions> {
|
||||
const { enableReasoning } = capabilities
|
||||
let providerOptions: AnthropicProviderOptions = {}
|
||||
|
||||
@ -361,7 +430,11 @@ function buildAnthropicProviderOptions(
|
||||
}
|
||||
}
|
||||
|
||||
return providerOptions
|
||||
return {
|
||||
anthropic: {
|
||||
...providerOptions
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -375,7 +448,7 @@ function buildGeminiProviderOptions(
|
||||
enableWebSearch: boolean
|
||||
enableGenerateImage: boolean
|
||||
}
|
||||
): GoogleGenerativeAIProviderOptions {
|
||||
): Record<string, GoogleGenerativeAIProviderOptions> {
|
||||
const { enableReasoning, enableGenerateImage } = capabilities
|
||||
let providerOptions: GoogleGenerativeAIProviderOptions = {}
|
||||
|
||||
@ -395,7 +468,11 @@ function buildGeminiProviderOptions(
|
||||
}
|
||||
}
|
||||
|
||||
return providerOptions
|
||||
return {
|
||||
google: {
|
||||
...providerOptions
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function buildXAIProviderOptions(
|
||||
@ -406,7 +483,7 @@ function buildXAIProviderOptions(
|
||||
enableWebSearch: boolean
|
||||
enableGenerateImage: boolean
|
||||
}
|
||||
): XaiProviderOptions {
|
||||
): Record<string, XaiProviderOptions> {
|
||||
const { enableReasoning } = capabilities
|
||||
let providerOptions: Record<string, any> = {}
|
||||
|
||||
@ -418,7 +495,11 @@ function buildXAIProviderOptions(
|
||||
}
|
||||
}
|
||||
|
||||
return providerOptions
|
||||
return {
|
||||
xai: {
|
||||
...providerOptions
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function buildCherryInProviderOptions(
|
||||
@ -432,19 +513,20 @@ function buildCherryInProviderOptions(
|
||||
actualProvider: Provider,
|
||||
serviceTier: OpenAIServiceTier,
|
||||
textVerbosity: OpenAIVerbosity
|
||||
): OpenAIResponsesProviderOptions | AnthropicProviderOptions | GoogleGenerativeAIProviderOptions {
|
||||
): Record<string, OpenAIResponsesProviderOptions | AnthropicProviderOptions | GoogleGenerativeAIProviderOptions> {
|
||||
switch (actualProvider.type) {
|
||||
case 'openai':
|
||||
return buildGenericProviderOptions('cherryin', assistant, model, capabilities)
|
||||
case 'openai-response':
|
||||
return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier, textVerbosity)
|
||||
|
||||
case 'anthropic':
|
||||
return buildAnthropicProviderOptions(assistant, model, capabilities)
|
||||
|
||||
case 'gemini':
|
||||
return buildGeminiProviderOptions(assistant, model, capabilities)
|
||||
|
||||
default:
|
||||
return buildGenericProviderOptions('cherryin', assistant, model, capabilities)
|
||||
}
|
||||
return {}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -458,7 +540,7 @@ function buildBedrockProviderOptions(
|
||||
enableWebSearch: boolean
|
||||
enableGenerateImage: boolean
|
||||
}
|
||||
): BedrockProviderOptions {
|
||||
): Record<string, BedrockProviderOptions> {
|
||||
const { enableReasoning } = capabilities
|
||||
let providerOptions: BedrockProviderOptions = {}
|
||||
|
||||
@ -475,13 +557,35 @@ function buildBedrockProviderOptions(
|
||||
providerOptions.anthropicBeta = betaHeaders
|
||||
}
|
||||
|
||||
return providerOptions
|
||||
return {
|
||||
bedrock: providerOptions
|
||||
}
|
||||
}
|
||||
|
||||
function buildOllamaProviderOptions(
|
||||
assistant: Assistant,
|
||||
capabilities: {
|
||||
enableReasoning: boolean
|
||||
enableWebSearch: boolean
|
||||
enableGenerateImage: boolean
|
||||
}
|
||||
): Record<string, OllamaCompletionProviderOptions> {
|
||||
const { enableReasoning } = capabilities
|
||||
const providerOptions: OllamaCompletionProviderOptions = {}
|
||||
const reasoningEffort = assistant.settings?.reasoning_effort
|
||||
if (enableReasoning) {
|
||||
providerOptions.think = !['none', undefined].includes(reasoningEffort)
|
||||
}
|
||||
return {
|
||||
ollama: providerOptions
|
||||
}
|
||||
}
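The `think` flag above is a plain boolean mapping from the assistant's reasoning effort; a small illustrative sketch (not app code):

```ts
// Hedged sketch of the mapping: any effort other than 'none'/undefined enables `think`.
const cases: Array<[string | undefined, boolean]> = [
  ['low', true],
  ['high', true],
  ['none', false],
  [undefined, false]
]
for (const [effort, expected] of cases) {
  const think = !['none', undefined].includes(effort)
  console.assert(think === expected, `effort=${effort}`)
}
```
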
/**
 * Build generic providerOptions (used for other providers)
 */
function buildGenericProviderOptions(
  providerId: string,
  assistant: Assistant,
  model: Model,
  capabilities: {
@ -524,5 +628,37 @@ function buildGenericProviderOptions(
    }
  }

  return providerOptions
  return {
    [providerId]: providerOptions
  }
}

function buildAIGatewayOptions(
  assistant: Assistant,
  model: Model,
  capabilities: {
    enableReasoning: boolean
    enableWebSearch: boolean
    enableGenerateImage: boolean
  },
  serviceTier: OpenAIServiceTier,
  textVerbosity?: OpenAIVerbosity
): Record<
  string,
  | OpenAIResponsesProviderOptions
  | AnthropicProviderOptions
  | GoogleGenerativeAIProviderOptions
  | Record<string, unknown>
> {
  if (isAnthropicModel(model)) {
    return buildAnthropicProviderOptions(assistant, model, capabilities)
  } else if (isOpenAIModel(model)) {
    return buildOpenAIProviderOptions(assistant, model, capabilities, serviceTier, textVerbosity)
  } else if (isGeminiModel(model)) {
    return buildGeminiProviderOptions(assistant, model, capabilities)
  } else if (isGrokModel(model)) {
    return buildXAIProviderOptions(assistant, model, capabilities)
  } else {
    return buildGenericProviderOptions('openai-compatible', assistant, model, capabilities)
  }
}

@ -250,9 +250,25 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
        enable_thinking: true,
        incremental_output: true
      }
    // TODO: support the new-api type
    case SystemProviderIds['new-api']:
    case SystemProviderIds.cherryin: {
      return {
        extra_body: {
          thinking: {
            type: 'enabled' // auto is invalid
          }
        }
      }
    }
    case SystemProviderIds.hunyuan:
    case SystemProviderIds['tencent-cloud-ti']:
    case SystemProviderIds.doubao:
    case SystemProviderIds.deepseek:
    case SystemProviderIds.aihubmix:
    case SystemProviderIds.sophnet:
    case SystemProviderIds.ppio:
    case SystemProviderIds.dmxapi:
      return {
        thinking: {
          type: 'enabled' // auto is invalid
@ -274,8 +290,6 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
      logger.warn(
        `Skipping thinking options for provider ${provider.name} as DeepSeek v3.1 thinking control method is unknown`
      )
    case SystemProviderIds.silicon:
      // specially handled before
    }
  }
}

@ -264,9 +264,10 @@ export const CodeBlockView: React.FC<Props> = memo(({ children, language, onSave
        expanded={shouldExpand}
        wrapped={shouldWrap}
        maxHeight={`${MAX_COLLAPSED_CODE_HEIGHT}px`}
        onRequestExpand={codeCollapsible ? () => setExpandOverride(true) : undefined}
      />
    ),
    [children, codeEditor.enabled, handleHeightChange, language, onSave, shouldExpand, shouldWrap]
    [children, codeCollapsible, codeEditor.enabled, handleHeightChange, language, onSave, shouldExpand, shouldWrap]
  )

// Special view component mapping

@ -64,7 +64,11 @@ exports[`CodeToolbar > basic rendering > should match snapshot with mixed tools
  data-title="code_block.more"
>
  <div
    aria-expanded="false"
    aria-label="code_block.more"
    class="c2"
    role="button"
    tabindex="0"
  >
    <div
      class="tool-icon"

@ -1,6 +1,6 @@
import type { ActionTool } from '@renderer/components/ActionTools'
import { Dropdown, Tooltip } from 'antd'
import { memo, useMemo } from 'react'
import { memo, useCallback, useMemo } from 'react'

import { ToolWrapper } from './styles'

@ -9,13 +9,30 @@ interface CodeToolButtonProps {
}

const CodeToolButton = ({ tool }: CodeToolButtonProps) => {
  const handleKeyDown = useCallback(
    (e: React.KeyboardEvent<HTMLDivElement>) => {
      if (e.key === 'Enter' || e.key === ' ') {
        e.preventDefault()
        tool.onClick?.()
      }
    },
    [tool]
  )

  const mainTool = useMemo(
    () => (
      <Tooltip key={tool.id} title={tool.tooltip} mouseEnterDelay={0.5} mouseLeaveDelay={0}>
        <ToolWrapper onClick={tool.onClick}>{tool.icon}</ToolWrapper>
        <ToolWrapper
          onClick={tool.onClick}
          onKeyDown={handleKeyDown}
          role="button"
          aria-label={tool.tooltip}
          tabIndex={0}>
          {tool.icon}
        </ToolWrapper>
      </Tooltip>
    ),
    [tool]
    [tool, handleKeyDown]
  )

  if (tool.children?.length && tool.children.length > 0) {

@ -40,7 +40,19 @@ const CodeToolbar = ({ tools }: { tools: ActionTool[] }) => {
      {quickToolButtons}
      {quickTools.length > 1 && (
        <Tooltip title={t('code_block.more')} mouseEnterDelay={0.5}>
          <ToolWrapper onClick={() => setShowQuickTools(!showQuickTools)} className={showQuickTools ? 'active' : ''}>
          <ToolWrapper
            onClick={() => setShowQuickTools(!showQuickTools)}
            onKeyDown={(e) => {
              if (e.key === 'Enter' || e.key === ' ') {
                e.preventDefault()
                setShowQuickTools(!showQuickTools)
              }
            }}
            className={showQuickTools ? 'active' : ''}
            role="button"
            aria-label={t('code_block.more')}
            aria-expanded={showQuickTools}
            tabIndex={0}>
            <EllipsisVertical className="tool-icon" />
          </ToolWrapper>
        </Tooltip>

@ -1,3 +1,4 @@
import { loggerService } from '@logger'
import { useCodeStyle } from '@renderer/context/CodeStyleProvider'
import { useCodeHighlight } from '@renderer/hooks/useCodeHighlight'
import { useSettings } from '@renderer/hooks/useSettings'
@ -9,6 +10,15 @@ import React, { memo, useCallback, useEffect, useLayoutEffect, useMemo, useRef }
import type { ThemedToken } from 'shiki/core'
import styled from 'styled-components'

const logger = loggerService.withContext('CodeViewer')

interface SavedSelection {
  startLine: number
  startOffset: number
  endLine: number
  endOffset: number
}

interface CodeViewerProps {
  /** Code string value. */
  value: string
@ -52,6 +62,10 @@ interface CodeViewerProps {
   * @default true
   */
  wrapped?: boolean
  /**
   * Callback to request expansion when multi-line selection is detected.
   */
  onRequestExpand?: () => void
}

/**
@ -70,13 +84,24 @@ const CodeViewer = ({
  fontSize: customFontSize,
  className,
  expanded = true,
  wrapped = true
  wrapped = true,
  onRequestExpand
}: CodeViewerProps) => {
  const { codeShowLineNumbers: _lineNumbers, fontSize: _fontSize } = useSettings()
  const { getShikiPreProperties, isShikiThemeDark } = useCodeStyle()
  const shikiThemeRef = useRef<HTMLDivElement>(null)
  const scrollerRef = useRef<HTMLDivElement>(null)
  const callerId = useRef(`${Date.now()}-${uuid()}`).current
  const savedSelectionRef = useRef<SavedSelection | null>(null)
  // Ensure the active selection actually belongs to this CodeViewer instance
  const selectionBelongsToViewer = useCallback((sel: Selection | null) => {
    const scroller = scrollerRef.current
    if (!scroller || !sel || sel.rangeCount === 0) return false

    // Check if selection intersects with scroller
    const range = sel.getRangeAt(0)
    return scroller.contains(range.commonAncestorContainer)
  }, [])

  const fontSize = useMemo(() => customFontSize ?? _fontSize - 1, [customFontSize, _fontSize])
  const lineNumbers = useMemo(() => options?.lineNumbers ?? _lineNumbers, [options?.lineNumbers, _lineNumbers])
@ -112,6 +137,204 @@ const CodeViewer = ({
    }
  }, [language, getShikiPreProperties, isShikiThemeDark, className])

  // Save the logical position of the current selection
  const saveSelection = useCallback((): SavedSelection | null => {
    const selection = window.getSelection()
    if (!selection || selection.rangeCount === 0 || selection.isCollapsed) {
      return null
    }

    // Only capture selections within this viewer's scroller
    if (!selectionBelongsToViewer(selection)) {
      return null
    }

    const range = selection.getRangeAt(0)
    const scroller = scrollerRef.current
    if (!scroller) return null

    // Find the line numbers for the selection's start and end positions
    const findLineAndOffset = (node: Node, offset: number): { line: number; offset: number } | null => {
      // Walk up to the element carrying the data-index attribute
      let element = node.nodeType === Node.ELEMENT_NODE ? (node as Element) : node.parentElement

      // Skip line-number elements and find the actual line content
      while (element) {
        if (element.classList?.contains('line-number')) {
          // If we are on a line number, move to the sibling line-content
          const lineContainer = element.parentElement
          const lineContent = lineContainer?.querySelector('.line-content')
          if (lineContent) {
            element = lineContent as Element
            break
          }
        }
        if (element.hasAttribute('data-index')) {
          break
        }
        element = element.parentElement
      }

      if (!element || !element.hasAttribute('data-index')) {
        logger.warn('Could not find data-index element', {
          nodeName: node.nodeName,
          nodeType: node.nodeType
        })
        return null
      }

      const lineIndex = parseInt(element.getAttribute('data-index') || '0', 10)
      const lineContent = element.querySelector('.line-content') || element

      // Calculate character offset within the line
      let charOffset = 0
      if (node.nodeType === Node.TEXT_NODE) {
        // Walk all text nodes in this line to locate the current node
        const walker = document.createTreeWalker(lineContent as Node, NodeFilter.SHOW_TEXT)
        let currentNode: Node | null
        while ((currentNode = walker.nextNode())) {
          if (currentNode === node) {
            charOffset += offset
            break
          }
          charOffset += currentNode.textContent?.length || 0
        }
      } else if (node.nodeType === Node.ELEMENT_NODE) {
        // For element nodes, count the length of all preceding text
        const textBefore = (node as Element).textContent?.slice(0, offset) || ''
        charOffset = textBefore.length
      }

      logger.debug('findLineAndOffset result', {
        lineIndex,
        charOffset
      })

      return { line: lineIndex, offset: charOffset }
    }

    const start = findLineAndOffset(range.startContainer, range.startOffset)
    const end = findLineAndOffset(range.endContainer, range.endOffset)

    if (!start || !end) {
      logger.warn('saveSelection failed', {
        hasStart: !!start,
        hasEnd: !!end
      })
      return null
    }

    logger.debug('saveSelection success', {
      startLine: start.line,
      startOffset: start.offset,
      endLine: end.line,
      endOffset: end.offset
    })

    return {
      startLine: start.line,
      startOffset: start.offset,
      endLine: end.line,
      endOffset: end.offset
    }
  }, [selectionBelongsToViewer])

  // Scroll handling: save the selection for copying, but don't restore it (avoids selection-highlight issues)
  const handleScroll = useCallback(() => {
    // Only save selection state for copying; don't restore the selection on scroll
    const saved = saveSelection()
    if (saved) {
      savedSelectionRef.current = saved
      logger.debug('Selection saved for copy', {
        startLine: saved.startLine,
        endLine: saved.endLine
      })
    }
  }, [saveSelection])

  // Handle copy events so copies spanning the virtual scroll get the full content
  const handleCopy = useCallback(
    (event: ClipboardEvent) => {
      const selection = window.getSelection()
      // Ignore copies for selections outside this viewer
      if (!selectionBelongsToViewer(selection)) {
        return
      }
      if (!selection || selection.rangeCount === 0 || selection.isCollapsed) {
        return
      }

      // Prefer saved selection from scroll, otherwise get it in real-time
      let saved = savedSelectionRef.current
      if (!saved) {
        saved = saveSelection()
      }

      if (!saved) {
        logger.warn('Cannot get selection, using browser default')
        return
      }

      const { startLine, startOffset, endLine, endOffset } = saved

      // Always use custom copy in collapsed state to handle virtual scroll edge cases
      const needsCustomCopy = !expanded

      logger.debug('Copy event', {
        startLine,
        endLine,
        startOffset,
        endOffset,
        expanded,
        needsCustomCopy,
        usedSavedSelection: !!savedSelectionRef.current
      })

      if (needsCustomCopy) {
        try {
          const selectedLines: string[] = []

          for (let i = startLine; i <= endLine; i++) {
            const line = rawLines[i] || ''

            if (i === startLine && i === endLine) {
              // Single-line selection
              selectedLines.push(line.slice(startOffset, endOffset))
            } else if (i === startLine) {
              // First line: from startOffset to end of line
              selectedLines.push(line.slice(startOffset))
            } else if (i === endLine) {
              // Last line: from start of line to endOffset
              selectedLines.push(line.slice(0, endOffset))
            } else {
              // Full lines in between
              selectedLines.push(line)
            }
          }

          const fullText = selectedLines.join('\n')

          logger.debug('Custom copy success', {
            linesCount: selectedLines.length,
            totalLength: fullText.length,
            firstLine: selectedLines[0]?.slice(0, 30),
            lastLine: selectedLines[selectedLines.length - 1]?.slice(0, 30)
          })

          if (!event.clipboardData) {
            logger.warn('clipboardData unavailable, using browser default copy')
            return
          }
          event.clipboardData.setData('text/plain', fullText)
          event.preventDefault()
        } catch (error) {
          logger.error('Custom copy failed', { error })
        }
      }
    },
    [selectionBelongsToViewer, expanded, saveSelection, rawLines]
  )

  // Virtualizer configuration
  const getScrollElement = useCallback(() => scrollerRef.current, [])
  const getItemKey = useCallback((index: number) => `${callerId}-${index}`, [callerId])
@ -147,6 +370,58 @@ const CodeViewer = ({
    }
  }, [virtualItems, debouncedHighlightLines])

  // Monitor selection changes, clear stale selection state, and auto-expand in collapsed state
  const handleSelectionChange = useMemo(
    () =>
      debounce(() => {
        const selection = window.getSelection()

        // No valid selection: clear and return
        if (!selection || selection.rangeCount === 0 || selection.isCollapsed) {
          savedSelectionRef.current = null
          return
        }

        // Only handle selections within this CodeViewer
        if (!selectionBelongsToViewer(selection)) {
          savedSelectionRef.current = null
          return
        }

        // In collapsed state, detect multi-line selection and request expand
        if (!expanded && onRequestExpand) {
          const saved = saveSelection()
          if (saved && saved.endLine > saved.startLine) {
            logger.debug('Multi-line selection detected in collapsed state, requesting expand', {
              startLine: saved.startLine,
              endLine: saved.endLine
            })
            onRequestExpand()
          }
        }
      }, 100),
    [expanded, onRequestExpand, saveSelection, selectionBelongsToViewer]
  )

  useEffect(() => {
    document.addEventListener('selectionchange', handleSelectionChange)
    return () => {
      document.removeEventListener('selectionchange', handleSelectionChange)
      handleSelectionChange.cancel()
    }
  }, [handleSelectionChange])

  // Listen for copy events
  useEffect(() => {
    const scroller = scrollerRef.current
    if (!scroller) return

    scroller.addEventListener('copy', handleCopy as EventListener)
    return () => {
      scroller.removeEventListener('copy', handleCopy as EventListener)
    }
  }, [handleCopy])

  // Report scrollHeight when it might change
  useLayoutEffect(() => {
    onHeightChange?.(scrollerRef.current?.scrollHeight ?? 0)
@ -160,6 +435,7 @@ const CodeViewer = ({
      $wrap={wrapped}
      $expand={expanded}
      $lineHeight={estimateSize()}
      onScroll={handleScroll}
      style={
        {
          '--gutter-width': `${gutterDigits}ch`,

@ -1,6 +1,6 @@
import { Tooltip } from 'antd'
import { Copy } from 'lucide-react'
import type { FC } from 'react'
import type { FC, KeyboardEvent } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'

@ -39,8 +39,24 @@ const CopyButton: FC<CopyButtonProps> = ({
    })
  }

  const handleKeyDown = (e: KeyboardEvent<HTMLDivElement>) => {
    if (e.key === 'Enter' || e.key === ' ') {
      e.preventDefault()
      handleCopy()
    }
  }

  const ariaLabel = tooltip || t('common.copy')

  const button = (
    <ButtonContainer $color={color} $hoverColor={hoverColor} onClick={handleCopy}>
    <ButtonContainer
      $color={color}
      $hoverColor={hoverColor}
      onClick={handleCopy}
      onKeyDown={handleKeyDown}
      role="button"
      aria-label={ariaLabel}
      tabIndex={0}>
      <Copy size={size} className="copy-icon" />
      {label && <RightText size={size}>{label}</RightText>}
    </ButtonContainer>

@ -171,7 +171,9 @@ export const Toolbar: React.FC<ToolbarProps> = ({ editor, formattingState, onCom
        data-active={isActive}
        disabled={isDisabled}
        onClick={() => handleCommand(command)}
        data-testid={`toolbar-${command}`}>
        data-testid={`toolbar-${command}`}
        aria-label={tooltipText}
        aria-pressed={isActive}>
        <Icon color={isActive ? 'var(--color-primary)' : 'var(--color-text)'} />
      </ToolbarButton>
    )

@ -86,7 +86,7 @@ const WindowControls: React.FC = () => {
  return (
    <WindowControlsContainer>
      <Tooltip title={t('navbar.window.minimize')} placement="bottom" mouseEnterDelay={DEFAULT_DELAY}>
        <ControlButton onClick={handleMinimize} aria-label="Minimize">
        <ControlButton onClick={handleMinimize} aria-label={t('navbar.window.minimize')}>
          <Minus size={14} />
        </ControlButton>
      </Tooltip>
@ -94,12 +94,14 @@ const WindowControls: React.FC = () => {
        title={isMaximized ? t('navbar.window.restore') : t('navbar.window.maximize')}
        placement="bottom"
        mouseEnterDelay={DEFAULT_DELAY}>
        <ControlButton onClick={handleMaximize} aria-label={isMaximized ? 'Restore' : 'Maximize'}>
        <ControlButton
          onClick={handleMaximize}
          aria-label={isMaximized ? t('navbar.window.restore') : t('navbar.window.maximize')}>
          {isMaximized ? <WindowRestoreIcon size={14} /> : <Square size={14} />}
        </ControlButton>
      </Tooltip>
      <Tooltip title={t('navbar.window.close')} placement="bottom" mouseEnterDelay={DEFAULT_DELAY}>
        <ControlButton $isClose onClick={handleClose} aria-label="Close">
        <ControlButton $isClose onClick={handleClose} aria-label={t('navbar.window.close')}>
          <X size={17} />
        </ControlButton>
      </Tooltip>

@ -12,6 +12,7 @@ import {
  isDeepSeekHybridInferenceModel,
  isDoubaoSeedAfter251015,
  isDoubaoThinkingAutoModel,
  isFixedReasoningModel,
  isGeminiReasoningModel,
  isGrok4FastReasoningModel,
  isHunyuanReasoningModel,
@ -356,6 +357,10 @@ describe('DeepSeek & Thinking Tokens', () => {
      )
    ).toBe(true)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v2' }))).toBe(false)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.2' }))).toBe(true)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2' }))).toBe(true)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-chat' }))).toBe(true)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.2-speciale' }))).toBe(false)

    const allowed = createModel({ id: 'deepseek-v3.1', provider: 'doubao' })
    expect(isSupportedThinkingTokenModel(allowed)).toBe(true)
@ -364,6 +369,37 @@ describe('DeepSeek & Thinking Tokens', () => {
    expect(isSupportedThinkingTokenModel(disallowed)).toBe(false)
  })

  it('supports DeepSeek v3.1+ models from newly added providers', () => {
    // Test newly added providers for DeepSeek thinking token support
    const newProviders = ['deepseek', 'cherryin', 'new-api', 'aihubmix', 'sophnet', 'dmxapi']

    newProviders.forEach((provider) => {
      const model = createModel({ id: 'deepseek-v3.1', provider })
      expect(
        isSupportedThinkingTokenModel(model),
        `Provider ${provider} should support thinking tokens for deepseek-v3.1`
      ).toBe(true)
    })
  })

  it('tests various prefix patterns for isDeepSeekHybridInferenceModel', () => {
    // Test with custom prefixes
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'custom-deepseek-v3.2' }))).toBe(true)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'prefix-deepseek-v3.1' }))).toBe(true)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2' }))).toBe(true)

    // Test that speciale is properly excluded
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'custom-deepseek-v3.2-speciale' }))).toBe(false)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'agent/deepseek-v3.2-speciale' }))).toBe(false)

    // Test basic deepseek-chat
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-chat' }))).toBe(true)

    // Test version variations
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3.1.2' }))).toBe(true)
    expect(isDeepSeekHybridInferenceModel(createModel({ id: 'deepseek-v3-1' }))).toBe(true)
  })

  it('supports Gemini thinking models while filtering image variants', () => {
    expect(isSupportedThinkingTokenModel(createModel({ id: 'gemini-2.5-flash-latest' }))).toBe(true)
    expect(isSupportedThinkingTokenModel(createModel({ id: 'gemini-2.5-flash-image' }))).toBe(false)
@ -535,6 +571,41 @@ describe('isReasoningModel', () => {
    const magistral = createModel({ id: 'magistral-reasoning' })
    expect(isReasoningModel(magistral)).toBe(true)
  })

  it('identifies fixed reasoning models', () => {
    const models = [
      'deepseek-reasoner',
      'o1-preview',
      'o1-mini',
      'qwq-32b-preview',
      'step-3-minimax',
      'generic-reasoning-model',
      'some-random-model-thinking',
      'some-random-model-think',
      'deepseek-v3.2-speciale'
    ]

    models.forEach((id) => {
      const model = createModel({ id })
      expect(isFixedReasoningModel(model), `Model ${id} should be reasoning`).toBe(true)
    })
  })

  it('excludes non-fixed reasoning models from isFixedReasoningModel', () => {
    // Models that support thinking tokens or reasoning effort should NOT be fixed reasoning models
    const nonFixedModels = [
      { id: 'deepseek-v3.2', provider: 'deepseek' }, // Supports thinking tokens
      { id: 'deepseek-chat', provider: 'deepseek' }, // Supports thinking tokens
      { id: 'claude-3-opus-20240229', provider: 'anthropic' }, // Supports thinking tokens via extended_thinking
      { id: 'gpt-4o', provider: 'openai' }, // Not a reasoning model at all
      { id: 'gpt-4', provider: 'openai' } // Not a reasoning model at all
    ]

    nonFixedModels.forEach(({ id, provider }) => {
      const model = createModel({ id, provider })
      expect(isFixedReasoningModel(model), `Model ${id} should NOT be fixed reasoning`).toBe(false)
    })
  })
})

describe('Thinking model classification', () => {

@ -140,12 +140,8 @@ describe('isFunctionCallingModel', () => {

  it('excludes explicitly blocked ids', () => {
    expect(isFunctionCallingModel(createModel({ id: 'gemini-1.5-flash' }))).toBe(false)
  })

  it('forces support for trusted providers', () => {
    for (const provider of ['deepseek', 'anthropic', 'kimi', 'moonshot']) {
      expect(isFunctionCallingModel(createModel({ provider }))).toBe(true)
    }
    expect(isFunctionCallingModel(createModel({ id: 'deepseek-v3.2-speciale' }))).toBe(false)
    expect(isFunctionCallingModel(createModel({ id: 'deepseek/deepseek-v3.2-speciale' }))).toBe(false)
  })

  it('returns true when identified as deepseek hybrid inference model', () => {
@ -157,4 +153,19 @@ describe('isFunctionCallingModel', () => {
    deepSeekHybridMock.mockReturnValueOnce(true)
    expect(isFunctionCallingModel(createModel({ id: 'deepseek-v3-1', provider: 'dashscope' }))).toBe(false)
  })

  it('supports anthropic models through claude regex match', () => {
    expect(isFunctionCallingModel(createModel({ id: 'claude-3-5-sonnet', provider: 'anthropic' }))).toBe(true)
    expect(isFunctionCallingModel(createModel({ id: 'claude-3-opus', provider: 'anthropic' }))).toBe(true)
  })

  it('supports kimi models through kimi-k2 regex match', () => {
    expect(isFunctionCallingModel(createModel({ id: 'kimi-k2-0711-preview', provider: 'moonshot' }))).toBe(true)
    expect(isFunctionCallingModel(createModel({ id: 'kimi-k2', provider: 'kimi' }))).toBe(true)
  })

  it('supports deepseek models through deepseek regex match', () => {
    expect(isFunctionCallingModel(createModel({ id: 'deepseek-chat', provider: 'deepseek' }))).toBe(true)
    expect(isFunctionCallingModel(createModel({ id: 'deepseek-coder', provider: 'deepseek' }))).toBe(true)
  })
})

@ -252,18 +252,22 @@ describe('model utils', () => {

  describe('getModelSupportedVerbosity', () => {
    it('returns only "high" for GPT-5 Pro models', () => {
      expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high'])
      expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high'])
      expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, null, 'high'])
      expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([
        undefined,
        null,
        'high'
      ])
    })

    it('returns all levels for non-Pro GPT-5 models', () => {
      const previewModel = createModel({ id: 'gpt-5-preview' })
      expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high'])
      expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, null, 'low', 'medium', 'high'])
    })

    it('returns all levels for GPT-5.1 models', () => {
      const gpt51Model = createModel({ id: 'gpt-5.1-preview' })
      expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high'])
      expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, null, 'low', 'medium', 'high'])
    })

    it('returns only undefined for non-GPT-5 models', () => {

@ -1853,7 +1853,7 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
    }
  ],
  huggingface: [],
  'ai-gateway': [],
  gateway: [],
  cerebras: [
    {
      id: 'gpt-oss-120b',

@ -21,7 +21,7 @@ import { isTextToImageModel } from './vision'

// Reasoning models
export const REASONING_REGEX =
  /^(?!.*-non-reasoning\b)(o\d+(?:-[\w-]+)?|.*\b(?:reasoning|reasoner|thinking)\b.*|.*-[rR]\d+.*|.*\bqwq(?:-[\w-]+)?\b.*|.*\bhunyuan-t1(?:-[\w-]+)?\b.*|.*\bglm-zero-preview\b.*|.*\bgrok-(?:3-mini|4|4-fast)(?:-[\w-]+)?\b.*)$/i
  /^(?!.*-non-reasoning\b)(o\d+(?:-[\w-]+)?|.*\b(?:reasoning|reasoner|thinking|think)\b.*|.*-[rR]\d+.*|.*\bqwq(?:-[\w-]+)?\b.*|.*\bhunyuan-t1(?:-[\w-]+)?\b.*|.*\bglm-zero-preview\b.*|.*\bgrok-(?:3-mini|4|4-fast)(?:-[\w-]+)?\b.*)$/i

// Map from model type to the supported reasoning_effort options
// TODO: refactor this. too many identical options
@ -161,7 +161,13 @@ function _isSupportedThinkingTokenModel(model: Model): boolean {
      'nvidia',
      'ppio',
      'hunyuan',
      'tencent-cloud-ti'
      'tencent-cloud-ti',
      'deepseek',
      'cherryin',
      'new-api',
      'aihubmix',
      'sophnet',
      'dmxapi'
    ] satisfies SystemProviderId[]
  ).some((id) => id === model.provider)
}
@ -462,15 +468,19 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
export const isDeepSeekHybridInferenceModel = (model: Model) => {
  const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
    const modelId = getLowerBaseModelName(model.id)
    // DeepSeek itself uses the chat/reasoner split to control reasoning; other providers need separate checks, and ids may differ
    // openrouter: deepseek/deepseek-chat-v3.1 — another provider may mimic DeepSeek and ship a same-id model as the non-thinking variant, so this carries some risk
    // Here we assume every deepseek-chat is deepseek-v3.2
    // Matches: "deepseek-v3" followed by ".digit" or "-digit".
    // Optionally, this can be followed by ".alphanumeric_sequence" or "-alphanumeric_sequence"
    // until the end of the string.
    // Examples: deepseek-v3.1, deepseek-v3-1, deepseek-v3.1.2, deepseek-v3.1-alpha
    // Does NOT match: deepseek-v3.123 (missing separator after '1'), deepseek-v3.x (x isn't a digit)
    // TODO: move to utils and add test cases
    return /deepseek-v3(?:\.\d|-\d)(?:(\.|-)\w+)?$/.test(modelId) || modelId.includes('deepseek-chat-v3.1')
    return (
      /(\w+-)?deepseek-v3(?:\.\d|-\d)(?:(\.|-)(?!speciale$)\w+)?$/.test(modelId) ||
      modelId.includes('deepseek-chat-v3.1') ||
      modelId.includes('deepseek-chat')
    )
  })
  return idResult || nameResult
}
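A quick sketch of what the updated matcher accepts, with the regex copied verbatim from the hunk above (assertions mirror ids used in the tests in this diff):

```ts
// Sketch only: same predicate logic as the function above, outside the app.
const re = /(\w+-)?deepseek-v3(?:\.\d|-\d)(?:(\.|-)(?!speciale$)\w+)?$/
const matches = (id: string) =>
  re.test(id) || id.includes('deepseek-chat-v3.1') || id.includes('deepseek-chat')
console.assert(matches('deepseek-v3.1') === true)
console.assert(matches('deepseek-v3-1') === true)
console.assert(matches('custom-deepseek-v3.2') === true)
console.assert(matches('deepseek-chat') === true)
console.assert(matches('deepseek-v3.2-speciale') === false) // excluded by the lookahead
```
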
@ -545,7 +555,8 @@ export function isReasoningModel(model?: Model): boolean {
      isMiniMaxReasoningModel(model) ||
      modelId.includes('magistral') ||
      modelId.includes('pangu-pro-moe') ||
      modelId.includes('seed-oss')
      modelId.includes('seed-oss') ||
      modelId.includes('deepseek-v3.2-speciale')
    ) {
      return true
    }
@ -596,3 +607,17 @@ export const findTokenLimit = (modelId: string): { min: number; max: number } |
  }
  return undefined
}

/**
 * Determines if a model is a fixed reasoning model.
 *
 * A model is considered a fixed reasoning model if it meets all of the following criteria:
 * - It is a reasoning model
 * - It does NOT support thinking tokens
 * - It does NOT support reasoning effort
 *
 * @param model - The model to check
 * @returns `true` if the model is a fixed reasoning model, `false` otherwise
 */
export const isFixedReasoningModel = (model: Model) =>
  isReasoningModel(model) && !isSupportedThinkingTokenModel(model) && !isSupportedReasoningEffortModel(model)
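A short sketch of how the predicate composes; `createModel` is the test helper used in the specs above:

```ts
// Sketch: expected values follow the updated model.test.ts cases in this diff.
const fixed = createModel({ id: 'deepseek-reasoner' })
// Reasoning model with no thinking-token or reasoning-effort control:
console.assert(isFixedReasoningModel(fixed) === true)

const hybrid = createModel({ id: 'deepseek-v3.2', provider: 'deepseek' })
// Reasoning model, but thinking tokens are controllable, so not "fixed":
console.assert(isFixedReasoningModel(hybrid) === false)
```
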
@ -46,7 +46,8 @@ const FUNCTION_CALLING_EXCLUDED_MODELS = [
  'glm-4\\.5v',
  'gemini-2.5-flash-image(?:-[\\w-]+)?',
  'gemini-2.0-flash-preview-image-generation',
  'gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?'
  'gemini-3(?:\\.\\d+)?-pro-image(?:-[\\w-]+)?',
  'deepseek-v3.2-speciale'
]

export const FUNCTION_CALLING_REGEX = new RegExp(
@ -85,10 +86,6 @@ export function isFunctionCallingModel(model?: Model): boolean {
    }
  }

  if (['deepseek', 'anthropic', 'kimi', 'moonshot'].includes(model.provider)) {
    return true
  }

  // 2025/08/26: neither Bailian nor Volcengine supports v3.1 function calling
  // Default to supported for now
  if (isDeepSeekHybridInferenceModel(model)) {

@ -11,7 +11,8 @@ import {
  isGPT51SeriesModel,
  isOpenAIChatCompletionOnlyModel,
  isOpenAIOpenWeightModel,
  isOpenAIReasoningModel
  isOpenAIReasoningModel,
  isSupportVerbosityModel
} from './openai'
import { isQwenMTModel } from './qwen'
import { isFunctionCallingModel } from './tooluse'
@ -156,10 +157,10 @@ const MODEL_SUPPORTED_VERBOSITY: readonly {
 * For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported.
 * For GPT-5.1 series models, 'low', 'medium', and 'high' are supported.
 * @param model - The model to check
 * @returns An array of supported verbosity levels, always including `undefined` as the first element
 * @returns An array of supported verbosity levels, always including `undefined` as the first element and `null` when applicable
 */
export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => {
  if (!model) {
  if (!model || !isSupportVerbosityModel(model)) {
    return [undefined]
  }

@ -167,7 +168,7 @@ export const getModelSupportedVerbosity = (model: Model | undefined | null): Ope

  for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) {
    if (validator(model)) {
      supportedValues = [...values]
      supportedValues = [null, ...values]
      break
    }
  }
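With the new `null` entry and the `isSupportVerbosityModel` guard, the expected outputs look like this (values per the updated tests earlier in this diff):

```ts
// Sketch: expected return values, matching the updated model.test.ts expectations.
getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))
// -> [undefined, null, 'high']
getModelSupportedVerbosity(createModel({ id: 'gpt-5-preview' }))
// -> [undefined, null, 'low', 'medium', 'high']
getModelSupportedVerbosity(createModel({ id: 'gpt-4o' }))
// -> [undefined] (fails the isSupportVerbosityModel guard)
```
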
@ -180,6 +181,11 @@ export const isGeminiModel = (model: Model) => {
  return modelId.includes('gemini')
}

export const isGrokModel = (model: Model) => {
  const modelId = getLowerBaseModelName(model.id)
  return modelId.includes('grok')
}

// Zhipu vision reasoning models mark reasoning results with this pair of special tokens
export const ZHIPU_RESULT_TOKENS = ['<|begin_of_box|>', '<|end_of_box|>'] as const

@ -53,7 +53,10 @@ const visionAllowedModels = [
  'llama-4(?:-[\\w-]+)?',
  'step-1o(?:.*vision)?',
  'step-1v(?:-[\\w-]+)?',
  'qwen-omni(?:-[\\w-]+)?'
  'qwen-omni(?:-[\\w-]+)?',
  'mistral-large-(2512|latest)',
  'mistral-medium-(2508|latest)',
  'mistral-small-(2506|latest)'
]

const visionExcludedModels = [

@ -676,10 +676,10 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
    isSystem: true,
    enabled: false
  },
  'ai-gateway': {
    id: 'ai-gateway',
    name: 'AI Gateway',
    type: 'ai-gateway',
  gateway: {
    id: 'gateway',
    name: 'Vercel AI Gateway',
    type: 'gateway',
    apiKey: '',
    apiHost: 'https://ai-gateway.vercel.sh/v1/ai',
    models: [],
@ -762,7 +762,7 @@ export const PROVIDER_LOGO_MAP: AtLeast<SystemProviderId, string> = {
  longcat: LongCatProviderLogo,
  huggingface: HuggingfaceProviderLogo,
  sophnet: SophnetProviderLogo,
  'ai-gateway': AIGatewayProviderLogo,
  gateway: AIGatewayProviderLogo,
  cerebras: CerebrasProviderLogo
} as const

@ -927,7 +927,7 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
    websites: {
      official: 'https://www.dmxapi.cn/register?aff=bwwY',
      apiKey: 'https://www.dmxapi.cn/register?aff=bwwY',
      docs: 'https://dmxapi.cn/models.html#code-block',
      docs: 'https://doc.dmxapi.cn/',
      models: 'https://www.dmxapi.cn/pricing'
    }
  },
@ -1413,7 +1413,7 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
      models: 'https://huggingface.co/models'
    }
  },
  'ai-gateway': {
  gateway: {
    api: {
      url: 'https://ai-gateway.vercel.sh/v1/ai'
    },

@ -51,7 +51,7 @@ export function useTextareaResize(options: UseTextareaResizeOptions = {}): UseTe
  const { maxHeight = 400, minHeight = 30, autoResize = true } = options

  const textareaRef = useRef<TextAreaRef>(null)
  const [customHeight, setCustomHeight] = useState<number>()
  const [customHeight, setCustomHeight] = useState<number | undefined>(undefined)
  const [isExpanded, setIsExpanded] = useState(false)

  const resize = useCallback(

@ -87,7 +87,7 @@ const providerKeyMap = {
  longcat: 'provider.longcat',
  huggingface: 'provider.huggingface',
  sophnet: 'provider.sophnet',
  'ai-gateway': 'provider.ai-gateway',
  gateway: 'provider.ai-gateway',
  cerebras: 'provider.cerebras'
} as const

@ -2531,7 +2531,7 @@
  },
  "provider": {
    "302ai": "302.AI",
    "ai-gateway": "AI Gateway",
    "ai-gateway": "Vercel AI Gateway",
    "aihubmix": "AiHubMix",
    "aionly": "AiOnly",
    "alayanew": "Alaya NeW",

@ -2531,7 +2531,7 @@
  },
  "provider": {
    "302ai": "302.AI",
    "ai-gateway": "AI Gateway",
    "ai-gateway": "Vercel AI Gateway",
    "aihubmix": "AiHubMix",
    "aionly": "唯一AI (AiOnly)",
    "alayanew": "Alaya NeW",

@ -177,8 +177,10 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
    resize: resizeTextArea,
    focus: focusTextarea,
    setExpanded,
    isExpanded: textareaIsExpanded
  } = useTextareaResize({ maxHeight: 400, minHeight: 30 })
    isExpanded: textareaIsExpanded,
    customHeight,
    setCustomHeight
  } = useTextareaResize({ maxHeight: 500, minHeight: 30 })
  const { sendMessageShortcut, apiServer } = useSettings()

  const { t } = useTranslation()
@ -474,6 +476,8 @@ const AgentSessionInputbarInner: FC<InnerProps> = ({ assistant, agentId, session
          text={text}
          onTextChange={setText}
          textareaRef={textareaRef}
          height={customHeight}
          onHeightChange={setCustomHeight}
          resizeTextArea={resizeTextArea}
          focusTextarea={focusTextarea}
          placeholder={placeholderText}

@ -143,9 +143,11 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
    resize: resizeTextArea,
    focus: focusTextarea,
    setExpanded,
    isExpanded: textareaIsExpanded
    isExpanded: textareaIsExpanded,
    customHeight,
    setCustomHeight
  } = useTextareaResize({
    maxHeight: 400,
    maxHeight: 500,
    minHeight: 30
  })

@ -257,7 +259,7 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
      setText('')
      setFiles([])
      setTimeoutTimer('sendMessage_1', () => setText(''), 500)
      setTimeoutTimer('sendMessage_2', () => resizeTextArea(true), 0)
      setTimeoutTimer('sendMessage_2', () => resizeTextArea(), 0)
    } catch (error) {
      logger.warn('Failed to send message:', error as Error)
      parent?.recordException(error as Error)
@ -478,6 +480,8 @@ const InputbarInner: FC<InputbarInnerProps> = ({ assistant: initialAssistant, se
          text={text}
          onTextChange={setText}
          textareaRef={textareaRef}
          height={customHeight}
          onHeightChange={setCustomHeight}
          resizeTextArea={resizeTextArea}
          focusTextarea={focusTextarea}
          isLoading={loading}

@ -1,4 +1,5 @@
import type { FC } from 'react'
import type { FC, KeyboardEvent } from 'react'
import { useTranslation } from 'react-i18next'

interface Props {
  disabled: boolean
@ -6,10 +7,24 @@ interface Props {
}

const SendMessageButton: FC<Props> = ({ disabled, sendMessage }) => {
  const { t } = useTranslation()

  const handleKeyDown = (e: KeyboardEvent<HTMLElement>) => {
    if (!disabled && (e.key === 'Enter' || e.key === ' ')) {
      e.preventDefault()
      sendMessage()
    }
  }

  return (
    <i
      className="iconfont icon-ic_send"
      onClick={sendMessage}
      onClick={disabled ? undefined : sendMessage}
      onKeyDown={handleKeyDown}
      role="button"
      aria-label={t('chat.input.send')}
      aria-disabled={disabled}
      tabIndex={disabled ? -1 : 0}
      style={{
        cursor: disabled ? 'not-allowed' : 'pointer',
        color: disabled ? 'var(--color-text-3)' : 'var(--color-primary)',

@ -50,6 +50,9 @@ export interface InputbarCoreProps {
  resizeTextArea: (force?: boolean) => void
  focusTextarea: () => void

  height: number | undefined
  onHeightChange: (height: number) => void

  supportedExts: string[]
  isLoading: boolean

@ -104,6 +107,8 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
  textareaRef,
  resizeTextArea,
  focusTextarea,
  height,
  onHeightChange,
  supportedExts,
  isLoading,
  onPause,
@ -131,8 +136,6 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
  } = useSettings()
  const quickPanelTriggersEnabled = forceEnableQuickPanelTriggers ?? enableQuickPanelTriggers

  const [textareaHeight, setTextareaHeight] = useState<number>()

  const { t } = useTranslation()
  const [isTranslating, setIsTranslating] = useState(false)
  const { getLanguageByLangcode } = useTranslate()
@ -178,8 +181,10 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
    enabled: config.enableDragDrop,
    t
  })
  // Whether sending is possible: text is non-empty or files are attached
  const cannotSend = isEmpty && files.length === 0
  // Whether there is any content: text is non-empty or files are attached
  const noContent = isEmpty && files.length === 0
  // Unified disable condition for send entry points: empty content, generating, or global search
  const isSendDisabled = noContent || isLoading || searching

  useEffect(() => {
    setExtensions(supportedExts)
@ -310,7 +315,7 @@ export const InputbarCore: FC<InputbarCoreProps> = ({

      const isEnterPressed = event.key === 'Enter' && !event.nativeEvent.isComposing
      if (isEnterPressed) {
        if (isSendMessageKeyPressed(event, sendMessageShortcut) && !cannotSend) {
        if (isSendMessageKeyPressed(event, sendMessageShortcut) && !isSendDisabled) {
          handleSendMessage()
          event.preventDefault()
          return
@ -356,7 +361,7 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
      translate,
      handleToggleExpanded,
      sendMessageShortcut,
      cannotSend,
      isSendDisabled,
      handleSendMessage,
      setText,
      setTimeoutTimer,
@ -538,8 +543,8 @@ export const InputbarCore: FC<InputbarCoreProps> = ({

      const handleMouseMove = (e: MouseEvent) => {
        const deltaY = startDragY.current - e.clientY
        const newHeight = Math.max(40, Math.min(400, startHeight.current + deltaY))
        setTextareaHeight(newHeight)
        const newHeight = Math.max(40, Math.min(500, startHeight.current + deltaY))
        onHeightChange(newHeight)
      }

      const handleMouseUp = () => {
@ -550,7 +555,7 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
      document.addEventListener('mousemove', handleMouseMove)
      document.addEventListener('mouseup', handleMouseUp)
    },
    [config.enableDragDrop, setTextareaHeight, textareaRef]
    [config.enableDragDrop, onHeightChange, textareaRef]
  )

  const onQuote = useCallback(
@ -617,7 +622,7 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
  const rightSectionExtras = useMemo(() => {
    const extras: React.ReactNode[] = []
    extras.push(<TranslateButton key="translate" text={text} onTranslated={onTranslated} isLoading={isTranslating} />)
    extras.push(<SendMessageButton sendMessage={handleSendMessage} disabled={cannotSend || isLoading || searching} />)
    extras.push(<SendMessageButton sendMessage={handleSendMessage} disabled={isSendDisabled} />)

    if (isLoading) {
      extras.push(
@ -630,7 +635,7 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
    }

    return <>{extras}</>
  }, [text, onTranslated, isTranslating, handleSendMessage, cannotSend, isLoading, searching, t, onPause])
  }, [text, onTranslated, isTranslating, handleSendMessage, isSendDisabled, isLoading, t, onPause])

  const quickPanelElement = config.enableQuickPanel ? <QuickPanelView setInputText={setText} /> : null

@ -667,11 +672,11 @@ export const InputbarCore: FC<InputbarCoreProps> = ({
          variant="borderless"
          spellCheck={enableSpellCheck}
          rows={2}
          autoSize={textareaHeight ? false : { minRows: 2, maxRows: 20 }}
          autoSize={height ? false : { minRows: 2, maxRows: 20 }}
          styles={{ textarea: TextareaStyle }}
          style={{
            fontSize,
            height: textareaHeight,
            height: height,
            minHeight: '30px'
          }}
          disabled={isTranslating || searching}

@ -31,7 +31,7 @@ const ActivityDirectoryButton: FC<Props> = ({ quickPanel, quickPanelController,
|
||||
|
||||
return (
|
||||
<Tooltip placement="top" title={t('chat.input.activity_directory.title')} mouseLeaveDelay={0} arrow>
|
||||
<ActionIconButton onClick={handleOpenQuickPanel}>
|
||||
<ActionIconButton onClick={handleOpenQuickPanel} aria-label={t('chat.input.activity_directory.title')}>
|
||||
<FolderOpen size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -152,13 +152,15 @@ const AttachmentButton: FC<Props> = ({ quickPanel, couldAddImageFile, extensions
|
||||
}
|
||||
}, [couldAddImageFile, openQuickPanel, quickPanel, t])
|
||||
|
||||
const ariaLabel = couldAddImageFile ? t('chat.input.upload.image_or_document') : t('chat.input.upload.document')
|
||||
|
||||
return (
|
||||
<Tooltip
|
||||
placement="top"
|
||||
title={couldAddImageFile ? t('chat.input.upload.image_or_document') : t('chat.input.upload.document')}
|
||||
mouseLeaveDelay={0}
|
||||
arrow>
|
||||
<ActionIconButton onClick={openFileSelectDialog} active={files.length > 0} disabled={disabled}>
|
||||
<Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
|
||||
<ActionIconButton
|
||||
onClick={openFileSelectDialog}
|
||||
active={files.length > 0}
|
||||
disabled={disabled}
|
||||
aria-label={ariaLabel}>
|
||||
<Paperclip size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -15,18 +15,18 @@ interface Props {
|
||||
const GenerateImageButton: FC<Props> = ({ model, assistant, onEnableGenerateImage }) => {
|
||||
const { t } = useTranslation()
|
||||
|
||||
const ariaLabel = isGenerateImageModel(model)
|
||||
? t('chat.input.generate_image')
|
||||
: t('chat.input.generate_image_not_supported')
|
||||
|
||||
return (
|
||||
<Tooltip
|
||||
placement="top"
|
||||
title={
|
||||
isGenerateImageModel(model) ? t('chat.input.generate_image') : t('chat.input.generate_image_not_supported')
|
||||
}
|
||||
mouseLeaveDelay={0}
|
||||
arrow>
|
||||
<Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
|
||||
<ActionIconButton
|
||||
onClick={onEnableGenerateImage}
|
||||
active={assistant.enableGenerateImage}
|
||||
disabled={!isGenerateImageModel(model)}>
|
||||
disabled={!isGenerateImageModel(model)}
|
||||
aria-label={ariaLabel}
|
||||
aria-pressed={assistant.enableGenerateImage}>
|
||||
<Image size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -124,7 +124,8 @@ const KnowledgeBaseButton: FC<Props> = ({ quickPanel, selectedBases, onSelect, d
|
||||
<ActionIconButton
|
||||
onClick={handleOpenQuickPanel}
|
||||
active={selectedBases && selectedBases.length > 0}
|
||||
disabled={disabled}>
|
||||
disabled={disabled}
|
||||
aria-label={t('chat.input.knowledge_base')}>
|
||||
<FileSearch size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -516,7 +516,10 @@ const MCPToolsButton: FC<Props> = ({ quickPanel, setInputValue, resizeTextArea,
|
||||
|
||||
return (
|
||||
<Tooltip placement="top" title={t('settings.mcp.title')} mouseLeaveDelay={0} arrow>
|
||||
<ActionIconButton onClick={handleOpenQuickPanel} active={assistant.mcpServers && assistant.mcpServers.length > 0}>
|
||||
<ActionIconButton
|
||||
onClick={handleOpenQuickPanel}
|
||||
active={assistant.mcpServers && assistant.mcpServers.length > 0}
|
||||
aria-label={t('settings.mcp.title')}>
|
||||
<Hammer size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -46,7 +46,10 @@ const MentionModelsButton: FC<Props> = ({
|
||||
|
||||
return (
|
||||
<Tooltip placement="top" title={t('assistants.presets.edit.model.select.title')} mouseLeaveDelay={0} arrow>
|
||||
<ActionIconButton onClick={handleOpenQuickPanel} active={mentionedModels.length > 0}>
|
||||
<ActionIconButton
|
||||
onClick={handleOpenQuickPanel}
|
||||
active={mentionedModels.length > 0}
|
||||
aria-label={t('assistants.presets.edit.model.select.title')}>
|
||||
<AtSign size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -20,7 +20,9 @@ const NewContextButton: FC<Props> = ({ onNewContext }) => {
|
||||
title={t('chat.input.new.context', { Command: newContextShortcut })}
|
||||
mouseLeaveDelay={0}
|
||||
arrow>
|
||||
<ActionIconButton onClick={onNewContext}>
|
||||
<ActionIconButton
|
||||
onClick={onNewContext}
|
||||
aria-label={t('chat.input.new.context', { Command: newContextShortcut })}>
|
||||
<Eraser size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -250,7 +250,7 @@ const QuickPhrasesButton = ({ quickPanel, setInputValue, resizeTextArea, assista
|
||||
return (
|
||||
<>
|
||||
<Tooltip placement="top" title={t('settings.quickPhrase.title')} mouseLeaveDelay={0} arrow>
|
||||
<ActionIconButton onClick={handleOpenQuickPanel}>
|
||||
<ActionIconButton onClick={handleOpenQuickPanel} aria-label={t('settings.quickPhrase.title')}>
|
||||
<Zap size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -37,7 +37,11 @@ const SlashCommandsButton: FC<Props> = ({ quickPanelController, session, openPan
|
||||
|
||||
return (
|
||||
<Tooltip placement="top" title={t('chat.input.slash_commands.title')} mouseLeaveDelay={0} arrow>
|
||||
<ActionIconButton onClick={handleOpenQuickPanel} active={isActive} disabled={!hasCommands}>
|
||||
<ActionIconButton
|
||||
onClick={handleOpenQuickPanel}
|
||||
active={isActive}
|
||||
disabled={!hasCommands}
|
||||
aria-label={t('chat.input.slash_commands.title')}>
|
||||
<Terminal size={18} />
|
||||
</ActionIconButton>
|
||||
</Tooltip>
|
||||
|
||||
@ -11,6 +11,7 @@ import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/Qu
import {
getThinkModelType,
isDoubaoThinkingAutoModel,
isFixedReasoningModel,
isGPT5SeriesReasoningModel,
isOpenAIWebSearchModel,
MODEL_SUPPORTED_OPTIONS
@ -42,6 +43,8 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
// Determine which option types the current model supports
const modelType = useMemo(() => getThinkModelType(model), [model])

const isFixedReasoning = isFixedReasoningModel(model)

// Get the options the current model supports
const supportedOptions: ThinkingOption[] = useMemo(() => {
if (modelType === 'doubao') {
@ -111,6 +114,8 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
}, [quickPanelHook, panelItems, t])

const handleOpenQuickPanel = useCallback(() => {
if (isFixedReasoning) return

if (quickPanelHook.isVisible && quickPanelHook.symbol === QuickPanelReservedSymbol.Thinking) {
quickPanelHook.close()
return
@ -121,9 +126,11 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
return
}
openQuickPanel()
}, [openQuickPanel, quickPanelHook, isThinkingEnabled, supportedOptions, disableThinking])
}, [openQuickPanel, quickPanelHook, isThinkingEnabled, supportedOptions, disableThinking, isFixedReasoning])

useEffect(() => {
if (isFixedReasoning) return

const disposeMenu = quickPanel.registerRootMenu([
{
label: t('assistants.settings.reasoning_effort.label'),
@ -140,19 +147,22 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
disposeMenu()
disposeTrigger()
}
}, [currentReasoningEffort, openQuickPanel, quickPanel, t])
}, [currentReasoningEffort, openQuickPanel, quickPanel, t, isFixedReasoning])

const ariaLabel = isFixedReasoning
? t('chat.input.thinking.label')
: isThinkingEnabled && supportedOptions.includes('none')
? t('common.close')
: t('assistants.settings.reasoning_effort.label')

return (
<Tooltip
placement="top"
title={
isThinkingEnabled && supportedOptions.includes('none')
? t('common.close')
: t('assistants.settings.reasoning_effort.label')
}
mouseLeaveDelay={0}
arrow>
<ActionIconButton onClick={handleOpenQuickPanel} active={currentReasoningEffort !== 'none'}>
<Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
<ActionIconButton
onClick={handleOpenQuickPanel}
active={isFixedReasoning || currentReasoningEffort !== 'none'}
aria-label={ariaLabel}
aria-pressed={currentReasoningEffort !== 'none'}
style={isFixedReasoning ? { cursor: 'default' } : undefined}>
{ThinkingIcon(currentReasoningEffort)}
</ActionIconButton>
</Tooltip>
@ -48,7 +48,11 @@ const UrlContextButton: FC<Props> = ({ assistantId }) => {

return (
<Tooltip placement="top" title={t('chat.input.url_context')} arrow>
<ActionIconButton onClick={handleToggle} active={assistant.enableUrlContext}>
<ActionIconButton
onClick={handleToggle}
active={assistant.enableUrlContext}
aria-label={t('chat.input.url_context')}
aria-pressed={assistant.enableUrlContext}>
<Link size={18} />
</ActionIconButton>
</Tooltip>

@ -25,13 +25,15 @@ const WebSearchButton: FC<Props> = ({ quickPanelController, assistantId }) => {
}
}, [enableWebSearch, toggleQuickPanel, updateWebSearchProvider])

const ariaLabel = enableWebSearch ? t('common.close') : t('chat.input.web_search.label')

return (
<Tooltip
placement="top"
title={enableWebSearch ? t('common.close') : t('chat.input.web_search.label')}
mouseLeaveDelay={0}
arrow>
<ActionIconButton onClick={onClick} active={!!enableWebSearch}>
<Tooltip placement="top" title={ariaLabel} mouseLeaveDelay={0} arrow>
<ActionIconButton
onClick={onClick}
active={!!enableWebSearch}
aria-label={ariaLabel}
aria-pressed={!!enableWebSearch}>
<WebSearchProviderIcon pid={selectedProviderId} />
</ActionIconButton>
</Tooltip>
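Taken together, these toolbar diffs apply one pattern: derive a single label string, feed it to both the Tooltip title and the button's aria-label, and mirror the visual active state with aria-pressed so assistive technology announces the toggle state. A minimal sketch of the shared shape (ToggleToolButton and its props are hypothetical, for illustration only; Tooltip and ActionIconButton are the components used above):

// Hypothetical distillation of the pattern used by the buttons above.
const ToggleToolButton: FC<{ active: boolean; label: string; onClick: () => void; children: ReactNode }> = ({
  active,
  label,
  onClick,
  children
}) => (
  <Tooltip placement="top" title={label} mouseLeaveDelay={0} arrow>
    <ActionIconButton onClick={onClick} active={active} aria-label={label} aria-pressed={active}>
      {children}
    </ActionIconButton>
  </Tooltip>
)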
@ -1,4 +1,4 @@
import { isSupportedReasoningEffortModel, isSupportedThinkingTokenModel } from '@renderer/config/models'
import { isReasoningModel } from '@renderer/config/models'
import ThinkingButton from '@renderer/pages/home/Inputbar/tools/components/ThinkingButton'
import { defineTool, registerTool, TopicType } from '@renderer/pages/home/Inputbar/types'

@ -6,7 +6,7 @@ const thinkingTool = defineTool({
key: 'thinking',
label: (t) => t('chat.input.thinking.label'),
visibleInScopes: [TopicType.Chat],
condition: ({ model }) => isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model),
condition: ({ model }) => isReasoningModel(model),
render: ({ assistant, model, quickPanel }) => (
<ThinkingButton quickPanel={quickPanel} model={model} assistantId={assistant.id} />
)
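The condition swap suggests isReasoningModel is the broader predicate that now also admits the fixed-reasoning models handled by the new isFixedReasoning branch above. A plausible (unverified) relationship, stated only as an assumption:

// Assumption: isReasoningModel subsumes the two narrower checks it replaces.
const isReasoningModel = (model: Model): boolean =>
  isSupportedThinkingTokenModel(model) || isSupportedReasoningEffortModel(model) || isFixedReasoningModel(model)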
@ -24,12 +24,12 @@ import { useTranslation } from 'react-i18next'
import { useSelector } from 'react-redux'

type VerbosityOption = {
value: NonNullable<OpenAIVerbosity> | 'undefined'
value: NonNullable<OpenAIVerbosity> | 'undefined' | 'null'
label: string
}

type SummaryTextOption = {
value: NonNullable<OpenAISummaryText> | 'undefined'
value: NonNullable<OpenAISummaryText> | 'undefined' | 'null'
label: string
}

@ -85,6 +85,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'undefined',
label: t('common.ignore')
},
{
value: 'null',
label: t('common.off')
},
{
value: 'auto',
label: t('settings.openai.summary_text_mode.auto')
@ -105,6 +109,10 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
value: 'undefined',
label: t('common.ignore')
},
{
value: 'null',
label: t('common.off')
},
{
value: 'low',
label: t('settings.openai.verbosity.low')
@ -203,9 +211,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
</Tooltip>
</SettingRowTitleSmall>
<Selector
value={summaryText}
value={toOptionValue(summaryText)}
onChange={(value) => {
setSummaryText(value as OpenAISummaryText)
setSummaryText(toRealValue(value))
}}
options={summaryTextOptions}
/>
@ -222,9 +230,9 @@ const OpenAISettingsGroup: FC<Props> = ({ model, providerId, SettingGroup, Setti
</Tooltip>
</SettingRowTitleSmall>
<Selector
value={verbosity}
value={toOptionValue(verbosity)}
onChange={(value) => {
setVerbosity(value as OpenAIVerbosity)
setVerbosity(toRealValue(value))
}}
options={verbosityOptions}
/>
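The Selector now round-trips through toOptionValue/toRealValue, which are not shown in this hunk. A sketch of what they presumably do, mapping the real undefined/null settings onto the 'undefined'/'null' string sentinels used as option values and back (hypothetical helpers, consistent with the option lists above):

// Hypothetical helpers; the shipped versions may differ.
const toOptionValue = (v: string | null | undefined): string =>
  v === undefined ? 'undefined' : v === null ? 'null' : v

const toRealValue = (v: string): string | null | undefined =>
  v === 'undefined' ? undefined : v === 'null' ? null : v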
@ -4,6 +4,7 @@ import ObsidianExportPopup from '@renderer/components/Popups/ObsidianExportPopup
import PromptPopup from '@renderer/components/Popups/PromptPopup'
import SaveToKnowledgePopup from '@renderer/components/Popups/SaveToKnowledgePopup'
import { isMac } from '@renderer/config/constant'
import { db } from '@renderer/databases'
import { useAssistant, useAssistants } from '@renderer/hooks/useAssistant'
import { useInPlaceEdit } from '@renderer/hooks/useInPlaceEdit'
import { useNotesSettings } from '@renderer/hooks/useNotesSettings'
@ -11,6 +12,7 @@ import { modelGenerating } from '@renderer/hooks/useRuntime'
import { useSettings } from '@renderer/hooks/useSettings'
import { finishTopicRenaming, startTopicRenaming, TopicManager } from '@renderer/hooks/useTopic'
import { fetchMessagesSummary } from '@renderer/services/ApiService'
import { getDefaultTopic } from '@renderer/services/AssistantService'
import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService'
import type { RootState } from '@renderer/store'
import store from '@renderer/store'
@ -65,7 +67,7 @@ export const Topics: React.FC<Props> = ({ assistant: _assistant, activeTopic, se
const { t } = useTranslation()
const { notesPath } = useNotesSettings()
const { assistants } = useAssistants()
const { assistant, removeTopic, moveTopic, updateTopic, updateTopics } = useAssistant(_assistant.id)
const { assistant, addTopic, removeTopic, moveTopic, updateTopic, updateTopics } = useAssistant(_assistant.id)
const { showTopicTime, pinTopicsToTop, setTopicPosition, topicPosition } = useSettings()

const renamingTopics = useSelector((state: RootState) => state.runtime.chat.renamingTopics)
@ -138,17 +140,21 @@ export const Topics: React.FC<Props> = ({ assistant: _assistant, activeTopic, se
async (topic: Topic, e: React.MouseEvent) => {
e.stopPropagation()
if (assistant.topics.length === 1) {
return onClearMessages(topic)
const newTopic = getDefaultTopic(assistant.id)
await db.topics.add({ id: newTopic.id, messages: [] })
addTopic(newTopic)
setActiveTopic(newTopic)
} else {
const index = findIndex(assistant.topics, (t) => t.id === topic.id)
if (topic.id === activeTopic.id) {
setActiveTopic(assistant.topics[index + 1 === assistant.topics.length ? index - 1 : index + 1])
}
}
await modelGenerating()
const index = findIndex(assistant.topics, (t) => t.id === topic.id)
if (topic.id === activeTopic.id) {
setActiveTopic(assistant.topics[index + 1 === assistant.topics.length ? index - 1 : index + 1])
}
removeTopic(topic)
setDeletingTopicId(null)
},
[activeTopic.id, assistant.topics, onClearMessages, removeTopic, setActiveTopic]
[activeTopic.id, addTopic, assistant.id, assistant.topics, removeTopic, setActiveTopic]
)

const onPinTopic = useCallback(
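Behavioral note: deleting the last remaining topic no longer clears its messages in place; a fresh default topic is created in the local database and activated first, so removeTopic can always run. Simplified from the hunk above:

if (assistant.topics.length === 1) {
  const newTopic = getDefaultTopic(assistant.id)
  await db.topics.add({ id: newTopic.id, messages: [] }) // persist an empty message list first
  addTopic(newTopic)
  setActiveTopic(newTopic)
}
removeTopic(topic) // the old topic can now be removed unconditionally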
@ -238,19 +238,27 @@ const MinimalToolbar: FC<Props> = ({ app, webviewRef, currentUrl, onReload, onOp
<LeftSection>
<ButtonGroup>
<Tooltip title={t('minapp.popup.goBack')} placement="bottom">
<ToolbarButton onClick={handleGoBack} $disabled={!canGoBack}>
<ToolbarButton
onClick={handleGoBack}
$disabled={!canGoBack}
aria-label={t('minapp.popup.goBack')}
aria-disabled={!canGoBack}>
<ArrowLeftOutlined />
</ToolbarButton>
</Tooltip>

<Tooltip title={t('minapp.popup.goForward')} placement="bottom">
<ToolbarButton onClick={handleGoForward} $disabled={!canGoForward}>
<ToolbarButton
onClick={handleGoForward}
$disabled={!canGoForward}
aria-label={t('minapp.popup.goForward')}
aria-disabled={!canGoForward}>
<ArrowRightOutlined />
</ToolbarButton>
</Tooltip>

<Tooltip title={t('minapp.popup.refresh')} placement="bottom">
<ToolbarButton onClick={onReload}>
<ToolbarButton onClick={onReload} aria-label={t('minapp.popup.refresh')}>
<ReloadOutlined />
</ToolbarButton>
</Tooltip>
@ -261,7 +269,7 @@ const MinimalToolbar: FC<Props> = ({ app, webviewRef, currentUrl, onReload, onOp
<ButtonGroup>
{canOpenExternalLink && (
<Tooltip title={t('minapp.popup.openExternal')} placement="bottom">
<ToolbarButton onClick={handleOpenLink}>
<ToolbarButton onClick={handleOpenLink} aria-label={t('minapp.popup.openExternal')}>
<ExportOutlined />
</ToolbarButton>
</Tooltip>
@ -271,7 +279,11 @@ const MinimalToolbar: FC<Props> = ({ app, webviewRef, currentUrl, onReload, onOp
<Tooltip
title={isPinned ? t('minapp.remove_from_launchpad') : t('minapp.add_to_launchpad')}
placement="bottom">
<ToolbarButton onClick={handleTogglePin} $active={isPinned}>
<ToolbarButton
onClick={handleTogglePin}
$active={isPinned}
aria-label={isPinned ? t('minapp.remove_from_launchpad') : t('minapp.add_to_launchpad')}
aria-pressed={isPinned}>
<PushpinOutlined />
</ToolbarButton>
</Tooltip>
@ -284,21 +296,29 @@ const MinimalToolbar: FC<Props> = ({ app, webviewRef, currentUrl, onReload, onOp
: t('minapp.popup.open_link_external_off')
}
placement="bottom">
<ToolbarButton onClick={handleToggleOpenExternal} $active={minappsOpenLinkExternal}>
<ToolbarButton
onClick={handleToggleOpenExternal}
$active={minappsOpenLinkExternal}
aria-label={
minappsOpenLinkExternal
? t('minapp.popup.open_link_external_on')
: t('minapp.popup.open_link_external_off')
}
aria-pressed={minappsOpenLinkExternal}>
<LinkOutlined />
</ToolbarButton>
</Tooltip>

{isDev && (
<Tooltip title={t('minapp.popup.devtools')} placement="bottom">
<ToolbarButton onClick={onOpenDevTools}>
<ToolbarButton onClick={onOpenDevTools} aria-label={t('minapp.popup.devtools')}>
<CodeOutlined />
</ToolbarButton>
</Tooltip>
)}

<Tooltip title={t('minapp.popup.minimize')} placement="bottom">
<ToolbarButton onClick={handleMinimize}>
<ToolbarButton onClick={handleMinimize} aria-label={t('minapp.popup.minimize')}>
<MinusOutlined />
</ToolbarButton>
</Tooltip>
@ -259,7 +259,8 @@ const PopupContainer: React.FC<Props> = ({ provider, resolve }) => {
{ label: 'Anthropic', value: 'anthropic' },
{ label: 'Azure OpenAI', value: 'azure-openai' },
{ label: 'New API', value: 'new-api' },
{ label: 'CherryIN', value: 'cherryin-type' }
{ label: 'CherryIN', value: 'cherryin-type' },
{ label: 'Ollama', value: 'ollama' }
]}
/>
</Form.Item>
@ -29,6 +29,7 @@ import {
isAzureOpenAIProvider,
isGeminiProvider,
isNewApiProvider,
isOllamaProvider,
isOpenAICompatibleProvider,
isOpenAIProvider,
isVertexProvider
@ -278,6 +279,10 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
const hostPreview = () => {
const formattedApiHost = adaptProvider({ provider: { ...provider, apiHost } }).apiHost

if (isOllamaProvider(provider)) {
return formattedApiHost + '/chat'
}

if (isOpenAICompatibleProvider(provider)) {
return formattedApiHost + '/chat/completions'
}
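For context, the preview now distinguishes Ollama's native endpoint from OpenAI-compatible ones. With illustrative hosts (values are examples, not from the diff):

// Ollama host (already formatted to .../api) previews the native chat endpoint:
//   http://localhost:11434/api  ->  http://localhost:11434/api/chat
// OpenAI-compatible hosts preview the completions endpoint:
//   https://example.com/v1      ->  https://example.com/v1/chat/completions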
@ -4,6 +4,7 @@ import type { RootState } from '@renderer/store'
import { useAppDispatch } from '@renderer/store'
import { setApiServerApiKey, setApiServerPort } from '@renderer/store/settings'
import { formatErrorMessage } from '@renderer/utils/error'
import { API_SERVER_DEFAULTS } from '@shared/config/constant'
import { Alert, Button, Input, InputNumber, Tooltip, Typography } from 'antd'
import { Copy, ExternalLink, Play, RotateCcw, Square } from 'lucide-react'
import type { FC } from 'react'
@ -56,7 +57,7 @@ const ApiServerSettings: FC = () => {
}

const handlePortChange = (value: string) => {
const port = parseInt(value) || 23333
const port = parseInt(value) || API_SERVER_DEFAULTS.PORT
if (port >= 1000 && port <= 65535) {
dispatch(setApiServerPort(port))
}
@ -64,7 +65,9 @@ const ApiServerSettings: FC = () => {

const openApiDocs = () => {
if (apiServerRunning) {
window.open(`http://localhost:${apiServerConfig.port}/api-docs`, '_blank')
const host = apiServerConfig.host || API_SERVER_DEFAULTS.HOST
const port = apiServerConfig.port || API_SERVER_DEFAULTS.PORT
window.open(`http://${host}:${port}/api-docs`, '_blank')
}
}

@ -98,7 +101,9 @@ const ApiServerSettings: FC = () => {
{apiServerRunning ? t('apiServer.status.running') : t('apiServer.status.stopped')}
</StatusText>
<StatusSubtext>
{apiServerRunning ? `http://localhost:${apiServerConfig.port}` : t('apiServer.fields.port.description')}
{apiServerRunning
? `http://${apiServerConfig.host || API_SERVER_DEFAULTS.HOST}:${apiServerConfig.port || API_SERVER_DEFAULTS.PORT}`
: t('apiServer.fields.port.description')}
</StatusSubtext>
</StatusContent>
</StatusSection>
@ -119,11 +124,11 @@ const ApiServerSettings: FC = () => {
{!apiServerRunning && (
<StyledInputNumber
value={apiServerConfig.port}
onChange={(value) => handlePortChange(String(value || 23333))}
onChange={(value) => handlePortChange(String(value || API_SERVER_DEFAULTS.PORT))}
min={1000}
max={65535}
disabled={apiServerRunning}
placeholder="23333"
placeholder={String(API_SERVER_DEFAULTS.PORT)}
size="middle"
/>
)}
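These hunks replace hard-coded localhost/23333 literals with shared constants. The diff never shows API_SERVER_DEFAULTS itself; a sketch of the shape the hunks imply (the HOST value is an assumption inferred from the old literal and the .http file changes further down):

// Hypothetical shape; the real values live in @shared/config/constant.
export const API_SERVER_DEFAULTS = {
  HOST: '127.0.0.1',
  PORT: 23333
} as const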
@ -6,12 +6,13 @@ import { DEFAULT_KNOWLEDGE_DOCUMENT_COUNT, DEFAULT_KNOWLEDGE_THRESHOLD } from '@
import { getEmbeddingMaxContext } from '@renderer/config/embedings'
import { addSpan, endSpan } from '@renderer/services/SpanManagerService'
import store from '@renderer/store'
import type {
FileMetadata,
KnowledgeBase,
KnowledgeBaseParams,
KnowledgeReference,
KnowledgeSearchResult
import {
type FileMetadata,
type KnowledgeBase,
type KnowledgeBaseParams,
type KnowledgeReference,
type KnowledgeSearchResult,
SystemProviderIds
} from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk'
import { ChunkType } from '@renderer/types/chunk'
@ -50,6 +51,9 @@ export const getKnowledgeBaseParams = (base: KnowledgeBase): KnowledgeBaseParams
baseURL = baseURL + '/openai'
} else if (isAzureOpenAIProvider(actualProvider)) {
baseURL = baseURL + '/v1'
} else if (actualProvider.id === SystemProviderIds.ollama) {
// The LangChain ecosystem does not expect URLs ending in /api
baseURL = baseURL.replace(/\/api$/, '')
}

logger.info(`Knowledge base ${base.name} using baseURL: ${baseURL}`)
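The new branch strips a trailing /api because the LangChain-side clients append their own paths to the base URL. The anchored regex only touches a final /api segment:

'http://localhost:11434/api'.replace(/\/api$/, '') // 'http://localhost:11434'
'http://localhost:11434'.replace(/\/api$/, '')     // unchanged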
@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
{
key: 'cherry-studio',
storage,
version: 180,
version: 181,
blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
migrate
},
@ -32,6 +32,7 @@ import {
isSupportDeveloperRoleProvider,
isSupportStreamOptionsProvider
} from '@renderer/utils/provider'
import { API_SERVER_DEFAULTS } from '@shared/config/constant'
import { defaultByPassRules, UpgradeChannel } from '@shared/config/constant'
import { isEmpty } from 'lodash'
import { createMigrate } from 'redux-persist'
@ -2032,8 +2033,8 @@ const migrateConfig = {
if (!state.settings.apiServer) {
state.settings.apiServer = {
enabled: false,
host: 'localhost',
port: 23333,
host: API_SERVER_DEFAULTS.HOST,
port: API_SERVER_DEFAULTS.PORT,
apiKey: `cs-sk-${uuid()}`
}
}
@ -2809,7 +2810,7 @@ const migrateConfig = {
try {
addProvider(state, SystemProviderIds.longcat)

addProvider(state, SystemProviderIds['ai-gateway'])
addProvider(state, 'gateway')
addProvider(state, 'cerebras')
state.llm.providers.forEach((provider) => {
if (provider.id === SystemProviderIds.minimax) {
@ -2909,9 +2910,20 @@ const migrateConfig = {
},
'180': (state: RootState) => {
try {
if (state.settings.apiServer) {
state.settings.apiServer.host = API_SERVER_DEFAULTS.HOST
}
// @ts-expect-error
if (state.settings.openAI.summaryText === 'undefined') {
state.settings.openAI.summaryText = undefined
}
// @ts-expect-error
if (state.settings.openAI.verbosity === 'undefined') {
state.settings.openAI.verbosity = undefined
}
state.llm.providers.forEach((provider) => {
if (provider.id === SystemProviderIds.ppio) {
provider.anthropicApiHost = 'https://api.ppinfra.com/anthropic'
if (provider.id === SystemProviderIds.ollama) {
provider.type = 'ollama'
}
})
logger.info('migrate 180 success')
@ -2920,6 +2932,40 @@ const migrateConfig = {
logger.error('migrate 180 error', error as Error)
return state
}
},
'181': (state: RootState) => {
try {
state.llm.providers.forEach((provider) => {
if (provider.id === 'ai-gateway') {
provider.id = SystemProviderIds.gateway
}
// Also update model.provider references to avoid orphaned models
provider.models?.forEach((model) => {
if (model.provider === 'ai-gateway') {
model.provider = SystemProviderIds.gateway
}
})
})
logger.info('migrate 181 success')
return state
} catch (error) {
logger.error('migrate 181 error', error as Error)
return state
}
},
'182': (state: RootState) => {
try {
state.llm.providers.forEach((provider) => {
if (provider.id === SystemProviderIds.ppio) {
provider.anthropicApiHost = 'https://api.ppinfra.com/anthropic'
}
})
logger.info('migrate 182 success')
return state
} catch (error) {
logger.error('migrate 182 error', error as Error)
return state
}
}
}
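Mechanically, bumping the persist version to 181 in the previous hunk is what makes redux-persist run the new steps for existing stores: createMigrate compares the persisted version against the configured one and applies every migration in between, in order. A minimal sketch of the wiring, assuming the standard redux-persist API and the migrateConfig map above:

import { createMigrate } from 'redux-persist'

// Each numbered key runs once when a store persisted at an older version is rehydrated.
const migrate = createMigrate(migrateConfig, { debug: false })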
@ -18,7 +18,7 @@ import type {
import { ThemeMode } from '@renderer/types'
import type { OpenAISummaryText, OpenAIVerbosity } from '@renderer/types/aiCoreTypes'
import { uuid } from '@renderer/utils'
import { UpgradeChannel } from '@shared/config/constant'
import { API_SERVER_DEFAULTS, UpgradeChannel } from '@shared/config/constant'

import type { RemoteSyncState } from './backup'

@ -410,8 +410,8 @@ export const initialState: SettingsState = {
// API Server
apiServer: {
enabled: false,
host: 'localhost',
port: 23333,
host: API_SERVER_DEFAULTS.HOST,
port: API_SERVER_DEFAULTS.PORT,
apiKey: `cs-sk-${uuid()}`
},
showMessageOutline: false
@ -1,5 +1,5 @@
import type OpenAI from '@cherrystudio/openai'
import type { NotNull, NotUndefined } from '@types'
import type { NotUndefined } from '@types'
import type { ImageModel, LanguageModel } from 'ai'
import type { generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai'
import * as z from 'zod'
@ -31,18 +31,26 @@ export type GenerateObjectParams = Omit<Parameters<typeof generateObject>[0], 'm

export type AiSdkModel = LanguageModel | ImageModel

// The original type unite both undefined and null.
// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs.
// Parameter would not be passed into request if it's undefined.
export type OpenAIVerbosity = NotNull<OpenAI.Responses.ResponseTextConfig['verbosity']>
/**
 * Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses.
 *
 * The original type unites both undefined and null.
 * When undefined, the parameter is omitted from the request.
 * When null, verbosity is explicitly disabled.
 */
export type OpenAIVerbosity = OpenAI.Responses.ResponseTextConfig['verbosity']
export type ValidOpenAIVerbosity = NotUndefined<OpenAIVerbosity>

export type OpenAIReasoningEffort = OpenAI.ReasoningEffort

// The original type unite both undefined and null.
// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs.
// Parameter would not be passed into request if it's undefined.
export type OpenAISummaryText = NotNull<OpenAI.Reasoning['summary']>
/**
 * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process.
 *
 * The original type unites both undefined and null.
 * When undefined, the parameter is omitted from the request.
 * When null, the summary is explicitly disabled.
 */
export type OpenAISummaryText = OpenAI.Reasoning['summary']

const AiSdkParamsSchema = z.enum([
'maxOutputTokens',
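The practical difference the new doc comments encode: undefined means the field is omitted from the request body entirely, while null is sent explicitly to switch the feature off. Illustratively, for the Responses API text config (payloads abbreviated, shown only as a sketch):

// verbosity === undefined  ->  text: {}                    (field omitted)
// verbosity === null       ->  text: { verbosity: null }   (explicit disable)
// verbosity === 'low'      ->  text: { verbosity: 'low' }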
@ -96,6 +96,9 @@ export type ReasoningEffortOptionalParams = {
include_thoughts?: boolean
}
}
thinking?: {
type: 'enabled' | 'disabled'
}
thinking_budget?: number
reasoning_effort?: OpenAI.Chat.Completions.ChatCompletionCreateParams['reasoning_effort'] | 'auto'
}
@ -128,10 +131,6 @@ export type OpenAIExtraBody = {
source_lang: 'auto'
target_lang: string
}
// for gpt-5 series models verbosity control
text?: {
verbosity?: 'low' | 'medium' | 'high'
}
}
// image is for openrouter. audio is ignored for now
export type OpenAIModality = OpenAI.ChatCompletionModality | 'image'
@ -6,6 +6,7 @@ import {
formatApiHost,
formatApiKeys,
formatAzureOpenAIApiHost,
formatOllamaApiHost,
formatVertexApiHost,
getTrailingApiVersion,
hasAPIVersion,
@ -330,6 +331,73 @@ describe('api', () => {
})
})

describe('formatOllamaApiHost', () => {
it('removes trailing slash and appends /api for basic hosts', () => {
expect(formatOllamaApiHost('https://api.ollama.com/')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434/')).toBe('http://localhost:11434/api')
})

it('appends /api when no suffix is present', () => {
expect(formatOllamaApiHost('https://api.ollama.com')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434')).toBe('http://localhost:11434/api')
})

it('removes /v1 suffix and appends /api', () => {
expect(formatOllamaApiHost('https://api.ollama.com/v1')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434/v1/')).toBe('http://localhost:11434/api')
})

it('removes /api suffix and keeps /api', () => {
expect(formatOllamaApiHost('https://api.ollama.com/api')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434/api/')).toBe('http://localhost:11434/api')
})

it('removes /chat suffix and appends /api', () => {
expect(formatOllamaApiHost('https://api.ollama.com/chat')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('http://localhost:11434/chat/')).toBe('http://localhost:11434/api')
})

it('handles multiple suffix combinations correctly', () => {
expect(formatOllamaApiHost('https://api.ollama.com/v1/chat')).toBe('https://api.ollama.com/v1/api')
expect(formatOllamaApiHost('https://api.ollama.com/chat/v1')).toBe('https://api.ollama.com/api')
expect(formatOllamaApiHost('https://api.ollama.com/api/chat')).toBe('https://api.ollama.com/api/api')
})

it('preserves complex paths while handling suffixes', () => {
expect(formatOllamaApiHost('https://api.ollama.com/custom/path')).toBe('https://api.ollama.com/custom/path/api')
expect(formatOllamaApiHost('https://api.ollama.com/custom/path/')).toBe('https://api.ollama.com/custom/path/api')
expect(formatOllamaApiHost('https://api.ollama.com/custom/path/v1')).toBe(
'https://api.ollama.com/custom/path/api'
)
})

it('handles edge cases with multiple slashes', () => {
expect(formatOllamaApiHost('https://api.ollama.com//')).toBe('https://api.ollama.com//api')
expect(formatOllamaApiHost('https://api.ollama.com///v1///')).toBe('https://api.ollama.com///v1///api')
})

it('handles localhost with different ports', () => {
expect(formatOllamaApiHost('http://localhost:3000')).toBe('http://localhost:3000/api')
expect(formatOllamaApiHost('http://127.0.0.1:11434/')).toBe('http://127.0.0.1:11434/api')
expect(formatOllamaApiHost('https://localhost:8080/v1')).toBe('https://localhost:8080/api')
})

it('handles IP addresses', () => {
expect(formatOllamaApiHost('http://192.168.1.100:11434')).toBe('http://192.168.1.100:11434/api')
expect(formatOllamaApiHost('https://10.0.0.1:8080/v1/')).toBe('https://10.0.0.1:8080/api')
})

it('handles empty strings and edge cases', () => {
expect(formatOllamaApiHost('')).toBe('/api')
expect(formatOllamaApiHost('/')).toBe('/api')
})

it('preserves protocol and handles mixed case', () => {
expect(formatOllamaApiHost('HTTPS://API.OLLAMA.COM')).toBe('HTTPS://API.OLLAMA.COM/api')
expect(formatOllamaApiHost('HTTP://localhost:11434/V1/')).toBe('HTTP://localhost:11434/V1/api')
})
})
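The tests pin the normalization order down fairly tightly. One implementation consistent with every case above (a reconstruction from the tests, not necessarily the shipped code): drop a single trailing slash, then strip at most one trailing /v1, /api, or /chat segment, in that order, then append /api.

// Reconstructed sketch of formatOllamaApiHost, derived from the tests above.
export const formatOllamaApiHost = (host: string): string => {
  let base = host.replace(/\/$/, '') // drop one trailing slash
  base = base.replace(/\/v1$/, '')   // 'x/v1'   -> 'x'
  base = base.replace(/\/api$/, '')  // 'x/api'  -> 'x'
  base = base.replace(/\/chat$/, '') // 'x/chat' -> 'x'
  return base + '/api'
}

Each suffix is stripped at most once, and /api is checked before /chat: in 'x/api/chat' the /chat step exposes an inner /api that is never re-checked, giving 'x/api/api', while 'x/chat/v1' loses /v1 and then /chat, collapsing to 'x/api'.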
describe('getTrailingApiVersion', () => {
it('extracts trailing API version from URL', () => {
expect(getTrailingApiVersion('https://api.example.com/v1')).toBe('v1')
@ -222,6 +222,9 @@ describe('naming', () => {
it('should remove trailing :free', () => {
expect(getLowerBaseModelName('gpt-4:free')).toBe('gpt-4')
})
it('should remove trailing (free)', () => {
expect(getLowerBaseModelName('agent/gpt-4(free)')).toBe('gpt-4')
})
})

describe('getFirstCharacter', () => {
@ -189,7 +189,7 @@ describe('provider utils', () => {

expect(isAnthropicProvider(createProvider({ type: 'anthropic' }))).toBe(true)
expect(isGeminiProvider(createProvider({ type: 'gemini' }))).toBe(true)
expect(isAIGatewayProvider(createProvider({ type: 'ai-gateway' }))).toBe(true)
expect(isAIGatewayProvider(createProvider({ type: 'gateway' }))).toBe(true)
})

it('computes API version support', () => {
@ -10,6 +10,7 @@ export {
isCherryAIProvider,
isGeminiProvider,
isNewApiProvider,
isOllamaProvider,
isOpenAICompatibleProvider,
isOpenAIProvider,
isPerplexityProvider,
@ -72,8 +72,22 @@ const ActionIcons: FC<{
(action: ActionItem) => {
const displayName = action.isBuiltIn ? t(action.name) : action.name

const handleKeyDown = (e: React.KeyboardEvent<HTMLDivElement>) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault()
handleAction(action)
}
}

return (
<ActionButton key={action.id} onClick={() => handleAction(action)} title={isCompact ? displayName : undefined}>
<ActionButton
key={action.id}
onClick={() => handleAction(action)}
onKeyDown={handleKeyDown}
title={isCompact ? displayName : undefined}
role="button"
aria-label={displayName}
tabIndex={0}>
<ActionIcon>
{action.id === 'copy' ? (
renderCopyIcon()
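The onKeyDown/role/tabIndex additions make the styled div behave like a native button for keyboard users: Enter and Space both trigger the action, and preventDefault keeps Space from scrolling the page, per the WAI-ARIA button pattern. A generic form of the helper (hypothetical, not from the diff):

// Sketch: keyboard activation for div-based buttons.
const activateOnEnterOrSpace =
  (fn: () => void) =>
  (e: React.KeyboardEvent<HTMLElement>) => {
    if (e.key === 'Enter' || e.key === ' ') {
      e.preventDefault() // stop Space from scrolling the page
      fn()
    }
  }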
@ -1,4 +1,4 @@
@host=http://localhost:23333
@host=http://127.0.0.1:23333
@token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194
@agent_id=agent_1758092281575_tn9dxio9k

@ -56,4 +56,3 @@ Content-Type: application/json
"max_turns": 5
}
}

@ -1,5 +1,5 @@

@host=http://localhost:23333
@host=http://127.0.0.1:23333
@token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194
@agent_id=agent_1758092281575_tn9dxio9k
@session_id=session_1758278828236_mqj91e7c0

@ -1,4 +1,4 @@
@host=http://localhost:23333
@host=http://127.0.0.1:23333
@token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194
@agent_id=agent_1758092281575_tn9dxio9k
52
yarn.lock
52
yarn.lock
@ -128,16 +128,15 @@ __metadata:
languageName: node
linkType: hard

"@ai-sdk/deepseek@npm:^1.0.29":
version: 1.0.29
resolution: "@ai-sdk/deepseek@npm:1.0.29"
"@ai-sdk/deepseek@npm:^1.0.31":
version: 1.0.31
resolution: "@ai-sdk/deepseek@npm:1.0.31"
dependencies:
"@ai-sdk/openai-compatible": "npm:1.0.27"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@ai-sdk/provider-utils": "npm:3.0.18"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/f43fba5c72e3f2d8ddc79d68c656cb4fc5fcd488c97b0a5371ad728e2d5c7a8c61fe9125a2a471b7648d99646cd2c78aad2d462c1469942bb4046763c5f13f38
checksum: 10c0/851965392ce03c85ffacf74900ec159bccef491b9bf6142ac08bc25f4d2bbf4df1d754e76fe9793403dee4a8da76fb6b7a9ded84491ec309bdea9aa478e6f542
languageName: node
linkType: hard

@ -243,6 +242,18 @@ __metadata:
languageName: node
linkType: hard

"@ai-sdk/openai-compatible@npm:^1.0.28":
version: 1.0.28
resolution: "@ai-sdk/openai-compatible@npm:1.0.28"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.18"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/f484774e0094a12674f392d925038a296191723b4c76bd833eabf1b334cf3c84fe77a2e2c5fbac974ec5e18340e113c6a81c86d957c9529a7a60e87cd390ada8
languageName: node
linkType: hard

"@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch":
version: 1.0.27
resolution: "@ai-sdk/openai-compatible@patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch::version=1.0.27&hash=c44b76"
@ -304,6 +315,19 @@ __metadata:
languageName: node
linkType: hard

"@ai-sdk/provider-utils@npm:3.0.18":
version: 3.0.18
resolution: "@ai-sdk/provider-utils@npm:3.0.18"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@standard-schema/spec": "npm:^1.0.0"
eventsource-parser: "npm:^3.0.6"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/209c15b0dceef0ba95a7d3de544be0a417ad4a0bd5143496b3966a35fedf144156d93a42ff8c3d7db56781b9836bafc8c132c98978c49240e55bc1a36e18a67f
languageName: node
linkType: hard

"@ai-sdk/provider@npm:2.0.0, @ai-sdk/provider@npm:^2.0.0":
version: 2.0.0
resolution: "@ai-sdk/provider@npm:2.0.0"
@ -1830,7 +1854,7 @@ __metadata:
dependencies:
"@ai-sdk/anthropic": "npm:^2.0.49"
"@ai-sdk/azure": "npm:^2.0.74"
"@ai-sdk/deepseek": "npm:^1.0.29"
"@ai-sdk/deepseek": "npm:^1.0.31"
"@ai-sdk/openai-compatible": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17"
@ -1851,6 +1875,7 @@ __metadata:
version: 0.0.0-use.local
resolution: "@cherrystudio/ai-sdk-provider@workspace:packages/ai-sdk-provider"
dependencies:
"@ai-sdk/openai-compatible": "npm:^1.0.28"
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17"
tsdown: "npm:^0.13.3"
@ -10232,6 +10257,7 @@ __metadata:
notion-helper: "npm:^1.3.22"
npx-scope-finder: "npm:^1.2.0"
officeparser: "npm:^4.2.0"
ollama-ai-provider-v2: "npm:^1.5.5"
os-proxy-config: "npm:^1.1.2"
oxlint: "npm:^1.22.0"
oxlint-tsgolint: "npm:^0.2.0"
@ -19934,6 +19960,18 @@ __metadata:
languageName: node
linkType: hard

"ollama-ai-provider-v2@npm:^1.5.5":
version: 1.5.5
resolution: "ollama-ai-provider-v2@npm:1.5.5"
dependencies:
"@ai-sdk/provider": "npm:^2.0.0"
"@ai-sdk/provider-utils": "npm:^3.0.17"
peerDependencies:
zod: ^4.0.16
checksum: 10c0/da40c8097bd8205c46eccfbd13e77c51a6ce97a29b886adfc9e1b8444460b558138d1ed4428491fcc9378d46f649dd0a9b1e5b13cf6bbc8f5385e8b321734e72
languageName: node
linkType: hard

"ollama@npm:^0.5.12":
version: 0.5.16
resolution: "ollama@npm:0.5.16"