Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2025-12-19 06:30:10 +08:00

Compare commits: e3e59cd2f7 ... 12dc3e8a19 (11 commits)
| SHA1 |
|---|
| 12dc3e8a19 |
| eb7a2cc85a |
| fd6986076a |
| 6309cc179d |
| c04529a23c |
| 0f1b3afa72 |
| 0cf0072b51 |
| 150bb3e3a0 |
| 67d4665deb |
| 8c9b05a55a |
| 0b5665e772 |
@@ -244,6 +244,7 @@ export enum IpcChannel {
   System_GetCpuName = 'system:getCpuName',
   System_CheckGitBash = 'system:checkGitBash',
   System_GetGitBashPath = 'system:getGitBashPath',
+  System_GetGitBashPathInfo = 'system:getGitBashPathInfo',
   System_SetGitBashPath = 'system:setGitBashPath',

   // DevTools
@@ -488,3 +488,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [

 // resources/scripts should be maintained manually
 export const HOME_CHERRY_DIR = '.cherrystudio'
+
+// Git Bash path configuration types
+export type GitBashPathSource = 'manual' | 'auto'
+
+export interface GitBashPathInfo {
+  path: string | null
+  source: GitBashPathSource | null
+}
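For orientation, a value of this shape as it might look after auto-discovery on a typical Windows install (illustrative path only, nothing here is guaranteed by the code):

```ts
// Illustrative only: the shape defined above, populated by auto-discovery
const info: GitBashPathInfo = {
  path: 'C:\\Program Files\\Git\\bin\\bash.exe', // hypothetical install location
  source: 'auto' // becomes 'manual' once the user picks a path explicitly
}
```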
@@ -6,7 +6,14 @@ import { loggerService } from '@logger'
 import { isLinux, isMac, isPortable, isWin } from '@main/constant'
 import { generateSignature } from '@main/integration/cherryai'
 import anthropicService from '@main/services/AnthropicService'
-import { findGitBash, getBinaryPath, isBinaryExists, runInstallScript, validateGitBashPath } from '@main/utils/process'
+import {
+  autoDiscoverGitBash,
+  getBinaryPath,
+  getGitBashPathInfo,
+  isBinaryExists,
+  runInstallScript,
+  validateGitBashPath
+} from '@main/utils/process'
 import { handleZoomFactor } from '@main/utils/zoom'
 import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
 import type { UpgradeChannel } from '@shared/config/constant'
@@ -499,9 +506,8 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
     }

     try {
-      const customPath = configManager.get(ConfigKeys.GitBashPath) as string | undefined
-      const bashPath = findGitBash(customPath)
-
+      // Use autoDiscoverGitBash to handle auto-discovery and persistence
+      const bashPath = autoDiscoverGitBash()
       if (bashPath) {
         logger.info('Git Bash is available', { path: bashPath })
         return true
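On the renderer side this handler is reached through the preload bridge that appears later in this diff; a minimal sketch, assuming that `window.api` surface:

```ts
// Sketch: gating a feature on Git Bash availability (assumes window.api from the preload)
const hasGitBash = await window.api.checkGitBash()
if (!hasGitBash) {
  console.warn('Git Bash not found; ask the user to set a path via setGitBashPath')
}
```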
@@ -524,13 +530,22 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
     return customPath ?? null
   })

+  // Returns { path, source } where source is 'manual' | 'auto' | null
+  ipcMain.handle(IpcChannel.System_GetGitBashPathInfo, () => {
+    return getGitBashPathInfo()
+  })
+
   ipcMain.handle(IpcChannel.System_SetGitBashPath, (_, newPath: string | null) => {
     if (!isWin) {
       return false
     }

     if (!newPath) {
+      // Clear manual setting and re-run auto-discovery
       configManager.set(ConfigKeys.GitBashPath, null)
+      configManager.set(ConfigKeys.GitBashPathSource, null)
+      // Re-run auto-discovery to restore auto-discovered path if available
+      autoDiscoverGitBash()
       return true
     }
@@ -539,7 +554,9 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
       return false
     }

+    // Set path with 'manual' source
     configManager.set(ConfigKeys.GitBashPath, validated)
+    configManager.set(ConfigKeys.GitBashPathSource, 'manual')
     return true
   })
@@ -32,7 +32,8 @@ export enum ConfigKeys {
   Proxy = 'proxy',
   EnableDeveloperMode = 'enableDeveloperMode',
   ClientId = 'clientId',
-  GitBashPath = 'gitBashPath'
+  GitBashPath = 'gitBashPath',
+  GitBashPathSource = 'gitBashPathSource' // 'manual' | 'auto' | null
 }

 export class ConfigManager {
@@ -249,6 +249,26 @@ class McpService {
     StdioClientTransport | SSEClientTransport | InMemoryTransport | StreamableHTTPClientTransport
   > => {
     // Create appropriate transport based on configuration
+
+    // Special case for nowledgeMem - uses HTTP transport instead of in-memory
+    if (isBuiltinMCPServer(server) && server.name === BuiltinMCPServerNames.nowledgeMem) {
+      const nowledgeMemUrl = 'http://127.0.0.1:14242/mcp'
+      const options: StreamableHTTPClientTransportOptions = {
+        fetch: async (url, init) => {
+          return net.fetch(typeof url === 'string' ? url : url.toString(), init)
+        },
+        requestInit: {
+          headers: {
+            ...defaultAppHeaders(),
+            APP: 'Cherry Studio'
+          }
+        },
+        authProvider
+      }
+      getServerLogger(server).debug(`Using StreamableHTTPClientTransport for ${server.name}`)
+      return new StreamableHTTPClientTransport(new URL(nowledgeMemUrl), options)
+    }
+
     if (isBuiltinMCPServer(server) && server.name !== BuiltinMCPServerNames.mcpAutoInstall) {
       getServerLogger(server).debug(`Using in-memory transport`)
       const [clientTransport, serverTransport] = InMemoryTransport.createLinkedPair()
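The custom `fetch` above adapts Electron's `net.fetch` (which goes through Chromium's network stack, so it picks up the app's proxy and session settings) to the standard fetch signature the transport expects. The same adapter in isolation, as a sketch:

```ts
import { net } from 'electron'

// Sketch: route fetch through Chromium's network stack instead of Node's (main process only)
const electronFetch = async (url: string | URL, init?: RequestInit): Promise<Response> => {
  return net.fetch(typeof url === 'string' ? url : url.toString(), init)
}
```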
@@ -15,8 +15,8 @@ import { query } from '@anthropic-ai/claude-agent-sdk'
 import { loggerService } from '@logger'
 import { config as apiConfigService } from '@main/apiServer/config'
 import { validateModelId } from '@main/apiServer/utils'
-import { ConfigKeys, configManager } from '@main/services/ConfigManager'
-import { validateGitBashPath } from '@main/utils/process'
+import { isWin } from '@main/constant'
+import { autoDiscoverGitBash } from '@main/utils/process'
 import getLoginShellEnvironment from '@main/utils/shell-env'
 import { app } from 'electron'
@@ -109,7 +109,8 @@ class ClaudeCodeService implements AgentServiceInterface {
       Object.entries(loginShellEnv).filter(([key]) => !key.toLowerCase().endsWith('_proxy'))
     ) as Record<string, string>

-    const customGitBashPath = validateGitBashPath(configManager.get(ConfigKeys.GitBashPath) as string | undefined)
+    // Auto-discover Git Bash path on Windows (already logs internally)
+    const customGitBashPath = isWin ? autoDiscoverGitBash() : null

     const env = {
       ...loginShellEnvWithoutProxies,
@@ -1,9 +1,21 @@
+import { configManager } from '@main/services/ConfigManager'
 import { execFileSync } from 'child_process'
 import fs from 'fs'
 import path from 'path'
 import { beforeEach, describe, expect, it, vi } from 'vitest'

-import { findExecutable, findGitBash, validateGitBashPath } from '../process'
+import { autoDiscoverGitBash, findExecutable, findGitBash, validateGitBashPath } from '../process'

+// Mock configManager
+vi.mock('@main/services/ConfigManager', () => ({
+  ConfigKeys: {
+    GitBashPath: 'gitBashPath'
+  },
+  configManager: {
+    get: vi.fn(),
+    set: vi.fn()
+  }
+}))
+
 // Mock dependencies
 vi.mock('child_process')
@@ -695,4 +707,284 @@ describe.skipIf(process.platform !== 'win32')('process utilities', () => {
       })
     })
   })
+
+  describe('autoDiscoverGitBash', () => {
+    const originalEnvVar = process.env.CLAUDE_CODE_GIT_BASH_PATH
+
+    beforeEach(() => {
+      vi.mocked(configManager.get).mockReset()
+      vi.mocked(configManager.set).mockReset()
+      delete process.env.CLAUDE_CODE_GIT_BASH_PATH
+    })
+
+    afterEach(() => {
+      // Restore original environment variable
+      if (originalEnvVar !== undefined) {
+        process.env.CLAUDE_CODE_GIT_BASH_PATH = originalEnvVar
+      } else {
+        delete process.env.CLAUDE_CODE_GIT_BASH_PATH
+      }
+    })
+
+    /**
+     * Helper to mock fs.existsSync with a set of valid paths
+     */
+    const mockExistingPaths = (...validPaths: string[]) => {
+      vi.mocked(fs.existsSync).mockImplementation((p) => validPaths.includes(p as string))
+    }
+
+    describe('with no existing config path', () => {
+      it('should discover and persist Git Bash path when not configured', () => {
+        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        mockExistingPaths(gitPath, bashPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(bashPath)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
+      })
+
+      it('should return null and not persist when Git Bash is not found', () => {
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        vi.mocked(fs.existsSync).mockReturnValue(false)
+        vi.mocked(execFileSync).mockImplementation(() => {
+          throw new Error('Not found')
+        })
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBeNull()
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+    })
+
+    describe('environment variable precedence', () => {
+      it('should use env var over valid config path', () => {
+        const envPath = 'C:\\EnvGit\\bin\\bash.exe'
+        const configPath = 'C:\\ConfigGit\\bin\\bash.exe'
+
+        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
+        vi.mocked(configManager.get).mockReturnValue(configPath)
+        mockExistingPaths(envPath, configPath)
+
+        const result = autoDiscoverGitBash()
+
+        // Env var should take precedence
+        expect(result).toBe(envPath)
+        // Should not persist env var path (it's a runtime override)
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+
+      it('should fall back to config path when env var is invalid', () => {
+        const envPath = 'C:\\Invalid\\bash.exe'
+        const configPath = 'C:\\ConfigGit\\bin\\bash.exe'
+
+        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
+        vi.mocked(configManager.get).mockReturnValue(configPath)
+        // Env path is invalid (doesn't exist), only config path exists
+        mockExistingPaths(configPath)
+
+        const result = autoDiscoverGitBash()
+
+        // Should fall back to config path
+        expect(result).toBe(configPath)
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+
+      it('should fall back to auto-discovery when both env var and config are invalid', () => {
+        const envPath = 'C:\\InvalidEnv\\bash.exe'
+        const configPath = 'C:\\InvalidConfig\\bash.exe'
+        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        process.env.CLAUDE_CODE_GIT_BASH_PATH = envPath
+        process.env.ProgramFiles = 'C:\\Program Files'
+        vi.mocked(configManager.get).mockReturnValue(configPath)
+        // Both env and config paths are invalid, only standard Git exists
+        mockExistingPaths(gitPath, discoveredPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(discoveredPath)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
+      })
+    })
+
+    describe('with valid existing config path', () => {
+      it('should validate and return existing path without re-discovering', () => {
+        const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        mockExistingPaths(existingPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(existingPath)
+        // Should not call findGitBash or persist again
+        expect(configManager.set).not.toHaveBeenCalled()
+        // Should not call execFileSync (which findGitBash would use for discovery)
+        expect(execFileSync).not.toHaveBeenCalled()
+      })
+
+      it('should not override existing valid config with auto-discovery', () => {
+        const existingPath = 'C:\\CustomGit\\bin\\bash.exe'
+        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        mockExistingPaths(existingPath, discoveredPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(existingPath)
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+    })
+
+    describe('with invalid existing config path', () => {
+      it('should attempt auto-discovery when existing path does not exist', () => {
+        const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
+        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        // Invalid path doesn't exist, but Git is installed at standard location
+        mockExistingPaths(gitPath, discoveredPath)
+
+        const result = autoDiscoverGitBash()
+
+        // Should discover and return the new path
+        expect(result).toBe(discoveredPath)
+        // Should persist the discovered path (overwrites invalid)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
+      })
+
+      it('should attempt auto-discovery when existing path is not bash.exe', () => {
+        const existingPath = 'C:\\CustomGit\\bin\\git.exe'
+        const discoveredPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        // Invalid path exists but is not bash.exe (validation will fail)
+        // Git is installed at standard location
+        mockExistingPaths(existingPath, gitPath, discoveredPath)
+
+        const result = autoDiscoverGitBash()
+
+        // Should discover and return the new path
+        expect(result).toBe(discoveredPath)
+        // Should persist the discovered path (overwrites invalid)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', discoveredPath)
+      })
+
+      it('should return null when existing path is invalid and discovery fails', () => {
+        const existingPath = 'C:\\NonExistent\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(existingPath)
+        vi.mocked(fs.existsSync).mockReturnValue(false)
+        vi.mocked(execFileSync).mockImplementation(() => {
+          throw new Error('Not found')
+        })
+
+        const result = autoDiscoverGitBash()
+
+        // Both validation and discovery failed
+        expect(result).toBeNull()
+        // Should not persist when discovery fails
+        expect(configManager.set).not.toHaveBeenCalled()
+      })
+    })
+
+    describe('config persistence verification', () => {
+      it('should persist discovered path with correct config key', () => {
+        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        mockExistingPaths(gitPath, bashPath)
+
+        autoDiscoverGitBash()
+
+        // Verify the exact call to configManager.set
+        expect(configManager.set).toHaveBeenCalledTimes(1)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
+      })
+
+      it('should persist on each discovery when config remains undefined', () => {
+        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        mockExistingPaths(gitPath, bashPath)
+
+        autoDiscoverGitBash()
+        autoDiscoverGitBash()
+
+        // Each call discovers and persists since config remains undefined (mocked)
+        expect(configManager.set).toHaveBeenCalledTimes(2)
+      })
+    })
+
+    describe('real-world scenarios', () => {
+      it('should discover and persist standard Git for Windows installation', () => {
+        const gitPath = 'C:\\Program Files\\Git\\cmd\\git.exe'
+        const bashPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+        process.env.ProgramFiles = 'C:\\Program Files'
+        mockExistingPaths(gitPath, bashPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(bashPath)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
+      })
+
+      it('should discover portable Git via where.exe and persist', () => {
+        const gitPath = 'D:\\PortableApps\\Git\\bin\\git.exe'
+        const bashPath = 'D:\\PortableApps\\Git\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(undefined)
+
+        vi.mocked(fs.existsSync).mockImplementation((p) => {
+          const pathStr = p?.toString() || ''
+          // Common git paths don't exist
+          if (pathStr.includes('Program Files\\Git\\cmd\\git.exe')) return false
+          if (pathStr.includes('Program Files (x86)\\Git\\cmd\\git.exe')) return false
+          // Portable bash path exists
+          if (pathStr === bashPath) return true
+          return false
+        })
+
+        vi.mocked(execFileSync).mockReturnValue(gitPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(bashPath)
+        expect(configManager.set).toHaveBeenCalledWith('gitBashPath', bashPath)
+      })
+
+      it('should respect user-configured path over auto-discovery', () => {
+        const userConfiguredPath = 'D:\\MyGit\\bin\\bash.exe'
+        const systemPath = 'C:\\Program Files\\Git\\bin\\bash.exe'
+
+        vi.mocked(configManager.get).mockReturnValue(userConfiguredPath)
+        mockExistingPaths(userConfiguredPath, systemPath)
+
+        const result = autoDiscoverGitBash()
+
+        expect(result).toBe(userConfiguredPath)
+        expect(configManager.set).not.toHaveBeenCalled()
+        // Verify findGitBash was not called for discovery
+        expect(execFileSync).not.toHaveBeenCalled()
+      })
+    })
+  })
 })
@@ -1,4 +1,5 @@
 import { loggerService } from '@logger'
+import type { GitBashPathInfo, GitBashPathSource } from '@shared/config/constant'
 import { HOME_CHERRY_DIR } from '@shared/config/constant'
 import { execFileSync, spawn } from 'child_process'
 import fs from 'fs'
@@ -6,6 +7,7 @@ import os from 'os'
 import path from 'path'

 import { isWin } from '../constant'
+import { ConfigKeys, configManager } from '../services/ConfigManager'
 import { getResourcePath } from '.'

 const logger = loggerService.withContext('Utils:Process')
@@ -59,7 +61,7 @@ export async function getBinaryPath(name?: string): Promise<string> {

 export async function isBinaryExists(name: string): Promise<boolean> {
   const cmd = await getBinaryPath(name)
-  return await fs.existsSync(cmd)
+  return fs.existsSync(cmd)
 }

 /**
@@ -225,3 +227,77 @@ export function validateGitBashPath(customPath?: string | null): string | null {
   logger.debug('Validated custom Git Bash path', { path: resolved })
   return resolved
 }
+
+/**
+ * Auto-discover and persist Git Bash path if not already configured
+ * Only called when Git Bash is actually needed
+ *
+ * Precedence order:
+ * 1. CLAUDE_CODE_GIT_BASH_PATH environment variable (highest - runtime override)
+ * 2. Configured path from settings (manual or auto)
+ * 3. Auto-discovery via findGitBash (only if no valid config exists)
+ */
+export function autoDiscoverGitBash(): string | null {
+  if (!isWin) {
+    return null
+  }
+
+  // 1. Check environment variable override first (highest priority)
+  const envOverride = process.env.CLAUDE_CODE_GIT_BASH_PATH
+  if (envOverride) {
+    const validated = validateGitBashPath(envOverride)
+    if (validated) {
+      logger.debug('Using CLAUDE_CODE_GIT_BASH_PATH override', { path: validated })
+      return validated
+    }
+    logger.warn('CLAUDE_CODE_GIT_BASH_PATH provided but path is invalid', { path: envOverride })
+  }
+
+  // 2. Check if a path is already configured
+  const existingPath = configManager.get<string | undefined>(ConfigKeys.GitBashPath)
+  const existingSource = configManager.get<GitBashPathSource | undefined>(ConfigKeys.GitBashPathSource)
+
+  if (existingPath) {
+    const validated = validateGitBashPath(existingPath)
+    if (validated) {
+      return validated
+    }
+    // Existing path is invalid, try to auto-discover
+    logger.warn('Existing Git Bash path is invalid, attempting auto-discovery', {
+      path: existingPath,
+      source: existingSource
+    })
+  }
+
+  // 3. Try to find Git Bash via auto-discovery
+  const discoveredPath = findGitBash()
+  if (discoveredPath) {
+    // Persist the discovered path with 'auto' source
+    configManager.set(ConfigKeys.GitBashPath, discoveredPath)
+    configManager.set(ConfigKeys.GitBashPathSource, 'auto')
+    logger.info('Auto-discovered Git Bash path', { path: discoveredPath })
+  }
+
+  return discoveredPath
+}
+
+/**
+ * Get Git Bash path info including source
+ * If no path is configured, triggers auto-discovery first
+ */
+export function getGitBashPathInfo(): GitBashPathInfo {
+  if (!isWin) {
+    return { path: null, source: null }
+  }
+
+  let path = configManager.get<string | null>(ConfigKeys.GitBashPath) ?? null
+  let source = configManager.get<GitBashPathSource | null>(ConfigKeys.GitBashPathSource) ?? null
+
+  // If no path configured, trigger auto-discovery (handles upgrade from old versions)
+  if (!path) {
+    path = autoDiscoverGitBash()
+    source = path ? 'auto' : null
+  }
+
+  return { path, source }
+}
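Note the asymmetry the precedence creates: the environment override and an already-valid configured path are returned without persisting anything, while a fresh discovery writes back to config. A behavioral sketch (hypothetical paths, Windows assumed):

```ts
// Sketch: observable behavior of autoDiscoverGitBash (hypothetical paths)
process.env.CLAUDE_CODE_GIT_BASH_PATH = 'D:\\Tools\\Git\\bin\\bash.exe'
autoDiscoverGitBash() // returns the env path if it validates; config untouched

delete process.env.CLAUDE_CODE_GIT_BASH_PATH
autoDiscoverGitBash() // returns a valid configured path as-is, or discovers,
                      // persists with source 'auto', and returns the result
```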
@@ -2,7 +2,7 @@ import type { PermissionUpdate } from '@anthropic-ai/claude-agent-sdk'
 import { electronAPI } from '@electron-toolkit/preload'
 import type { SpanEntity, TokenUsage } from '@mcp-trace/trace-core'
 import type { SpanContext } from '@opentelemetry/api'
-import type { TerminalConfig, UpgradeChannel } from '@shared/config/constant'
+import type { GitBashPathInfo, TerminalConfig, UpgradeChannel } from '@shared/config/constant'
 import type { LogLevel, LogSourceWithContext } from '@shared/config/logger'
 import type { FileChangeEvent, WebviewKeyEvent } from '@shared/config/types'
 import type { MCPServerLogEntry } from '@shared/config/types'
@@ -126,6 +126,7 @@ const api = {
     getCpuName: () => ipcRenderer.invoke(IpcChannel.System_GetCpuName),
     checkGitBash: (): Promise<boolean> => ipcRenderer.invoke(IpcChannel.System_CheckGitBash),
     getGitBashPath: (): Promise<string | null> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPath),
+    getGitBashPathInfo: (): Promise<GitBashPathInfo> => ipcRenderer.invoke(IpcChannel.System_GetGitBashPathInfo),
     setGitBashPath: (newPath: string | null): Promise<boolean> =>
       ipcRenderer.invoke(IpcChannel.System_SetGitBashPath, newPath)
   },
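End to end, a settings screen can drive the whole Git Bash flow through this bridge; a minimal sketch, assuming the `window.api` surface defined above (the path is hypothetical):

```ts
// Sketch: renderer-side settings flow over the preload bridge above
const { path, source } = await window.api.getGitBashPathInfo()
console.log(`Git Bash: ${path ?? 'not found'} (source: ${source ?? 'unset'})`)

// A user-picked bash.exe is validated in the main process and stored as 'manual'
const ok = await window.api.setGitBashPath('D:\\MyGit\\bin\\bash.exe') // hypothetical path
if (!ok) console.warn('Rejected by validateGitBashPath')

// null clears the manual choice and re-runs auto-discovery
await window.api.setGitBashPath(null)
```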
@@ -32,6 +32,10 @@ export class AiSdkToChunkAdapter {
   private firstTokenTimestamp: number | null = null
   private hasTextContent = false
   private getSessionWasCleared?: () => boolean
+  private idleTimeoutMs?: number
+  private idleAbortController?: AbortController
+  private idleTimeoutTimer: ReturnType<typeof setTimeout> | null = null
+  private idleTimeoutTriggered = false

   constructor(
     private onChunk: (chunk: Chunk) => void,
@@ -39,13 +43,19 @@ export class AiSdkToChunkAdapter {
     accumulate?: boolean,
     enableWebSearch?: boolean,
     onSessionUpdate?: (sessionId: string) => void,
-    getSessionWasCleared?: () => boolean
+    getSessionWasCleared?: () => boolean,
+    streamingConfig?: {
+      idleTimeoutMs: number
+      idleAbortController: AbortController
+    }
   ) {
     this.toolCallHandler = new ToolCallChunkHandler(onChunk, mcpTools)
     this.accumulate = accumulate
     this.enableWebSearch = enableWebSearch || false
     this.onSessionUpdate = onSessionUpdate
     this.getSessionWasCleared = getSessionWasCleared
+    this.idleTimeoutMs = streamingConfig?.idleTimeoutMs
+    this.idleAbortController = streamingConfig?.idleAbortController
   }

   private markFirstTokenIfNeeded() {
@@ -59,6 +69,27 @@ export class AiSdkToChunkAdapter {
     this.firstTokenTimestamp = null
   }

+  private clearIdleTimeoutTimer() {
+    if (this.idleTimeoutTimer) {
+      clearTimeout(this.idleTimeoutTimer)
+      this.idleTimeoutTimer = null
+    }
+  }
+
+  private resetIdleTimeoutTimer() {
+    if (!this.idleTimeoutMs || this.idleTimeoutMs <= 0 || !this.idleAbortController) {
+      return
+    }
+
+    this.clearIdleTimeoutTimer()
+
+    this.idleTimeoutTimer = setTimeout(() => {
+      this.idleTimeoutTriggered = true
+      logger.warn('SSE idle timeout reached; aborting request', { idleTimeoutMs: this.idleTimeoutMs })
+      this.idleAbortController?.abort()
+    }, this.idleTimeoutMs)
+  }
+
   /**
    * Process the AI SDK stream result
    * @param aiSdkResult The AI SDK stream result object
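These two methods implement a standard idle watchdog: every chunk pushes the deadline back, so the timer only ever fires on a stream that has gone silent. The same pattern stripped of class state, as a standalone sketch:

```ts
// Sketch: the idle-watchdog pattern used above, in isolation
function createIdleWatchdog(idleTimeoutMs: number, controller: AbortController) {
  let timer: ReturnType<typeof setTimeout> | null = null
  return {
    // call before the first read and after every chunk
    reset() {
      if (timer) clearTimeout(timer)
      timer = setTimeout(() => controller.abort(), idleTimeoutMs)
    },
    // call in finally once the stream ends
    clear() {
      if (timer) clearTimeout(timer)
      timer = null
    }
  }
}
```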
@@ -88,6 +119,8 @@ export class AiSdkToChunkAdapter {
     }
     this.resetTimingState()
     this.responseStartTimestamp = Date.now()
+    this.idleTimeoutTriggered = false
+    this.resetIdleTimeoutTimer()
     // Reset state at the start of stream
     this.isFirstChunk = true
     this.hasTextContent = false
@@ -111,10 +144,12 @@ export class AiSdkToChunkAdapter {
         break
       }

+      this.resetIdleTimeoutTimer()
       // Convert and emit the chunk
       this.convertAndEmitChunk(value, final)
     }
   } finally {
+    this.clearIdleTimeoutTimer()
     reader.releaseLock()
     this.resetTimingState()
   }
@@ -380,7 +415,12 @@ export class AiSdkToChunkAdapter {
       case 'abort':
         this.onChunk({
           type: ChunkType.ERROR,
-          error: new DOMException('Request was aborted', 'AbortError')
+          error: this.idleTimeoutTriggered
+            ? new DOMException(
+                `SSE idle timeout after ${Math.round((this.idleTimeoutMs ?? 0) / 60000)} minutes`,
+                'TimeoutError'
+              )
+            : new DOMException('Request was aborted', 'AbortError')
         })
         break
       case 'error':
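Surfacing the idle case as a `TimeoutError` DOMException (instead of the generic `AbortError`) lets whoever consumes these ERROR chunks tell a stalled stream apart from a deliberate cancellation; a sketch of such a consumer, hypothetical but consistent with the errors emitted above:

```ts
// Sketch: distinguishing the two abort flavors emitted above
function describeStreamError(error: unknown): string {
  if (error instanceof DOMException && error.name === 'TimeoutError') {
    return 'Stream went silent and was cut off by the SSE idle timeout'
  }
  if (error instanceof DOMException && error.name === 'AbortError') {
    return 'Request was cancelled (user stop or overall request timeout)'
  }
  return 'Provider or network error'
}
```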
@@ -42,6 +42,10 @@ export type ModernAiProviderConfig = AiSdkMiddlewareConfig & {
   // topicId for tracing
   topicId?: string
   callType: string
+  streamingConfig?: {
+    idleTimeoutMs: number
+    idleAbortController: AbortController
+  }
 }

 export default class ModernAiProvider {
@@ -330,7 +334,15 @@ export default class ModernAiProvider {
     // Create an executor with middleware
     if (config.onChunk) {
       const accumulate = this.model!.supported_text_delta !== false // true and undefined
-      const adapter = new AiSdkToChunkAdapter(config.onChunk, config.mcpTools, accumulate, config.enableWebSearch)
+      const adapter = new AiSdkToChunkAdapter(
+        config.onChunk,
+        config.mcpTools ?? [],
+        accumulate,
+        config.enableWebSearch,
+        undefined,
+        undefined,
+        config.streamingConfig
+      )

       const streamResult = await executor.streamText({
         ...params,
@@ -142,6 +142,10 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
       return { thinking: { type: reasoningEffort ? 'enabled' : 'disabled' } }
     }

+    if (reasoningEffort === 'default') {
+      return {}
+    }
+
     if (!reasoningEffort) {
       // DeepSeek hybrid inference models, v3.1 and maybe more in the future
       // Different providers expose different thinking controls; unify them here
@@ -303,7 +307,7 @@ export class OpenAIAPIClient extends OpenAIBaseClient<
     // Grok models/Perplexity models/OpenAI models
     if (isSupportedReasoningEffortModel(model)) {
       // Check whether the model supports the selected option
-      const supportedOptions = getModelSupportedReasoningEffortOptions(model)
+      const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
       if (supportedOptions?.includes(reasoningEffort)) {
         return {
           reasoning_effort: reasoningEffort
@@ -18,7 +18,7 @@ vi.mock('@renderer/services/AssistantService', () => ({
     toolUseMode: assistant.settings?.toolUseMode ?? 'prompt',
     defaultModel: assistant.defaultModel,
     customParameters: assistant.settings?.customParameters ?? [],
-    reasoning_effort: assistant.settings?.reasoning_effort,
+    reasoning_effort: assistant.settings?.reasoning_effort ?? 'default',
     reasoning_effort_cache: assistant.settings?.reasoning_effort_cache,
     qwenThinkMode: assistant.settings?.qwenThinkMode
   })
@@ -0,0 +1,161 @@
+import type { Assistant, Model, Provider } from '@renderer/types'
+import { describe, expect, it, vi } from 'vitest'
+
+vi.mock('@renderer/services/AssistantService', () => ({
+  DEFAULT_ASSISTANT_SETTINGS: {
+    temperature: 0.7,
+    enableTemperature: true,
+    contextCount: 5,
+    enableMaxTokens: false,
+    maxTokens: 0,
+    streamOutput: true,
+    topP: 1,
+    enableTopP: false,
+    toolUseMode: 'function',
+    customParameters: []
+  },
+  getDefaultAssistant: vi.fn(() => ({
+    id: 'default',
+    name: 'Default Assistant',
+    prompt: '',
+    type: 'assistant',
+    topics: [],
+    settings: {
+      temperature: 0.7,
+      enableTemperature: true,
+      contextCount: 5,
+      enableMaxTokens: false,
+      maxTokens: 0,
+      streamOutput: true,
+      topP: 1,
+      enableTopP: false,
+      toolUseMode: 'function',
+      customParameters: []
+    }
+  })),
+  getDefaultModel: vi.fn(() => ({
+    id: 'gpt-4o',
+    provider: 'openai',
+    name: 'GPT-4o',
+    group: 'openai'
+  })),
+  getAssistantSettings: vi.fn((assistant: any) => assistant?.settings ?? {}),
+  getProviderByModel: vi.fn(() => ({
+    id: 'openai',
+    type: 'openai',
+    name: 'OpenAI',
+    apiKey: '',
+    apiHost: 'https://example.com/v1',
+    models: []
+  })),
+  getDefaultTopic: vi.fn(() => ({
+    id: 'topic-1',
+    assistantId: 'default',
+    createdAt: new Date().toISOString(),
+    updatedAt: new Date().toISOString(),
+    name: 'Default Topic',
+    messages: [],
+    isNameManuallyEdited: false
+  }))
+}))
+
+vi.mock('@renderer/store', () => ({
+  default: {
+    getState: vi.fn(() => ({
+      websearch: {
+        maxResults: 5,
+        excludeDomains: [],
+        searchWithTime: false
+      }
+    }))
+  }
+}))
+
+vi.mock('@renderer/utils/prompt', () => ({
+  replacePromptVariables: vi.fn(async (prompt: string) => prompt)
+}))
+
+vi.mock('../../utils/mcp', () => ({
+  setupToolsConfig: vi.fn(() => undefined)
+}))
+
+vi.mock('../../utils/options', () => ({
+  buildProviderOptions: vi.fn(() => ({
+    providerOptions: {},
+    standardParams: {}
+  }))
+}))
+
+import { buildStreamTextParams } from '../parameterBuilder'
+
+const createModel = (): Model => ({
+  id: 'gpt-4o',
+  provider: 'openai',
+  name: 'GPT-4o',
+  group: 'openai'
+})
+
+const createAssistant = (model: Model): Assistant => ({
+  id: 'assistant-1',
+  name: 'Assistant',
+  prompt: '',
+  type: 'assistant',
+  topics: [],
+  model,
+  settings: {}
+})
+
+const createProvider = (model: Model, overrides: Partial<Provider> = {}): Provider => ({
+  id: 'openai-response',
+  type: 'openai-response',
+  name: 'OpenAI Responses',
+  apiKey: 'test',
+  apiHost: 'https://example.com/v1',
+  models: [model],
+  ...overrides
+})
+
+describe('parameterBuilder.buildStreamTextParams', () => {
+  it('uses default max tool steps when unset', async () => {
+    const model = createModel()
+    const assistant = createAssistant(model)
+    const provider = createProvider(model)
+
+    const { params } = await buildStreamTextParams([], assistant, provider, {})
+    const stopWhen = params.stopWhen as any
+
+    expect(stopWhen({ steps: new Array(19) })).toBe(false)
+    expect(stopWhen({ steps: new Array(20) })).toBe(true)
+  })
+
+  it('uses provider.maxToolSteps when set', async () => {
+    const model = createModel()
+    const assistant = createAssistant(model)
+    const provider = createProvider(model, { maxToolSteps: 42 })
+
+    const { params } = await buildStreamTextParams([], assistant, provider, {})
+    const stopWhen = params.stopWhen as any
+
+    expect(stopWhen({ steps: new Array(41) })).toBe(false)
+    expect(stopWhen({ steps: new Array(42) })).toBe(true)
+  })
+
+  it('returns streamingConfig and abortSignal when SSE idle timeout is enabled', async () => {
+    const model = createModel()
+    const assistant = createAssistant(model)
+    const provider = createProvider(model, { sseIdleTimeoutMinutes: 10 })
+
+    const userAbortController = new AbortController()
+
+    const { params, streamingConfig } = await buildStreamTextParams([], assistant, provider, {
+      requestOptions: { signal: userAbortController.signal }
+    })
+
+    expect(streamingConfig?.idleTimeoutMs).toBe(10 * 60 * 1000)
+    expect(streamingConfig?.idleAbortController).toBeInstanceOf(AbortController)
+    expect(params.abortSignal).toBeDefined()
+
+    userAbortController.abort()
+    expect(params.abortSignal?.aborted).toBe(true)
+  })
+})
@@ -40,6 +40,7 @@ import { stepCountIs } from 'ai'

 import { getAiSdkProviderId } from '../provider/factory'
 import { setupToolsConfig } from '../utils/mcp'
 import { buildProviderOptions } from '../utils/options'
+import { buildCombinedAbortSignal, normalizeMaxToolSteps, timeoutMinutesToMs } from '../utils/streamingTimeout'
 import { buildProviderBuiltinWebSearchConfig } from '../utils/websearch'
 import { addAnthropicHeaders } from './header'
 import { getMaxTokens, getTemperature, getTopP } from './modelParameters'
@@ -95,6 +96,10 @@ export async function buildStreamTextParams(
     enableUrlContext: boolean
   }
   webSearchPluginConfig?: WebSearchPluginConfig
+  streamingConfig?: {
+    idleTimeoutMs: number
+    idleAbortController: AbortController
+  }
 }> {
   const { mcpTools } = options

@@ -218,6 +223,17 @@ export async function buildStreamTextParams(
   // Note: standardParams (topK, frequencyPenalty, presencePenalty, stopSequences, seed)
   // are extracted from custom parameters and passed directly to streamText()
   // instead of being placed in providerOptions
+  const requestTimeoutMs = timeoutMinutesToMs(provider.requestTimeoutMinutes)
+  const idleTimeoutMs = timeoutMinutesToMs(provider.sseIdleTimeoutMinutes)
+  const idleAbortController = idleTimeoutMs ? new AbortController() : undefined
+
+  const abortSignal = buildCombinedAbortSignal([
+    options.requestOptions?.signal,
+    requestTimeoutMs ? AbortSignal.timeout(requestTimeoutMs) : undefined,
+    idleAbortController?.signal
+  ])
+
+  const maxToolSteps = normalizeMaxToolSteps(provider.maxToolSteps)
   const params: StreamTextParams = {
     messages: sdkMessages,
     maxOutputTokens: getMaxTokens(assistant, model),
@@ -225,10 +241,10 @@ export async function buildStreamTextParams(
     topP: getTopP(assistant, model),
     // Include AI SDK standard params extracted from custom parameters
     ...standardParams,
-    abortSignal: options.requestOptions?.signal,
+    abortSignal,
     headers,
     providerOptions,
-    stopWhen: stepCountIs(20),
+    stopWhen: stepCountIs(maxToolSteps),
     maxRetries: 0
   }

@@ -246,7 +262,14 @@ export async function buildStreamTextParams(
     params,
     modelId: model.id,
     capabilities: { enableReasoning, enableWebSearch, enableGenerateImage, enableUrlContext },
-    webSearchPluginConfig
+    webSearchPluginConfig,
+    streamingConfig:
+      idleTimeoutMs && idleAbortController
+        ? {
+            idleTimeoutMs,
+            idleAbortController
+          }
+        : undefined
   }
 }
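`abortSignal` is now fed by up to three independent sources: the caller's signal, an overall request timeout, and the idle watchdog's controller. A small sketch of how the combination behaves (standalone; `AbortSignal.any` and `AbortSignal.timeout` need Node 20+ or a modern Chromium, which Electron provides):

```ts
// Sketch: any one source aborting trips the combined signal
const user = new AbortController()
const idle = new AbortController()
const combined = AbortSignal.any([
  user.signal,
  AbortSignal.timeout(30 * 60 * 1000), // overall request timeout (30 min, illustrative)
  idle.signal // tripped by the SSE idle watchdog
])

combined.addEventListener('abort', () => console.log('aborted:', combined.reason))
idle.abort(new DOMException('idle', 'TimeoutError')) // e.g. the watchdog fires
```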
@@ -79,7 +79,7 @@ vi.mock('@renderer/services/AssistantService', () => ({
 import { getProviderByModel } from '@renderer/services/AssistantService'
 import type { Model, Provider } from '@renderer/types'
 import { formatApiHost } from '@renderer/utils/api'
-import { isCherryAIProvider, isPerplexityProvider } from '@renderer/utils/provider'
+import { isAzureOpenAIProvider, isCherryAIProvider, isPerplexityProvider } from '@renderer/utils/provider'

 import { COPILOT_DEFAULT_HEADERS, COPILOT_EDITOR_VERSION, isCopilotResponsesModel } from '../constants'
 import { getActualProvider, providerToAiSdkConfig } from '../providerConfig'
@@ -133,6 +133,17 @@ const createPerplexityProvider = (): Provider => ({
   isSystem: false
 })

+const createAzureProvider = (apiVersion: string): Provider => ({
+  id: 'azure-openai',
+  type: 'azure-openai',
+  name: 'Azure OpenAI',
+  apiKey: 'test-key',
+  apiHost: 'https://example.openai.azure.com/openai',
+  apiVersion,
+  models: [],
+  isSystem: true
+})
+
 describe('Copilot responses routing', () => {
   beforeEach(() => {
     ;(globalThis as any).window = {
@@ -504,3 +515,46 @@ describe('Stream options includeUsage configuration', () => {
     expect(config.providerId).toBe('github-copilot-openai-compatible')
   })
 })
+
+describe('Azure OpenAI traditional API routing', () => {
+  beforeEach(() => {
+    ;(globalThis as any).window = {
+      ...(globalThis as any).window,
+      keyv: createWindowKeyv()
+    }
+    mockGetState.mockReturnValue({
+      settings: {
+        openAI: {
+          streamOptions: {
+            includeUsage: undefined
+          }
+        }
+      }
+    })
+
+    vi.mocked(isAzureOpenAIProvider).mockImplementation((provider) => provider.type === 'azure-openai')
+  })
+
+  it('uses deployment-based URLs when apiVersion is a date version', () => {
+    const provider = createAzureProvider('2024-02-15-preview')
+    const config = providerToAiSdkConfig(provider, createModel('gpt-4o', 'GPT-4o', provider.id))
+
+    expect(config.providerId).toBe('azure')
+    expect(config.options.apiVersion).toBe('2024-02-15-preview')
+    expect(config.options.useDeploymentBasedUrls).toBe(true)
+  })
+
+  it('does not force deployment-based URLs for apiVersion v1/preview', () => {
+    const v1Provider = createAzureProvider('v1')
+    const v1Config = providerToAiSdkConfig(v1Provider, createModel('gpt-4o', 'GPT-4o', v1Provider.id))
+    expect(v1Config.providerId).toBe('azure-responses')
+    expect(v1Config.options.apiVersion).toBe('v1')
+    expect(v1Config.options.useDeploymentBasedUrls).toBeUndefined()
+
+    const previewProvider = createAzureProvider('preview')
+    const previewConfig = providerToAiSdkConfig(previewProvider, createModel('gpt-4o', 'GPT-4o', previewProvider.id))
+    expect(previewConfig.providerId).toBe('azure-responses')
+    expect(previewConfig.options.apiVersion).toBe('preview')
+    expect(previewConfig.options.useDeploymentBasedUrls).toBeUndefined()
+  })
+})
@@ -214,6 +214,15 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
   } else if (aiSdkProviderId === 'azure') {
     extraOptions.mode = 'chat'
   }
+  if (isAzureOpenAIProvider(actualProvider)) {
+    const apiVersion = actualProvider.apiVersion?.trim()
+    if (apiVersion) {
+      extraOptions.apiVersion = apiVersion
+      if (!['preview', 'v1'].includes(apiVersion)) {
+        extraOptions.useDeploymentBasedUrls = true
+      }
+    }
+  }

   // bedrock
   if (aiSdkProviderId === 'bedrock') {
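The rule being encoded: date-style `apiVersion` values (e.g. `2024-02-15-preview`) select Azure's traditional deployment-scoped endpoints, while the newer `v1`/`preview` values stay on the unified routes. The same decision as a standalone helper, for illustration only:

```ts
// Sketch: the branch above as a pure function (illustrative)
function azureExtraOptions(apiVersion?: string): { apiVersion?: string; useDeploymentBasedUrls?: boolean } {
  const v = apiVersion?.trim()
  if (!v) return {}
  return ['preview', 'v1'].includes(v)
    ? { apiVersion: v } // unified API surface: no deployment-scoped URLs
    : { apiVersion: v, useDeploymentBasedUrls: true } // date versions use deployment URLs
}

azureExtraOptions('2024-02-15-preview') // => { apiVersion: '2024-02-15-preview', useDeploymentBasedUrls: true }
azureExtraOptions('v1') // => { apiVersion: 'v1' }
```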
@@ -11,6 +11,7 @@ import { beforeEach, describe, expect, it, vi } from 'vitest'

 import {
   getAnthropicReasoningParams,
+  getAnthropicThinkingBudget,
   getBedrockReasoningParams,
   getCustomParameters,
   getGeminiReasoningParams,
@@ -89,7 +90,8 @@ vi.mock('@renderer/config/models', async (importOriginal) => {
     isQwenAlwaysThinkModel: vi.fn(() => false),
     isSupportedThinkingTokenHunyuanModel: vi.fn(() => false),
     isSupportedThinkingTokenModel: vi.fn(() => false),
-    isGPT51SeriesModel: vi.fn(() => false)
+    isGPT51SeriesModel: vi.fn(() => false),
+    findTokenLimit: vi.fn(actual.findTokenLimit)
   }
 })
@@ -596,7 +598,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({})
   })

-  it('should return disabled thinking when no reasoning effort', async () => {
+  it('should return disabled thinking when reasoning effort is none', async () => {
     const { isReasoningModel, isSupportedThinkingTokenClaudeModel } = await import('@renderer/config/models')

     vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -611,7 +613,9 @@ describe('reasoning utils', () => {
     const assistant: Assistant = {
       id: 'test',
       name: 'Test',
-      settings: {}
+      settings: {
+        reasoning_effort: 'none'
+      }
     } as Assistant

     const result = getAnthropicReasoningParams(assistant, model)
@@ -647,7 +651,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({
       thinking: {
         type: 'enabled',
-        budgetTokens: 2048
+        budgetTokens: 4096
       }
     })
   })
@@ -675,7 +679,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({})
   })

-  it('should disable thinking for Flash models without reasoning effort', async () => {
+  it('should disable thinking for Flash models when reasoning effort is none', async () => {
     const { isReasoningModel, isSupportedThinkingTokenGeminiModel } = await import('@renderer/config/models')

     vi.mocked(isReasoningModel).mockReturnValue(true)
@@ -690,7 +694,9 @@ describe('reasoning utils', () => {
     const assistant: Assistant = {
       id: 'test',
       name: 'Test',
-      settings: {}
+      settings: {
+        reasoning_effort: 'none'
+      }
     } as Assistant

     const result = getGeminiReasoningParams(assistant, model)
@@ -725,7 +731,7 @@ describe('reasoning utils', () => {
     const result = getGeminiReasoningParams(assistant, model)
     expect(result).toEqual({
       thinkingConfig: {
-        thinkingBudget: 16448,
+        thinkingBudget: expect.any(Number),
         includeThoughts: true
       }
     })
@@ -889,7 +895,7 @@ describe('reasoning utils', () => {
     expect(result).toEqual({
       reasoningConfig: {
         type: 'enabled',
-        budgetTokens: 2048
+        budgetTokens: 4096
       }
     })
   })
@@ -990,4 +996,89 @@ describe('reasoning utils', () => {
       })
     })
   })
+
+  describe('getAnthropicThinkingBudget', () => {
+    it('should return undefined when reasoningEffort is undefined', async () => {
+      const result = getAnthropicThinkingBudget(4096, undefined, 'claude-3-7-sonnet')
+      expect(result).toBeUndefined()
+    })
+
+    it('should return undefined when reasoningEffort is none', async () => {
+      const result = getAnthropicThinkingBudget(4096, 'none', 'claude-3-7-sonnet')
+      expect(result).toBeUndefined()
+    })
+
+    it('should return undefined when tokenLimit is not found', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue(undefined)
+
+      const result = getAnthropicThinkingBudget(4096, 'medium', 'unknown-model')
+      expect(result).toBeUndefined()
+    })
+
+    it('should calculate budget correctly when maxTokens is provided', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+      const result = getAnthropicThinkingBudget(4096, 'medium', 'claude-3-7-sonnet')
+      // EFFORT_RATIO['medium'] = 0.5
+      // budget = Math.floor((32768 - 1024) * 0.5 + 1024)
+      //        = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
+      // budgetTokens = Math.min(16896, 4096) = 4096
+      // result = Math.max(1024, 4096) = 4096
+      expect(result).toBe(4096)
+    })
+
+    it('should use tokenLimit.max when maxTokens is undefined', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+      const result = getAnthropicThinkingBudget(undefined, 'medium', 'claude-3-7-sonnet')
+      // When maxTokens is undefined, budget is not constrained by maxTokens
+      // EFFORT_RATIO['medium'] = 0.5
+      // budget = Math.floor((32768 - 1024) * 0.5 + 1024)
+      //        = Math.floor(31744 * 0.5 + 1024) = Math.floor(15872 + 1024) = 16896
+      // result = Math.max(1024, 16896) = 16896
+      expect(result).toBe(16896)
+    })
+
+    it('should enforce minimum budget of 1024', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 100, max: 1000 })
+
+      const result = getAnthropicThinkingBudget(500, 'low', 'claude-3-7-sonnet')
+      // EFFORT_RATIO['low'] = 0.05
+      // budget = Math.floor((1000 - 100) * 0.05 + 100)
+      //        = Math.floor(900 * 0.05 + 100) = Math.floor(45 + 100) = 145
+      // budgetTokens = Math.min(145, 500) = 145
+      // result = Math.max(1024, 145) = 1024
+      expect(result).toBe(1024)
+    })
+
+    it('should respect effort ratio for high reasoning effort', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+      const result = getAnthropicThinkingBudget(8192, 'high', 'claude-3-7-sonnet')
+      // EFFORT_RATIO['high'] = 0.8
+      // budget = Math.floor((32768 - 1024) * 0.8 + 1024)
+      //        = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
+      // budgetTokens = Math.min(26419, 8192) = 8192
+      // result = Math.max(1024, 8192) = 8192
+      expect(result).toBe(8192)
+    })
+
+    it('should use full token limit when maxTokens is undefined and reasoning effort is high', async () => {
+      const { findTokenLimit } = await import('@renderer/config/models')
+      vi.mocked(findTokenLimit).mockReturnValue({ min: 1024, max: 32768 })
+
+      const result = getAnthropicThinkingBudget(undefined, 'high', 'claude-3-7-sonnet')
+      // When maxTokens is undefined, budget is not constrained by maxTokens
+      // EFFORT_RATIO['high'] = 0.8
+      // budget = Math.floor((32768 - 1024) * 0.8 + 1024)
+      //        = Math.floor(31744 * 0.8 + 1024) = Math.floor(25395.2 + 1024) = 26419
+      // result = Math.max(1024, 26419) = 26419
+      expect(result).toBe(26419)
+    })
+  })
 })
@@ -0,0 +1,50 @@
+import { describe, expect, it } from 'vitest'
+
+import {
+  buildCombinedAbortSignal,
+  normalizeMaxToolSteps,
+  normalizeTimeoutMinutes,
+  timeoutMinutesToMs
+} from '../streamingTimeout'
+
+describe('streamingTimeout utils', () => {
+  it('normalizeTimeoutMinutes returns undefined for non-numbers', () => {
+    expect(normalizeTimeoutMinutes(undefined)).toBeUndefined()
+    expect(normalizeTimeoutMinutes(null)).toBeUndefined()
+    expect(normalizeTimeoutMinutes('10')).toBeUndefined()
+  })
+
+  it('normalizeTimeoutMinutes clamps to integer >= 0', () => {
+    expect(normalizeTimeoutMinutes(-1)).toBe(0)
+    expect(normalizeTimeoutMinutes(0)).toBe(0)
+    expect(normalizeTimeoutMinutes(1.9)).toBe(1)
+  })
+
+  it('timeoutMinutesToMs returns undefined for 0/undefined and converts minutes to ms', () => {
+    expect(timeoutMinutesToMs(undefined)).toBeUndefined()
+    expect(timeoutMinutesToMs(0)).toBeUndefined()
+    expect(timeoutMinutesToMs(2)).toBe(2 * 60 * 1000)
+  })
+
+  it('normalizeMaxToolSteps uses defaults and clamps', () => {
+    expect(normalizeMaxToolSteps(undefined, { defaultSteps: 20, maxSteps: 50 })).toBe(20)
+    expect(normalizeMaxToolSteps(-1, { defaultSteps: 20, maxSteps: 50 })).toBe(20)
+    expect(normalizeMaxToolSteps(10.2, { defaultSteps: 20, maxSteps: 50 })).toBe(10)
+    expect(normalizeMaxToolSteps(999, { defaultSteps: 20, maxSteps: 50 })).toBe(50)
+  })
+
+  it('buildCombinedAbortSignal returns undefined for empty and combines signals', () => {
+    expect(buildCombinedAbortSignal([])).toBeUndefined()
+
+    const controllerA = new AbortController()
+    const controllerB = new AbortController()
+
+    const single = buildCombinedAbortSignal([controllerA.signal])
+    expect(single).toBe(controllerA.signal)
+
+    const combined = buildCombinedAbortSignal([controllerA.signal, controllerB.signal])
+    expect(combined?.aborted).toBe(false)
+    controllerB.abort()
+    expect(combined?.aborted).toBe(true)
+  })
+})
@@ -10,6 +10,7 @@ import {
   GEMINI_FLASH_MODEL_REGEX,
   getModelSupportedReasoningEffortOptions,
   isDeepSeekHybridInferenceModel,
+  isDoubaoSeed18Model,
   isDoubaoSeedAfter251015,
   isDoubaoThinkingAutoModel,
   isGemini3ThinkingTokenModel,
@@ -28,6 +29,7 @@ import {
   isSupportedThinkingTokenDoubaoModel,
   isSupportedThinkingTokenGeminiModel,
   isSupportedThinkingTokenHunyuanModel,
+  isSupportedThinkingTokenMiMoModel,
   isSupportedThinkingTokenModel,
   isSupportedThinkingTokenQwenModel,
   isSupportedThinkingTokenZhipuModel
@@ -64,7 +66,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   // reasoningEffort is not set, no extra reasoning setting
   // Generally, for every model which supports reasoning control, the reasoning effort won't be undefined.
   // It's for some reasoning models that don't support reasoning control, such as deepseek reasoner.
-  if (!reasoningEffort) {
+  if (!reasoningEffort || reasoningEffort === 'default') {
     return {}
   }
@@ -329,7 +331,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   // Grok models/Perplexity models/OpenAI models, use reasoning_effort
   if (isSupportedReasoningEffortModel(model)) {
     // Check whether the model supports the selected option
-    const supportedOptions = getModelSupportedReasoningEffortOptions(model)
+    const supportedOptions = getModelSupportedReasoningEffortOptions(model)?.filter((option) => option !== 'default')
     if (supportedOptions?.includes(reasoningEffort)) {
       return {
         reasoningEffort
@@ -389,7 +391,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin

   // Use thinking, doubao, zhipu, etc.
   if (isSupportedThinkingTokenDoubaoModel(model)) {
-    if (isDoubaoSeedAfter251015(model)) {
+    if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
       return { reasoningEffort }
     }
     if (reasoningEffort === 'high') {
@@ -408,6 +410,12 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
     return { thinking: { type: 'enabled' } }
   }

+  if (isSupportedThinkingTokenMiMoModel(model)) {
+    return {
+      thinking: { type: 'enabled' }
+    }
+  }
+
   // Default case: no special thinking settings
   return {}
 }
@@ -427,7 +435,7 @@ export function getOpenAIReasoningParams(

   let reasoningEffort = assistant?.settings?.reasoning_effort

-  if (!reasoningEffort) {
+  if (!reasoningEffort || reasoningEffort === 'default') {
     return {}
   }
@@ -479,16 +487,14 @@ export function getAnthropicThinkingBudget(
     return undefined
   }

-  const budgetTokens = Math.max(
-    1024,
-    Math.floor(
-      Math.min(
-        (tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min,
-        (maxTokens || DEFAULT_MAX_TOKENS) * effortRatio
-      )
-    )
-  )
-  return budgetTokens
+  const budget = Math.floor((tokenLimit.max - tokenLimit.min) * effortRatio + tokenLimit.min)
+
+  let budgetTokens = budget
+  if (maxTokens !== undefined) {
+    budgetTokens = Math.min(budget, maxTokens)
+  }
+
+  return Math.max(1024, budgetTokens)
 }
|
||||
|
||||
const reasoningEffort = assistant?.settings?.reasoning_effort
|
||||
|
||||
if (reasoningEffort === undefined || reasoningEffort === 'none') {
|
||||
if (!reasoningEffort || reasoningEffort === 'default') {
|
||||
return {}
|
||||
}
|
||||
|
||||
if (reasoningEffort === 'none') {
|
||||
return {
|
||||
thinking: {
|
||||
type: 'disabled'
|
||||
@@ -560,6 +570,10 @@ export function getGeminiReasoningParams(

   const reasoningEffort = assistant?.settings?.reasoning_effort

+  if (!reasoningEffort || reasoningEffort === 'default') {
+    return {}
+  }
+
   // Gemini reasoning parameters
   if (isSupportedThinkingTokenGeminiModel(model)) {
     if (reasoningEffort === undefined || reasoningEffort === 'none') {
@@ -620,10 +634,6 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<

   const { reasoning_effort: reasoningEffort } = getAssistantSettings(assistant)

-  if (!reasoningEffort || reasoningEffort === 'none') {
-    return {}
-  }
-
   switch (reasoningEffort) {
     case 'auto':
     case 'minimal':
@@ -634,6 +644,10 @@ export function getXAIReasoningParams(assistant: Assistant, model: Model): Pick<
       return { reasoningEffort }
     case 'xhigh':
       return { reasoningEffort: 'high' }
+    case 'default':
+    case 'none':
+    default:
+      return {}
   }
 }
@@ -650,7 +664,7 @@ export function getBedrockReasoningParams(

   const reasoningEffort = assistant?.settings?.reasoning_effort

-  if (reasoningEffort === undefined) {
+  if (reasoningEffort === undefined || reasoningEffort === 'default') {
     return {}
   }
src/renderer/src/aiCore/utils/streamingTimeout.ts (new file, 40 lines)
@ -0,0 +1,40 @@
|
||||
export const DEFAULT_MAX_TOOL_STEPS = 20
|
||||
export const MAX_MAX_TOOL_STEPS = 500
|
||||
|
||||
export function normalizeTimeoutMinutes(value: unknown): number | undefined {
|
||||
if (value === undefined || value === null) return undefined
|
||||
if (typeof value !== 'number' || !Number.isFinite(value)) return undefined
|
||||
const normalized = Math.max(0, Math.floor(value))
|
||||
return normalized
|
||||
}
|
||||
|
||||
export function timeoutMinutesToMs(minutes: unknown): number | undefined {
|
||||
const normalized = normalizeTimeoutMinutes(minutes)
|
||||
if (normalized === undefined || normalized <= 0) return undefined
|
||||
return normalized * 60 * 1000
|
||||
}
|
||||
|
||||
export function normalizeMaxToolSteps(
|
||||
value: unknown,
|
||||
options: {
|
||||
defaultSteps?: number
|
||||
maxSteps?: number
|
||||
} = {}
|
||||
): number {
|
||||
const defaultSteps = options.defaultSteps ?? DEFAULT_MAX_TOOL_STEPS
|
||||
const maxSteps = options.maxSteps ?? MAX_MAX_TOOL_STEPS
|
||||
|
||||
if (value === undefined || value === null) return defaultSteps
|
||||
if (typeof value !== 'number' || !Number.isFinite(value)) return defaultSteps
|
||||
|
||||
const normalized = Math.floor(value)
|
||||
if (normalized <= 0) return defaultSteps
|
||||
return Math.min(normalized, maxSteps)
|
||||
}
|
||||
|
||||
export function buildCombinedAbortSignal(signals: Array<AbortSignal | undefined | null>): AbortSignal | undefined {
|
||||
const validSignals = signals.filter((s): s is AbortSignal => Boolean(s))
|
||||
if (validSignals.length === 0) return undefined
|
||||
if (validSignals.length === 1) return validSignals[0]
|
||||
return AbortSignal.any(validSignals)
|
||||
}
|
||||
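A minimal usage sketch for these helpers. The userSettings object is a hypothetical caller-side value; only the helper functions come from the file above:

// Hypothetical settings values, for illustration
const timeoutMs = timeoutMinutesToMs(userSettings.streamTimeoutMinutes) // e.g. 5 -> 300000; 0 or NaN -> undefined
const maxSteps = normalizeMaxToolSteps(userSettings.maxToolSteps) // undefined/0/NaN -> 20; 9999 -> clamped to 500

// Combine a user-initiated abort with an optional timeout signal
const controller = new AbortController()
const signal = buildCombinedAbortSignal([
  controller.signal,
  timeoutMs !== undefined ? AbortSignal.timeout(timeoutMs) : undefined
])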
src/renderer/src/assets/images/models/mimo.svg (new file, 17 lines, 6.2 KiB)
@@ -0,0 +1,17 @@
<svg width="100" height="100" viewBox="0 0 100 100" fill="none" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(10, 42) scale(1.35)">
<!-- m -->
<path d="M1.2683 15.9987C0.9317 15.998 0.6091 15.8638 0.3713 15.6256C0.1335 15.3873 0 15.0644 0 14.7278V7.165C0.0148 6.83757 0.1554 6.52848 0.3924 6.30203C0.6293 6.07559 0.9445 5.94922 1.2722 5.94922C1.6 5.94922 1.9152 6.07559 2.1521 6.30203C2.3891 6.52848 2.5296 6.83757 2.5445 7.165V14.7278C2.5442 14.895 2.5109 15.0606 2.4466 15.215C2.3822 15.3693 2.2881 15.5095 2.1696 15.6276C2.0511 15.7456 1.9105 15.8391 1.7559 15.9028C1.6012 15.9665 1.4356 15.9991 1.2683 15.9987Z" fill="currentColor"/>
<path d="M14.8841 15.9993C14.5468 15.9993 14.2232 15.8655 13.9845 15.6272C13.7457 15.389 13.6112 15.0657 13.6105 14.7284V4.67881L8.9888 9.45281C8.7538 9.69657 8.4315 9.83697 8.0929 9.84312C7.7544 9.84928 7.4272 9.72069 7.1835 9.48563C6.9397 9.25058 6.7993 8.92832 6.7931 8.58976C6.7901 8.42211 6.8201 8.25551 6.8814 8.09947C6.9428 7.94342 7.0342 7.80098 7.1506 7.68028L13.9703 0.661082C14.1463 0.478921 14.3728 0.35354 14.6207 0.301033C14.8685 0.248526 15.1264 0.271291 15.3612 0.366403C15.5961 0.461516 15.7971 0.624637 15.9385 0.834827C16.08 1.04502 16.1554 1.29268 16.1551 1.54603V14.7284C16.1551 15.0655 16.0212 15.3887 15.7828 15.6271C15.5444 15.8654 15.2212 15.9993 14.8841 15.9993Z" fill="currentColor"/>
<path d="M8.0748 9.82621C7.9058 9.82749 7.7383 9.79518 7.5818 9.73117C7.4254 9.66716 7.2833 9.57272 7.1636 9.45332L0.3571 2.4315C0.1224 2.18948 -0.0065 1.86414 -0.0014 1.52705C0.0038 1.18996 0.1427 0.868726 0.3847 0.634023C0.6267 0.399319 0.9521 0.270369 1.2892 0.27554C1.6262 0.280711 1.9475 0.419579 2.1822 0.661595L8.9887 7.66767C9.1623 7.84735 9.2792 8.07413 9.3249 8.31977C9.3706 8.56541 9.343 8.81906 9.2456 9.04914C9.1482 9.27922 8.9852 9.47557 8.7771 9.61374C8.5689 9.75191 8.3247 9.8258 8.0748 9.82621Z" fill="currentColor"/>
<!-- i -->
<path d="M20.3539 15.9997C20.0169 15.9997 19.6936 15.8658 19.4552 15.6274C19.2169 15.3891 19.083 15.0658 19.083 14.7287V1.54636C19.083 1.20928 19.2169 0.886001 19.4552 0.647648C19.6936 0.409296 20.0169 0.275391 20.3539 0.275391C20.691 0.275391 21.0143 0.409296 21.2526 0.647648C21.491 0.886001 21.6249 1.20928 21.6249 1.54636V14.7287C21.6249 14.8956 21.592 15.0609 21.5282 15.2151C21.4643 15.3693 21.3707 15.5094 21.2526 15.6274C21.1346 15.7454 20.9945 15.839 20.8403 15.9029C20.6861 15.9668 20.5208 15.9997 20.3539 15.9997Z" fill="currentColor"/>
<!-- m -->
<path d="M25.8263 15.9992C25.4893 15.9992 25.166 15.8653 24.9276 15.627C24.6893 15.3886 24.5554 15.0654 24.5554 14.7283V7.1655C24.5554 6.82842 24.6893 6.50514 24.9276 6.26679C25.166 6.02844 25.4893 5.89453 25.8263 5.89453C26.1634 5.89453 26.4867 6.02844 26.7251 6.26679C26.9634 6.50514 27.0973 6.82842 27.0973 7.1655V14.7283C27.0973 15.0654 26.9634 15.3886 26.7251 15.627C26.4867 15.8653 26.1634 15.9992 25.8263 15.9992Z" fill="currentColor"/>
<path d="M39.4394 16.0004C39.1023 16.0004 38.779 15.8664 38.5406 15.6281C38.3023 15.3897 38.1684 15.0665 38.1684 14.7294V4.67982L33.5467 9.45382C33.3117 9.69584 32.9901 9.83457 32.6523 9.83949C32.3156 9.84442 31.9894 9.71513 31.7474 9.48008C31.5054 9.24503 31.3674 8.92346 31.3623 8.58613C31.3573 8.24879 31.4863 7.92331 31.7214 7.6813L38.5284 0.662093C38.7044 0.483575 38.9304 0.361405 39.1767 0.311007C39.4233 0.260609 39.6787 0.284243 39.9114 0.378925C40.1437 0.473608 40.3427 0.635093 40.4837 0.842994C40.6247 1.05089 40.7007 1.29589 40.7027 1.54704V14.7294C40.7017 15.0649 40.5687 15.3866 40.3327 15.6246C40.0957 15.8625 39.7747 15.9976 39.4394 16.0004Z" fill="currentColor"/>
<path d="M32.6324 9.82618C32.4634 9.82746 32.2964 9.79516 32.1394 9.73115C31.9834 9.66713 31.8414 9.57269 31.7214 9.45329L24.9151 2.43147C24.7921 2.31326 24.6942 2.1715 24.6271 2.01463C24.5601 1.85777 24.5253 1.68901 24.5249 1.51842C24.5244 1.34783 24.5583 1.1789 24.6246 1.02169C24.6908 0.864476 24.788 0.722207 24.9104 0.603357C25.0327 0.484507 25.1778 0.391509 25.3369 0.329905C25.4959 0.268302 25.6658 0.239353 25.8363 0.244785C26.0068 0.250217 26.1745 0.289918 26.3293 0.361522C26.4841 0.433126 26.623 0.535168 26.7375 0.661566L33.5467 7.66764C33.7204 7.84732 33.8374 8.0741 33.8824 8.31974C33.9284 8.56538 33.9014 8.81903 33.8034 9.04911C33.7064 9.27919 33.5434 9.47554 33.3354 9.61371C33.1267 9.75189 32.8824 9.82577 32.6324 9.82618Z" fill="currentColor"/>
<!-- o -->
<path d="M50.9434 15.9814C49.5534 15.9865 48.1864 15.6287 46.9774 14.9433C45.7674 14.2579 44.7584 13.2687 44.0484 12.0735C43.3384 10.8783 42.9534 9.5185 42.9304 8.12863C42.9074 6.73875 43.2474 5.36692 43.9164 4.1488C44.0844 3.86356 44.3564 3.65487 44.6754 3.56707C44.9944 3.47927 45.3344 3.51928 45.6244 3.67859C45.9144 3.8379 46.1314 4.10397 46.2274 4.42026C46.3244 4.73656 46.2944 5.07816 46.1434 5.3725C45.5764 6.40664 45.3594 7.59693 45.5264 8.76468C45.6924 9.93243 46.2334 11.0147 47.0674 11.8489C47.9014 12.6831 48.9834 13.2244 50.1514 13.3914C51.3184 13.5584 52.5094 13.3421 53.5434 12.7751C53.8384 12.6125 54.1864 12.5738 54.5104 12.6676C54.8344 12.7614 55.1074 12.98 55.2704 13.2753C55.4324 13.5706 55.4714 13.9184 55.3774 14.2422C55.2834 14.566 55.0654 14.8393 54.7694 15.0019C53.5974 15.6455 52.2814 15.9824 50.9434 15.9814Z" fill="currentColor"/>
<path d="M56.8104 12.5052C56.5944 12.5044 56.3834 12.4484 56.1954 12.3424C55.9014 12.1795 55.6824 11.9066 55.5894 11.5833C55.4954 11.26 55.5324 10.9126 55.6944 10.6171C56.2614 9.58297 56.4784 8.39268 56.3114 7.22493C56.1454 6.05718 55.6044 4.97496 54.7704 4.14073C53.9364 3.30649 52.8544 2.76525 51.6864 2.59825C50.5194 2.43125 49.3284 2.64749 48.2944 3.21452C48.1474 3.30059 47.9854 3.3564 47.8164 3.37863C47.6484 3.40087 47.4774 3.38908 47.3134 3.34397C47.1494 3.29886 46.9964 3.22134 46.8624 3.116C46.7294 3.01066 46.6184 2.87964 46.5364 2.73069C46.4544 2.58174 46.4034 2.41788 46.3864 2.24882C46.3684 2.07975 46.3854 1.90891 46.4354 1.7464C46.4854 1.58389 46.5674 1.43301 46.6764 1.3027C46.7854 1.17238 46.9194 1.06527 47.0704 0.987704C48.5874 0.155491 50.3324 -0.162266 52.0454 0.0821474C53.7574 0.326561 55.3454 1.11995 56.5684 2.34319C57.7914 3.56642 58.5844 5.15347 58.8294 6.86604C59.0734 8.5786 58.7554 10.3242 57.9234 11.8408C57.8144 12.0411 57.6534 12.2084 57.4574 12.3253C57.2624 12.4422 57.0384 12.5043 56.8104 12.5052Z" fill="currentColor"/>
</g>
</svg>
src/renderer/src/assets/images/providers/mimo.svg (new file, 17 lines, 6.2 KiB; same content as the model logo above)
@@ -0,0 +1,17 @@
<svg width="100" height="100" viewBox="0 0 100 100" fill="none" xmlns="http://www.w3.org/2000/svg">
<g transform="translate(10, 42) scale(1.35)">
<!-- m -->
<path d="M1.2683 15.9987C0.9317 15.998 0.6091 15.8638 0.3713 15.6256C0.1335 15.3873 0 15.0644 0 14.7278V7.165C0.0148 6.83757 0.1554 6.52848 0.3924 6.30203C0.6293 6.07559 0.9445 5.94922 1.2722 5.94922C1.6 5.94922 1.9152 6.07559 2.1521 6.30203C2.3891 6.52848 2.5296 6.83757 2.5445 7.165V14.7278C2.5442 14.895 2.5109 15.0606 2.4466 15.215C2.3822 15.3693 2.2881 15.5095 2.1696 15.6276C2.0511 15.7456 1.9105 15.8391 1.7559 15.9028C1.6012 15.9665 1.4356 15.9991 1.2683 15.9987Z" fill="currentColor"/>
<path d="M14.8841 15.9993C14.5468 15.9993 14.2232 15.8655 13.9845 15.6272C13.7457 15.389 13.6112 15.0657 13.6105 14.7284V4.67881L8.9888 9.45281C8.7538 9.69657 8.4315 9.83697 8.0929 9.84312C7.7544 9.84928 7.4272 9.72069 7.1835 9.48563C6.9397 9.25058 6.7993 8.92832 6.7931 8.58976C6.7901 8.42211 6.8201 8.25551 6.8814 8.09947C6.9428 7.94342 7.0342 7.80098 7.1506 7.68028L13.9703 0.661082C14.1463 0.478921 14.3728 0.35354 14.6207 0.301033C14.8685 0.248526 15.1264 0.271291 15.3612 0.366403C15.5961 0.461516 15.7971 0.624637 15.9385 0.834827C16.08 1.04502 16.1554 1.29268 16.1551 1.54603V14.7284C16.1551 15.0655 16.0212 15.3887 15.7828 15.6271C15.5444 15.8654 15.2212 15.9993 14.8841 15.9993Z" fill="currentColor"/>
<path d="M8.0748 9.82621C7.9058 9.82749 7.7383 9.79518 7.5818 9.73117C7.4254 9.66716 7.2833 9.57272 7.1636 9.45332L0.3571 2.4315C0.1224 2.18948 -0.0065 1.86414 -0.0014 1.52705C0.0038 1.18996 0.1427 0.868726 0.3847 0.634023C0.6267 0.399319 0.9521 0.270369 1.2892 0.27554C1.6262 0.280711 1.9475 0.419579 2.1822 0.661595L8.9887 7.66767C9.1623 7.84735 9.2792 8.07413 9.3249 8.31977C9.3706 8.56541 9.343 8.81906 9.2456 9.04914C9.1482 9.27922 8.9852 9.47557 8.7771 9.61374C8.5689 9.75191 8.3247 9.8258 8.0748 9.82621Z" fill="currentColor"/>
<!-- i -->
<path d="M20.3539 15.9997C20.0169 15.9997 19.6936 15.8658 19.4552 15.6274C19.2169 15.3891 19.083 15.0658 19.083 14.7287V1.54636C19.083 1.20928 19.2169 0.886001 19.4552 0.647648C19.6936 0.409296 20.0169 0.275391 20.3539 0.275391C20.691 0.275391 21.0143 0.409296 21.2526 0.647648C21.491 0.886001 21.6249 1.20928 21.6249 1.54636V14.7287C21.6249 14.8956 21.592 15.0609 21.5282 15.2151C21.4643 15.3693 21.3707 15.5094 21.2526 15.6274C21.1346 15.7454 20.9945 15.839 20.8403 15.9029C20.6861 15.9668 20.5208 15.9997 20.3539 15.9997Z" fill="currentColor"/>
<!-- m -->
<path d="M25.8263 15.9992C25.4893 15.9992 25.166 15.8653 24.9276 15.627C24.6893 15.3886 24.5554 15.0654 24.5554 14.7283V7.1655C24.5554 6.82842 24.6893 6.50514 24.9276 6.26679C25.166 6.02844 25.4893 5.89453 25.8263 5.89453C26.1634 5.89453 26.4867 6.02844 26.7251 6.26679C26.9634 6.50514 27.0973 6.82842 27.0973 7.1655V14.7283C27.0973 15.0654 26.9634 15.3886 26.7251 15.627C26.4867 15.8653 26.1634 15.9992 25.8263 15.9992Z" fill="currentColor"/>
<path d="M39.4394 16.0004C39.1023 16.0004 38.779 15.8664 38.5406 15.6281C38.3023 15.3897 38.1684 15.0665 38.1684 14.7294V4.67982L33.5467 9.45382C33.3117 9.69584 32.9901 9.83457 32.6523 9.83949C32.3156 9.84442 31.9894 9.71513 31.7474 9.48008C31.5054 9.24503 31.3674 8.92346 31.3623 8.58613C31.3573 8.24879 31.4863 7.92331 31.7214 7.6813L38.5284 0.662093C38.7044 0.483575 38.9304 0.361405 39.1767 0.311007C39.4233 0.260609 39.6787 0.284243 39.9114 0.378925C40.1437 0.473608 40.3427 0.635093 40.4837 0.842994C40.6247 1.05089 40.7007 1.29589 40.7027 1.54704V14.7294C40.7017 15.0649 40.5687 15.3866 40.3327 15.6246C40.0957 15.8625 39.7747 15.9976 39.4394 16.0004Z" fill="currentColor"/>
<path d="M32.6324 9.82618C32.4634 9.82746 32.2964 9.79516 32.1394 9.73115C31.9834 9.66713 31.8414 9.57269 31.7214 9.45329L24.9151 2.43147C24.7921 2.31326 24.6942 2.1715 24.6271 2.01463C24.5601 1.85777 24.5253 1.68901 24.5249 1.51842C24.5244 1.34783 24.5583 1.1789 24.6246 1.02169C24.6908 0.864476 24.788 0.722207 24.9104 0.603357C25.0327 0.484507 25.1778 0.391509 25.3369 0.329905C25.4959 0.268302 25.6658 0.239353 25.8363 0.244785C26.0068 0.250217 26.1745 0.289918 26.3293 0.361522C26.4841 0.433126 26.623 0.535168 26.7375 0.661566L33.5467 7.66764C33.7204 7.84732 33.8374 8.0741 33.8824 8.31974C33.9284 8.56538 33.9014 8.81903 33.8034 9.04911C33.7064 9.27919 33.5434 9.47554 33.3354 9.61371C33.1267 9.75189 32.8824 9.82577 32.6324 9.82618Z" fill="currentColor"/>
<!-- o -->
<path d="M50.9434 15.9814C49.5534 15.9865 48.1864 15.6287 46.9774 14.9433C45.7674 14.2579 44.7584 13.2687 44.0484 12.0735C43.3384 10.8783 42.9534 9.5185 42.9304 8.12863C42.9074 6.73875 43.2474 5.36692 43.9164 4.1488C44.0844 3.86356 44.3564 3.65487 44.6754 3.56707C44.9944 3.47927 45.3344 3.51928 45.6244 3.67859C45.9144 3.8379 46.1314 4.10397 46.2274 4.42026C46.3244 4.73656 46.2944 5.07816 46.1434 5.3725C45.5764 6.40664 45.3594 7.59693 45.5264 8.76468C45.6924 9.93243 46.2334 11.0147 47.0674 11.8489C47.9014 12.6831 48.9834 13.2244 50.1514 13.3914C51.3184 13.5584 52.5094 13.3421 53.5434 12.7751C53.8384 12.6125 54.1864 12.5738 54.5104 12.6676C54.8344 12.7614 55.1074 12.98 55.2704 13.2753C55.4324 13.5706 55.4714 13.9184 55.3774 14.2422C55.2834 14.566 55.0654 14.8393 54.7694 15.0019C53.5974 15.6455 52.2814 15.9824 50.9434 15.9814Z" fill="currentColor"/>
<path d="M56.8104 12.5052C56.5944 12.5044 56.3834 12.4484 56.1954 12.3424C55.9014 12.1795 55.6824 11.9066 55.5894 11.5833C55.4954 11.26 55.5324 10.9126 55.6944 10.6171C56.2614 9.58297 56.4784 8.39268 56.3114 7.22493C56.1454 6.05718 55.6044 4.97496 54.7704 4.14073C53.9364 3.30649 52.8544 2.76525 51.6864 2.59825C50.5194 2.43125 49.3284 2.64749 48.2944 3.21452C48.1474 3.30059 47.9854 3.3564 47.8164 3.37863C47.6484 3.40087 47.4774 3.38908 47.3134 3.34397C47.1494 3.29886 46.9964 3.22134 46.8624 3.116C46.7294 3.01066 46.6184 2.87964 46.5364 2.73069C46.4544 2.58174 46.4034 2.41788 46.3864 2.24882C46.3684 2.07975 46.3854 1.90891 46.4354 1.7464C46.4854 1.58389 46.5674 1.43301 46.6764 1.3027C46.7854 1.17238 46.9194 1.06527 47.0704 0.987704C48.5874 0.155491 50.3324 -0.162266 52.0454 0.0821474C53.7574 0.326561 55.3454 1.11995 56.5684 2.34319C57.7914 3.56642 58.5844 5.15347 58.8294 6.86604C59.0734 8.5786 58.7554 10.3242 57.9234 11.8408C57.8144 12.0411 57.6534 12.2084 57.4574 12.3253C57.2624 12.4422 57.0384 12.5043 56.8104 12.5052Z" fill="currentColor"/>
</g>
</svg>
@@ -113,6 +113,18 @@ export function MdiLightbulbOn(props: SVGProps<SVGSVGElement>) {
   )
 }

+export function MdiLightbulbQuestion(props: SVGProps<SVGSVGElement>) {
+  // {/* Icon from Material Design Icons by Pictogrammers - https://github.com/Templarian/MaterialDesign/blob/master/LICENSE */}
+  return (
+    <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 24 24" {...props}>
+      <path
+        fill="currentColor"
+        d="M8 2C11.9 2 15 5.1 15 9C15 11.4 13.8 13.5 12 14.7V17C12 17.6 11.6 18 11 18H5C4.4 18 4 17.6 4 17V14.7C2.2 13.5 1 11.4 1 9C1 5.1 4.1 2 8 2M5 21V20H11V21C11 21.6 10.6 22 10 22H6C5.4 22 5 21.6 5 21M8 4C5.2 4 3 6.2 3 9C3 11.1 4.2 12.8 6 13.6V16H10V13.6C11.8 12.8 13 11.1 13 9C13 6.2 10.8 4 8 4M20.5 14.5V16H19V14.5H20.5M18.5 9.5H17V9C17 7.3 18.3 6 20 6S23 7.3 23 9C23 10 22.5 10.9 21.7 11.4L21.4 11.6C20.8 12 20.5 12.6 20.5 13.3V13.5H19V13.3C19 12.1 19.6 11 20.6 10.4L20.9 10.2C21.3 9.9 21.5 9.5 21.5 9C21.5 8.2 20.8 7.5 20 7.5S18.5 8.2 18.5 9V9.5Z"
+      />
+    </svg>
+  )
+}
+
 export function BingLogo(props: SVGProps<SVGSVGElement>) {
   return (
     <svg
@@ -3,6 +3,7 @@ import { ErrorBoundary } from '@renderer/components/ErrorBoundary'
 import { HelpTooltip } from '@renderer/components/TooltipIcons'
 import { TopView } from '@renderer/components/TopView'
 import { permissionModeCards } from '@renderer/config/agent'
+import { isWin } from '@renderer/config/constant'
 import { useAgents } from '@renderer/hooks/agents/useAgents'
 import { useUpdateAgent } from '@renderer/hooks/agents/useUpdateAgent'
 import SelectAgentBaseModelButton from '@renderer/pages/home/components/SelectAgentBaseModelButton'
@@ -16,7 +17,8 @@ import type {
   UpdateAgentForm
 } from '@renderer/types'
 import { AgentConfigurationSchema, isAgentType } from '@renderer/types'
-import { Alert, Button, Input, Modal, Select } from 'antd'
+import type { GitBashPathInfo } from '@shared/config/constant'
+import { Button, Input, Modal, Select } from 'antd'
 import { AlertTriangleIcon } from 'lucide-react'
 import type { ChangeEvent, FormEvent } from 'react'
 import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
@@ -59,8 +61,7 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
   const isEditing = (agent?: AgentWithTools) => agent !== undefined

   const [form, setForm] = useState<BaseAgentForm>(() => buildAgentForm(agent))
-  const [hasGitBash, setHasGitBash] = useState<boolean>(true)
-  const [customGitBashPath, setCustomGitBashPath] = useState<string>('')
+  const [gitBashPathInfo, setGitBashPathInfo] = useState<GitBashPathInfo>({ path: null, source: null })

   useEffect(() => {
     if (open) {
@@ -68,29 +69,15 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
     }
   }, [agent, open])

-  const checkGitBash = useCallback(
-    async (showToast = false) => {
+  const checkGitBash = useCallback(async () => {
     if (!isWin) return
     try {
-      const [gitBashInstalled, savedPath] = await Promise.all([
-        window.api.system.checkGitBash(),
-        window.api.system.getGitBashPath().catch(() => null)
-      ])
-      setCustomGitBashPath(savedPath ?? '')
-      setHasGitBash(gitBashInstalled)
-      if (showToast) {
-        if (gitBashInstalled) {
-          window.toast.success(t('agent.gitBash.success', 'Git Bash detected successfully!'))
-        } else {
-          window.toast.error(t('agent.gitBash.notFound', 'Git Bash not found. Please install it first.'))
-        }
-      }
+      const pathInfo = await window.api.system.getGitBashPathInfo()
+      setGitBashPathInfo(pathInfo)
     } catch (error) {
       logger.error('Failed to check Git Bash:', error as Error)
-      setHasGitBash(true) // Default to true on error to avoid false warnings
     }
-    },
-    [t]
-  )
+  }, [])

   useEffect(() => {
     checkGitBash()
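For reference, the shape returned by the new IPC call is the GitBashPathInfo type declared in @shared/config/constant; the concrete path below is illustrative only:

const info = await window.api.system.getGitBashPathInfo()
// e.g. { path: 'C:\\Program Files\\Git\\bin\\bash.exe', source: 'auto' }  (example value)
// or   { path: null, source: null } when nothing is configured or discovered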
@@ -119,24 +106,22 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
         return
       }

-      setCustomGitBashPath(pickedPath)
-      await checkGitBash(true)
+      await checkGitBash()
     } catch (error) {
       logger.error('Failed to pick Git Bash path', error as Error)
       window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
     }
   }, [checkGitBash, t])

-  const handleClearGitBash = useCallback(async () => {
+  const handleResetGitBash = useCallback(async () => {
     try {
+      // Clear manual setting and re-run auto-discovery
       await window.api.system.setGitBashPath(null)
-      setCustomGitBashPath('')
-      await checkGitBash(true)
+      await checkGitBash()
     } catch (error) {
-      logger.error('Failed to clear Git Bash path', error as Error)
-      window.toast.error(t('agent.gitBash.pick.failed', 'Failed to set Git Bash path'))
+      logger.error('Failed to reset Git Bash path', error as Error)
     }
-  }, [checkGitBash, t])
+  }, [checkGitBash])

   const onPermissionModeChange = useCallback((value: PermissionMode) => {
     setForm((prev) => {
@@ -268,6 +253,12 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
         return
       }

+      if (isWin && !gitBashPathInfo.path) {
+        window.toast.error(t('agent.gitBash.error.required', 'Git Bash path is required on Windows'))
+        loadingRef.current = false
+        return
+      }

       if (isEditing(agent)) {
         if (!agent) {
           loadingRef.current = false
@@ -327,7 +318,8 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
       t,
       updateAgent,
       afterSubmit,
-      addAgent
+      addAgent,
+      gitBashPathInfo.path
     ]
   )
@@ -346,66 +338,6 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
         footer={null}>
         <StyledForm onSubmit={onSubmit}>
           <FormContent>
-            {!hasGitBash && (
-              <Alert
-                message={t('agent.gitBash.error.title', 'Git Bash Required')}
-                description={
-                  <div>
-                    <div style={{ marginBottom: 8 }}>
-                      {t(
-                        'agent.gitBash.error.description',
-                        'Git Bash is required to run agents on Windows. The agent cannot function without it. Please install Git for Windows from'
-                      )}{' '}
-                      <a
-                        href="https://git-scm.com/download/win"
-                        onClick={(e) => {
-                          e.preventDefault()
-                          window.api.openWebsite('https://git-scm.com/download/win')
-                        }}
-                        style={{ textDecoration: 'underline' }}>
-                        git-scm.com
-                      </a>
-                    </div>
-                    <Button size="small" onClick={() => checkGitBash(true)}>
-                      {t('agent.gitBash.error.recheck', 'Recheck Git Bash Installation')}
-                    </Button>
-                    <Button size="small" style={{ marginLeft: 8 }} onClick={handlePickGitBash}>
-                      {t('agent.gitBash.pick.button', 'Select Git Bash Path')}
-                    </Button>
-                  </div>
-                }
-                type="error"
-                showIcon
-                style={{ marginBottom: 16 }}
-              />
-            )}
-
-            {hasGitBash && customGitBashPath && (
-              <Alert
-                message={t('agent.gitBash.found.title', 'Git Bash configured')}
-                description={
-                  <div style={{ display: 'flex', flexDirection: 'column', gap: 8 }}>
-                    <div>
-                      {t('agent.gitBash.customPath', {
-                        defaultValue: 'Using custom path: {{path}}',
-                        path: customGitBashPath
-                      })}
-                    </div>
-                    <div style={{ display: 'flex', gap: 8 }}>
-                      <Button size="small" onClick={handlePickGitBash}>
-                        {t('agent.gitBash.pick.button', 'Select Git Bash Path')}
-                      </Button>
-                      <Button size="small" onClick={handleClearGitBash}>
-                        {t('agent.gitBash.clear.button', 'Clear custom path')}
-                      </Button>
-                    </div>
-                  </div>
-                }
-                type="success"
-                showIcon
-                style={{ marginBottom: 16 }}
-              />
-            )}
             <FormRow>
               <FormItem style={{ flex: 1 }}>
                 <Label>
@@ -439,6 +371,40 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {
               />
             </FormItem>

+            {isWin && (
+              <FormItem>
+                <div className="flex items-center gap-2">
+                  <Label>
+                    Git Bash <RequiredMark>*</RequiredMark>
+                  </Label>
+                  <HelpTooltip
+                    title={t(
+                      'agent.gitBash.tooltip',
+                      'Git Bash is required to run agents on Windows. Install from git-scm.com if not available.'
+                    )}
+                  />
+                </div>
+                <GitBashInputWrapper>
+                  <Input
+                    value={gitBashPathInfo.path ?? ''}
+                    readOnly
+                    placeholder={t('agent.gitBash.placeholder', 'Select bash.exe path')}
+                  />
+                  <Button size="small" onClick={handlePickGitBash}>
+                    {t('common.select', 'Select')}
+                  </Button>
+                  {gitBashPathInfo.source === 'manual' && (
+                    <Button size="small" onClick={handleResetGitBash}>
+                      {t('common.reset', 'Reset')}
+                    </Button>
+                  )}
+                </GitBashInputWrapper>
+                {gitBashPathInfo.path && gitBashPathInfo.source === 'auto' && (
+                  <SourceHint>{t('agent.gitBash.autoDiscoveredHint', 'Auto-discovered')}</SourceHint>
+                )}
+              </FormItem>
+            )}
+
             <FormItem>
               <Label>
                 {t('agent.settings.tooling.permissionMode.title', 'Permission mode')} <RequiredMark>*</RequiredMark>
@@ -511,7 +477,11 @@ const PopupContainer: React.FC<Props> = ({ agent, afterSubmit, resolve }) => {

           <FormFooter>
             <Button onClick={onCancel}>{t('common.close')}</Button>
-            <Button type="primary" htmlType="submit" loading={loadingRef.current} disabled={!hasGitBash}>
+            <Button
+              type="primary"
+              htmlType="submit"
+              loading={loadingRef.current}
+              disabled={isWin && !gitBashPathInfo.path}>
               {isEditing(agent) ? t('common.confirm') : t('common.add')}
             </Button>
           </FormFooter>
@@ -582,6 +552,21 @@ const FormItem = styled.div`
   gap: 8px;
 `

+const GitBashInputWrapper = styled.div`
+  display: flex;
+  gap: 8px;
+  align-items: center;
+
+  input {
+    flex: 1;
+  }
+`
+
+const SourceHint = styled.span`
+  font-size: 12px;
+  color: var(--color-text-3);
+`
+
 const Label = styled.label`
   font-size: 14px;
   color: var(--color-text-1);
@@ -631,7 +631,7 @@ describe('Reasoning option configuration', () => {

   it('restricts GPT-5 Pro reasoning to high effort only', () => {
     expect(MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro).toEqual(['high'])
-    expect(MODEL_SUPPORTED_OPTIONS.gpt5pro).toEqual(['high'])
+    expect(MODEL_SUPPORTED_OPTIONS.gpt5pro).toEqual(['default', 'high'])
   })
 })
@@ -733,6 +733,11 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
     expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toBe('doubao_after_251015')
   })

+  it('should return doubao_after_251015 for Doubao-Seed-1.8 models', () => {
+    expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
+    expect(getThinkModelType(createModel({ id: 'doubao-seed-1.8' }))).toBe('doubao_after_251015')
+  })
+
   it('should return doubao_no_auto for other Doubao thinking models', () => {
     expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
   })
@@ -863,6 +868,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
     // auto > after_251015 > no_auto
     expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao')
     expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015')
+    expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
     expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
   })
@@ -1672,10 +1678,26 @@ describe('getModelSupportedReasoningEffortOptions', () => {

   describe('OpenAI models', () => {
     it('should return correct options for o-series models', () => {
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3' }))).toEqual(['low', 'medium', 'high'])
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3-mini' }))).toEqual(['low', 'medium', 'high'])
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o4' }))).toEqual(['low', 'medium', 'high'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3' }))).toEqual([
+        'default',
+        'low',
+        'medium',
+        'high'
+      ])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o3-mini' }))).toEqual([
+        'default',
+        'low',
+        'medium',
+        'high'
+      ])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'o4' }))).toEqual([
+        'default',
+        'low',
+        'medium',
+        'high'
+      ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-oss-reasoning' }))).toEqual([
+        'default',
         'low',
         'medium',
         'high'
@@ -1685,17 +1707,22 @@ describe('getModelSupportedReasoningEffortOptions', () => {
     it('should return correct options for deep research models', () => {
       // Note: Deep research models need to be actual OpenAI reasoning models to be detected
       // 'sonar-deep-research' from Perplexity is the primary deep research model
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual(['medium'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual([
+        'default',
+        'medium'
+      ])
     })

     it('should return correct options for GPT-5 models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5' }))).toEqual([
+        'default',
         'minimal',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-preview' }))).toEqual([
+        'default',
         'minimal',
         'low',
         'medium',
@@ -1704,17 +1731,22 @@ describe('getModelSupportedReasoningEffortOptions', () => {
     })

     it('should return correct options for GPT-5 Pro models', () => {
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro' }))).toEqual(['high'])
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro-preview' }))).toEqual(['high'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro' }))).toEqual(['default', 'high'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-pro-preview' }))).toEqual([
+        'default',
+        'high'
+      ])
     })

     it('should return correct options for GPT-5 Codex models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex' }))).toEqual([
+        'default',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5-codex-mini' }))).toEqual([
+        'default',
         'low',
         'medium',
         'high'
@@ -1723,18 +1755,21 @@ describe('getModelSupportedReasoningEffortOptions', () => {

     it('should return correct options for GPT-5.1 models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-preview' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-mini' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
@@ -1744,11 +1779,13 @@ describe('getModelSupportedReasoningEffortOptions', () => {

     it('should return correct options for GPT-5.1 Codex models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex' }))).toEqual([
+        'default',
         'none',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gpt-5.1-codex-mini' }))).toEqual([
+        'default',
         'none',
         'medium',
         'high'
@@ -1758,19 +1795,24 @@ describe('getModelSupportedReasoningEffortOptions', () => {

   describe('Grok models', () => {
     it('should return correct options for Grok 3 mini', () => {
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-3-mini' }))).toEqual(['low', 'high'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-3-mini' }))).toEqual([
+        'default',
+        'low',
+        'high'
+      ])
     })

     it('should return correct options for Grok 4 Fast', () => {
       expect(
         getModelSupportedReasoningEffortOptions(createModel({ id: 'grok-4-fast', provider: 'openrouter' }))
-      ).toEqual(['none', 'auto'])
+      ).toEqual(['default', 'none', 'auto'])
     })
   })

   describe('Gemini models', () => {
     it('should return correct options for Gemini Flash models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-flash-latest' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
@@ -1778,6 +1820,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
         'auto'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-flash-latest' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
@@ -1788,12 +1831,14 @@ describe('getModelSupportedReasoningEffortOptions', () => {

     it('should return correct options for Gemini Pro models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-pro-latest' }))).toEqual([
+        'default',
         'low',
         'medium',
         'high',
         'auto'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-pro-latest' }))).toEqual([
+        'default',
         'low',
         'medium',
         'high',
@@ -1803,11 +1848,13 @@ describe('getModelSupportedReasoningEffortOptions', () => {

     it('should return correct options for Gemini 3 models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-flash' }))).toEqual([
+        'default',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-pro-preview' }))).toEqual([
+        'default',
         'low',
         'medium',
         'high'
@@ -1818,24 +1865,28 @@ describe('getModelSupportedReasoningEffortOptions', () => {
   describe('Qwen models', () => {
     it('should return correct options for controllable Qwen models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-plus' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-turbo' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen-flash' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'qwen3-8b' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
@@ -1853,11 +1904,13 @@ describe('getModelSupportedReasoningEffortOptions', () => {
   describe('Doubao models', () => {
     it('should return correct options for auto-thinking Doubao models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1.6' }))).toEqual([
+        'default',
         'none',
         'auto',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1-5-thinking-pro-m' }))).toEqual([
+        'default',
         'none',
         'auto',
         'high'
@@ -1866,12 +1919,14 @@ describe('getModelSupportedReasoningEffortOptions', () => {

     it('should return correct options for Doubao models after 251015', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-251015' }))).toEqual([
+        'default',
         'minimal',
         'low',
         'medium',
         'high'
       ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-seed-1-6-lite-251015' }))).toEqual([
+        'default',
         'minimal',
         'low',
         'medium',
@@ -1881,6 +1936,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {

     it('should return correct options for other Doubao thinking models', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toEqual([
+        'default',
         'none',
         'high'
       ])
@@ -1889,28 +1945,43 @@ describe('getModelSupportedReasoningEffortOptions', () => {

   describe('Other providers', () => {
     it('should return correct options for Hunyuan models', () => {
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'hunyuan-a13b' }))).toEqual(['none', 'auto'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'hunyuan-a13b' }))).toEqual([
+        'default',
+        'none',
+        'auto'
+      ])
     })

     it('should return correct options for Zhipu models', () => {
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.5' }))).toEqual(['none', 'auto'])
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.6' }))).toEqual(['none', 'auto'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.5' }))).toEqual([
+        'default',
+        'none',
+        'auto'
+      ])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'glm-4.6' }))).toEqual([
+        'default',
+        'none',
+        'auto'
+      ])
     })

     it('should return correct options for Perplexity models', () => {
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual(['medium'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'sonar-deep-research' }))).toEqual([
+        'default',
+        'medium'
+      ])
     })

     it('should return correct options for DeepSeek hybrid models', () => {
       expect(
         getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.1', provider: 'deepseek' }))
-      ).toEqual(['none', 'auto'])
+      ).toEqual(['default', 'none', 'auto'])
       expect(
         getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-v3.2', provider: 'openrouter' }))
-      ).toEqual(['none', 'auto'])
+      ).toEqual(['default', 'none', 'auto'])
       expect(
         getModelSupportedReasoningEffortOptions(createModel({ id: 'deepseek-chat', provider: 'deepseek' }))
-      ).toEqual(['none', 'auto'])
+      ).toEqual(['default', 'none', 'auto'])
     })
   })

@@ -1925,7 +1996,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
            provider: 'openrouter'
          })
        )
-      ).toEqual(['none', 'auto'])
+      ).toEqual(['default', 'none', 'auto'])

       expect(
         getModelSupportedReasoningEffortOptions(
@@ -1934,7 +2005,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
            name: 'gpt-5.1'
          })
        )
-      ).toEqual(['none', 'low', 'medium', 'high'])
+      ).toEqual(['default', 'none', 'low', 'medium', 'high'])

       // Qwen models work well for name-based fallback
       expect(
@@ -1944,7 +2015,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
            name: 'qwen-plus'
          })
        )
-      ).toEqual(['none', 'low', 'medium', 'high'])
+      ).toEqual(['default', 'none', 'low', 'medium', 'high'])
     })

     it('should use id result when id matches', () => {
@@ -1955,7 +2026,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {
            name: 'Different Name'
          })
        )
-      ).toEqual(['none', 'low', 'medium', 'high'])
+      ).toEqual(['default', 'none', 'low', 'medium', 'high'])

       expect(
         getModelSupportedReasoningEffortOptions(
@@ -1964,20 +2035,27 @@ describe('getModelSupportedReasoningEffortOptions', () => {
            name: 'Some other name'
          })
        )
-      ).toEqual(['low', 'medium', 'high'])
+      ).toEqual(['default', 'low', 'medium', 'high'])
     })
   })

   describe('Case sensitivity', () => {
     it('should handle case insensitive model IDs', () => {
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'GPT-5.1' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
         'high'
       ])
-      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'O3-MINI' }))).toEqual(['low', 'medium', 'high'])
+      expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'O3-MINI' }))).toEqual([
+        'default',
+        'low',
+        'medium',
+        'high'
+      ])
       expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toEqual([
+        'default',
         'none',
         'low',
         'medium',
@@ -746,6 +746,12 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
     }
   ],
   doubao: [
+    {
+      id: 'doubao-seed-1-8-251215',
+      provider: 'doubao',
+      name: 'Doubao-Seed-1.8',
+      group: 'Doubao-Seed-1.8'
+    },
     {
       id: 'doubao-1-5-vision-pro-32k-250115',
       provider: 'doubao',
@@ -1785,5 +1791,13 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
       provider: 'cerebras',
       group: 'qwen'
     }
-  ]
+  ],
+  mimo: [
+    {
+      id: 'mimo-v2-flash',
+      name: 'Mimo V2 Flash',
+      provider: 'mimo',
+      group: 'Mimo'
+    }
+  ]
 }
@@ -103,6 +103,7 @@ import MicrosoftModelLogo from '@renderer/assets/images/models/microsoft.png'
 import MicrosoftModelLogoDark from '@renderer/assets/images/models/microsoft_dark.png'
 import MidjourneyModelLogo from '@renderer/assets/images/models/midjourney.png'
 import MidjourneyModelLogoDark from '@renderer/assets/images/models/midjourney_dark.png'
+import MiMoModelLogo from '@renderer/assets/images/models/mimo.svg'
 import {
   default as MinicpmModelLogo,
   default as MinicpmModelLogoDark
@@ -301,7 +302,8 @@ export function getModelLogoById(modelId: string): string | undefined {
     bytedance: BytedanceModelLogo,
     ling: LingModelLogo,
     ring: LingModelLogo,
-    '(V_1|V_1_TURBO|V_2|V_2A|V_2_TURBO|DESCRIBE|UPSCALE)': IdeogramModelLogo
+    '(V_1|V_1_TURBO|V_2|V_2A|V_2_TURBO|DESCRIBE|UPSCALE)': IdeogramModelLogo,
+    mimo: MiMoModelLogo
   } as const satisfies Record<string, string>

   for (const key in logoMap) {
@@ -52,6 +52,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT = {
   doubao_no_auto: ['high'] as const,
   doubao_after_251015: ['minimal', 'low', 'medium', 'high'] as const,
   hunyuan: ['auto'] as const,
+  mimo: ['auto'] as const,
   zhipu: ['auto'] as const,
   perplexity: ['low', 'medium', 'high'] as const,
   deepseek_hybrid: ['auto'] as const
@@ -59,31 +60,32 @@ export const MODEL_SUPPORTED_REASONING_EFFORT = {

 // Mapping from model type to supported thinking options
 export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
-  default: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
-  o: MODEL_SUPPORTED_REASONING_EFFORT.o,
-  openai_deep_research: MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research,
-  gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
-  gpt5pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro,
-  gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
-  gpt5_1: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1,
-  gpt5_1_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex,
-  gpt5_2: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2,
-  gpt5_1_codex_max: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max,
-  gpt52pro: MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro,
-  grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
-  grok4_fast: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
-  gemini: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
-  gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
-  gemini3: MODEL_SUPPORTED_REASONING_EFFORT.gemini3,
-  qwen: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
-  qwen_thinking: MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking,
-  doubao: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
-  doubao_no_auto: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
-  doubao_after_251015: MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015,
-  hunyuan: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
-  zhipu: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
-  perplexity: MODEL_SUPPORTED_REASONING_EFFORT.perplexity,
-  deepseek_hybrid: ['none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
+  default: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
+  o: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.o] as const,
+  openai_deep_research: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.openai_deep_research] as const,
+  gpt5: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
+  gpt5pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5pro] as const,
+  gpt5_codex: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex] as const,
+  gpt5_1: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1] as const,
+  gpt5_1_codex: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex] as const,
+  gpt5_2: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_2] as const,
+  gpt5_1_codex_max: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt5_1_codex_max] as const,
+  gpt52pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro] as const,
+  grok: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.grok] as const,
+  grok4_fast: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
+  gemini: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
+  gemini_pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro] as const,
+  gemini3: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini3] as const,
+  qwen: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
+  qwen_thinking: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking] as const,
+  doubao: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
+  doubao_no_auto: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_no_auto] as const,
+  doubao_after_251015: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao_after_251015] as const,
+  mimo: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.mimo] as const,
+  hunyuan: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.hunyuan] as const,
+  zhipu: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.zhipu] as const,
+  perplexity: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.perplexity] as const,
+  deepseek_hybrid: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.deepseek_hybrid] as const
 } as const

 const withModelIdAndNameAsId = <T>(model: Model, fn: (model: Model) => T): { idResult: T; nameResult: T } => {
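Since every entry now leads with 'default', a consumer can rely on index 0 as the no-op choice. A quick illustration using the constants above:

// New MiMo mapping: ['default', 'none', ...['auto']] -> ['default', 'none', 'auto']
const mimoOptions = MODEL_SUPPORTED_OPTIONS.mimo
console.assert(mimoOptions[0] === 'default') // safe fallback when the user has picked nothing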
@@ -146,7 +148,7 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
   } else if (isSupportedThinkingTokenDoubaoModel(model)) {
     if (isDoubaoThinkingAutoModel(model)) {
       thinkingModelType = 'doubao'
-    } else if (isDoubaoSeedAfter251015(model)) {
+    } else if (isDoubaoSeedAfter251015(model) || isDoubaoSeed18Model(model)) {
       thinkingModelType = 'doubao_after_251015'
     } else {
       thinkingModelType = 'doubao_no_auto'
@@ -155,6 +157,7 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
   else if (isSupportedReasoningEffortPerplexityModel(model)) thinkingModelType = 'perplexity'
   else if (isSupportedThinkingTokenZhipuModel(model)) thinkingModelType = 'zhipu'
   else if (isDeepSeekHybridInferenceModel(model)) thinkingModelType = 'deepseek_hybrid'
+  else if (isSupportedThinkingTokenMiMoModel(model)) thinkingModelType = 'mimo'
   return thinkingModelType
 }
@@ -191,20 +194,28 @@ const _getModelSupportedReasoningEffortOptions = (model: Model): ReasoningEffort
  * - The model is null/undefined
  * - The model doesn't support reasoning effort or thinking tokens
  *
+ * All reasoning models support the 'default' option (always the first element),
+ * which represents no additional configuration for thinking behavior.
+ *
  * @example
- * // OpenAI o-series models support low, medium, high
+ * // OpenAI o-series models support default, low, medium, high
  * getModelSupportedReasoningEffortOptions({ id: 'o3-mini', ... })
- * // Returns: ['low', 'medium', 'high']
+ * // Returns: ['default', 'low', 'medium', 'high']
+ * // 'default' = no additional configuration for thinking behavior
  *
  * @example
- * // GPT-5.1 models support none, low, medium, high
+ * // GPT-5.1 models support default, none, low, medium, high
  * getModelSupportedReasoningEffortOptions({ id: 'gpt-5.1', ... })
- * // Returns: ['none', 'low', 'medium', 'high']
+ * // Returns: ['default', 'none', 'low', 'medium', 'high']
+ * // 'default' = no additional configuration
+ * // 'none' = explicitly disable reasoning
  *
  * @example
- * // Gemini Flash models support none, low, medium, high, auto
+ * // Gemini Flash models support default, none, low, medium, high, auto
 * getModelSupportedReasoningEffortOptions({ id: 'gemini-2.5-flash-latest', ... })
- * // Returns: ['none', 'low', 'medium', 'high', 'auto']
+ * // Returns: ['default', 'none', 'low', 'medium', 'high', 'auto']
+ * // 'default' = no additional configuration
+ * // 'auto' = let the model automatically decide
  *
  * @example
  * // Non-reasoning models return undefined
@@ -214,7 +225,7 @@ const _getModelSupportedReasoningEffortOptions = (model: Model): ReasoningEffort
  * @example
  * // Name fallback when id doesn't match
  * getModelSupportedReasoningEffortOptions({ id: 'custom-id', name: 'gpt-5.1', ... })
- * // Returns: ['none', 'low', 'medium', 'high']
+ * // Returns: ['default', 'none', 'low', 'medium', 'high']
  */
 export const getModelSupportedReasoningEffortOptions = (
   model: Model | undefined | null
@@ -255,7 +266,8 @@ function _isSupportedThinkingTokenModel(model: Model): boolean {
     isSupportedThinkingTokenClaudeModel(model) ||
     isSupportedThinkingTokenDoubaoModel(model) ||
     isSupportedThinkingTokenHunyuanModel(model) ||
-    isSupportedThinkingTokenZhipuModel(model)
+    isSupportedThinkingTokenZhipuModel(model) ||
+    isSupportedThinkingTokenMiMoModel(model)
   )
 }
@@ -449,7 +461,7 @@ export function isQwenAlwaysThinkModel(model?: Model): boolean {

 // Regex for Doubao models that support thinking mode
 export const DOUBAO_THINKING_MODEL_REGEX =
-  /doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-]6(?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i
+  /doubao-(?:1[.-]5-thinking-vision-pro|1[.-]5-thinking-pro-m|seed-1[.-][68](?:-flash)?(?!-(?:thinking)(?:-|$))|seed-code(?:-preview)?(?:-\d+)?)(?:-[\w-]+)*/i

 // Doubao models that support auto: doubao-seed-1.6-xxx, doubao-seed-1-6-xxx, doubao-1-5-thinking-pro-m-xxx
 // Auto thinking is no longer supported after version 251015, see https://console.volcengine.com/ark/region:ark+cn-beijing/model/detail?Id=doubao-seed-1-6
@@ -467,6 +479,11 @@ export function isDoubaoSeedAfter251015(model: Model): boolean {
   return result
 }

+export function isDoubaoSeed18Model(model: Model): boolean {
+  const pattern = /doubao-seed-1[.-]8(?:-[\w-]+)?/i
+  return pattern.test(model.id) || pattern.test(model.name)
+}

 export function isSupportedThinkingTokenDoubaoModel(model?: Model): boolean {
   if (!model) {
     return false
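A quick sanity check of the widened regex and the new helper (illustrative IDs; the partial casts are for sketch purposes only):

DOUBAO_THINKING_MODEL_REGEX.test('doubao-seed-1-8-251215') // true: [68] now also accepts the 1.8 series
DOUBAO_THINKING_MODEL_REGEX.test('doubao-seed-1.6-flash') // true: 1.6 behavior unchanged
isDoubaoSeed18Model({ id: 'doubao-seed-1.8', name: '' } as Model) // true
isDoubaoSeed18Model({ id: 'doubao-seed-1-6-251015', name: '' } as Model) // false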
@@ -548,6 +565,11 @@ export const isSupportedThinkingTokenZhipuModel = (model: Model): boolean => {
   return ['glm-4.5', 'glm-4.6'].some((id) => modelId.includes(id))
 }

+export const isSupportedThinkingTokenMiMoModel = (model: Model): boolean => {
+  const modelId = getLowerBaseModelName(model.id, '/')
+  return ['mimo-v2-flash'].some((id) => modelId.includes(id))
+}

 export const isDeepSeekHybridInferenceModel = (model: Model) => {
   const { idResult, nameResult } = withModelIdAndNameAsId(model, (model) => {
     const modelId = getLowerBaseModelName(model.id)
@@ -586,6 +608,8 @@ export const isZhipuReasoningModel = (model?: Model): boolean => {
   return isSupportedThinkingTokenZhipuModel(model) || modelId.includes('glm-z1')
 }

+export const isMiMoReasoningModel = isSupportedThinkingTokenMiMoModel

 export const isStepReasoningModel = (model?: Model): boolean => {
   if (!model) {
     return false
@@ -636,6 +660,7 @@ export function isReasoningModel(model?: Model): boolean {
     isDeepSeekHybridInferenceModel(model) ||
     isLingReasoningModel(model) ||
     isMiniMaxReasoningModel(model) ||
+    isMiMoReasoningModel(model) ||
     modelId.includes('magistral') ||
     modelId.includes('pangu-pro-moe') ||
     modelId.includes('seed-oss') ||
@@ -25,12 +25,13 @@ export const FUNCTION_CALLING_MODELS = [
   'learnlm(?:-[\\w-]+)?',
   'gemini(?:-[\\w-]+)?', // Gemini embedding models are excluded earlier
   'grok-3(?:-[\\w-]+)?',
-  'doubao-seed-1[.-]6(?:-[\\w-]+)?',
+  'doubao-seed-1[.-][68](?:-[\\w-]+)?',
   'doubao-seed-code(?:-[\\w-]+)?',
   'kimi-k2(?:-[\\w-]+)?',
   'ling-\\w+(?:-[\\w-]+)?',
   'ring-\\w+(?:-[\\w-]+)?',
-  'minimax-m2'
+  'minimax-m2',
+  'mimo-v2-flash'
 ] as const

 const FUNCTION_CALLING_EXCLUDED_MODELS = [
@@ -45,7 +45,7 @@ const visionAllowedModels = [
   'deepseek-vl(?:[\\w-]+)?',
   'kimi-latest',
   'gemma-3(?:-[\\w-]+)',
-  'doubao-seed-1[.-]6(?:-[\\w-]+)?',
+  'doubao-seed-1[.-][68](?:-[\\w-]+)?',
   'doubao-seed-code(?:-[\\w-]+)?',
   'kimi-thinking-preview',
   `gemma3(?:[-:\\w]+)?`,
@@ -31,6 +31,7 @@ import JinaProviderLogo from '@renderer/assets/images/providers/jina.png'
 import LanyunProviderLogo from '@renderer/assets/images/providers/lanyun.png'
 import LMStudioProviderLogo from '@renderer/assets/images/providers/lmstudio.png'
 import LongCatProviderLogo from '@renderer/assets/images/providers/longcat.png'
+import MiMoProviderLogo from '@renderer/assets/images/providers/mimo.svg'
 import MinimaxProviderLogo from '@renderer/assets/images/providers/minimax.png'
 import MistralProviderLogo from '@renderer/assets/images/providers/mistral.png'
 import ModelScopeProviderLogo from '@renderer/assets/images/providers/modelscope.png'
@@ -695,6 +696,17 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
     models: SYSTEM_MODELS.cerebras,
     isSystem: true,
     enabled: false
   },
+  mimo: {
+    id: 'mimo',
+    name: 'Xiaomi MiMo',
+    type: 'openai',
+    apiKey: '',
+    apiHost: 'https://api.xiaomimimo.com',
+    anthropicApiHost: 'https://api.xiaomimimo.com/anthropic',
+    models: SYSTEM_MODELS.mimo,
+    isSystem: true,
+    enabled: false
+  }
 } as const
@@ -763,7 +775,8 @@ export const PROVIDER_LOGO_MAP: AtLeast<SystemProviderId, string> = {
   huggingface: HuggingfaceProviderLogo,
   sophnet: SophnetProviderLogo,
   gateway: AIGatewayProviderLogo,
-  cerebras: CerebrasProviderLogo
+  cerebras: CerebrasProviderLogo,
+  mimo: MiMoProviderLogo
 } as const

 export function getProviderLogo(providerId: string) {
@@ -1434,5 +1447,16 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
     docs: 'https://inference-docs.cerebras.ai/introduction',
     models: 'https://inference-docs.cerebras.ai/models/overview'
     }
-  }
+  },
+  mimo: {
+    api: {
+      url: 'https://api.xiaomimimo.com'
+    },
+    websites: {
+      official: 'https://platform.xiaomimimo.com/',
+      apiKey: 'https://platform.xiaomimimo.com/#/console/usage',
+      docs: 'https://platform.xiaomimimo.com/#/docs/welcome',
+      models: 'https://platform.xiaomimimo.com/'
+    }
+  }
 }
@ -5,7 +5,7 @@
*/

import { loggerService } from '@logger'
import type { AgentType, BuiltinMCPServerName, BuiltinOcrProviderId, ThinkingOption } from '@renderer/types'
import type { AgentType, BuiltinMCPServerName, BuiltinOcrProviderId } from '@renderer/types'
import { BuiltinMCPServerNames } from '@renderer/types'

import i18n from './index'
@ -88,7 +88,8 @@ const providerKeyMap = {
huggingface: 'provider.huggingface',
sophnet: 'provider.sophnet',
gateway: 'provider.ai-gateway',
cerebras: 'provider.cerebras'
cerebras: 'provider.cerebras',
mimo: 'provider.mimo'
} as const

/**
@ -310,20 +311,6 @@ export const getHttpMessageLabel = (key: string): string => {
return getLabel(httpMessageKeyMap, key)
}

const reasoningEffortOptionsKeyMap: Record<ThinkingOption, string> = {
none: 'assistants.settings.reasoning_effort.off',
minimal: 'assistants.settings.reasoning_effort.minimal',
high: 'assistants.settings.reasoning_effort.high',
low: 'assistants.settings.reasoning_effort.low',
medium: 'assistants.settings.reasoning_effort.medium',
auto: 'assistants.settings.reasoning_effort.default',
xhigh: 'assistants.settings.reasoning_effort.xhigh'
} as const

export const getReasoningEffortOptionsLabel = (key: string): string => {
return getLabel(reasoningEffortOptionsKeyMap, key)
}

const fileFieldKeyMap = {
created_at: 'files.created_at',
size: 'files.size',
@ -344,7 +331,8 @@ const builtInMcpDescriptionKeyMap: Record<BuiltinMCPServerName, string> = {
[BuiltinMCPServerNames.difyKnowledge]: 'settings.mcp.builtinServersDescriptions.dify_knowledge',
[BuiltinMCPServerNames.python]: 'settings.mcp.builtinServersDescriptions.python',
[BuiltinMCPServerNames.didiMCP]: 'settings.mcp.builtinServersDescriptions.didi_mcp',
[BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser'
[BuiltinMCPServerNames.browser]: 'settings.mcp.builtinServersDescriptions.browser',
[BuiltinMCPServerNames.nowledgeMem]: 'settings.mcp.builtinServersDescriptions.nowledge_mem'
} as const

export const getBuiltInMcpServerDescriptionLabel = (key: string): string => {

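label.ts resolves labels through small key maps plus a shared getLabel helper; the reasoning-effort map is removed here because ThinkingButton now builds its labels directly with t(). A minimal sketch of the key-map pattern, assuming an i18next-style translator (getLabel's real signature is not shown in this diff):

// Sketch: key-map label resolution in the style of label.ts.
import i18n from 'i18next'

const providerKeyMap: Record<string, string> = {
  cerebras: 'provider.cerebras',
  mimo: 'provider.mimo'
}

// Assumed helper: translate the mapped key, falling back to the raw input.
export const getProviderLabelSketch = (key: string): string => {
  const i18nKey = providerKeyMap[key]
  return i18nKey ? i18n.t(i18nKey) : key
}
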
@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "Using auto-detected Git Bash",
"autoDiscoveredHint": "Auto-discovered",
"clear": {
"button": "Clear custom path"
},
@ -39,6 +40,7 @@
"error": {
"description": "Git Bash is required to run agents on Windows. The agent cannot function without it. Please install Git for Windows from",
"recheck": "Recheck Git Bash Installation",
"required": "Git Bash path is required on Windows",
"title": "Git Bash Required"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "Selected file is not a valid Git Bash executable (bash.exe).",
"title": "Select Git Bash executable"
},
"success": "Git Bash detected successfully!"
"placeholder": "Select bash.exe path",
"success": "Git Bash detected successfully!",
"tooltip": "Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "Enter your message here, send with {{key}} - @ select path, / select command"
@ -544,14 +548,23 @@
"more": "Assistant Settings",
"prompt": "Prompt Settings",
"reasoning_effort": {
"auto": "Auto",
"auto_description": "Flexibly determine reasoning effort",
"default": "Default",
"default_description": "Depend on the model's default behavior, without any configuration.",
"high": "High",
"high_description": "High level reasoning",
"label": "Reasoning effort",
"low": "Low",
"low_description": "Low level reasoning",
"medium": "Medium",
"medium_description": "Medium level reasoning",
"minimal": "Minimal",
"minimal_description": "Minimal reasoning",
"off": "Off",
"xhigh": "Extra High"
"off_description": "Disable reasoning",
"xhigh": "Extra High",
"xhigh_description": "Extra high level reasoning"
},
"regular_phrases": {
"add": "Add Phrase",
@ -2630,6 +2643,7 @@
"lanyun": "LANYUN",
"lmstudio": "LM Studio",
"longcat": "LongCat AI",
"mimo": "Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "Automatically install MCP service (beta)",
"memory": "Persistent memory implementation based on a local knowledge graph. This enables the model to remember user-related information across different conversations. Requires configuring the MEMORY_FILE_PATH environment variable.",
"no": "No description",
"nowledge_mem": "Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Execute Python code in a secure sandbox environment. Run Python with Pyodide, supporting most standard libraries and scientific computing packages",
"sequentialthinking": "A MCP server implementation that provides tools for dynamic and reflective problem solving through structured thinking processes"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "Remove Invalid Keys",
"search": "Search Providers...",
"search_placeholder": "Search model id or name",
"streaming": {
"description": "Configure client-side limits for long-running streaming requests (e.g., OpenAI Responses SSE).",
"label": "Streaming & Timeouts",
"max_tool_steps": {
"help": "Maximum tool-calling steps for agent workflows. Increase this if your workflow requires more than 20 steps.",
"label": "Max tool steps"
},
"request_timeout": {
"help": "Abort the request after this time. Set to 0 to disable.",
"label": "Request hard timeout (minutes)"
},
"reset": "Reset to defaults",
"sse_idle_timeout": {
"help": "Abort if no stream events are received for this time. Increase it for long silent reasoning/tool runs; set to 0 to disable.",
"label": "SSE idle timeout (minutes)"
},
"title": "Streaming & Timeouts"
},
"title": "Model Provider",
"vertex_ai": {
"api_host_help": "The API host for Vertex AI, not recommended to fill in, generally applicable to reverse proxy",

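The new settings.provider.streaming.* keys are plain nested i18n resources; the settings UI reads them with react-i18next. A small usage sketch (the component name is illustrative):

// Sketch: consuming the streaming keys added above.
import { useTranslation } from 'react-i18next'

export const SseIdleTimeoutLabel = () => {
  const { t } = useTranslation()
  // Resolves to "SSE idle timeout (minutes)" under the English resources above.
  return <span>{t('settings.provider.streaming.sse_idle_timeout.label')}</span>
}
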
@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "使用自动检测的 Git Bash",
"autoDiscoveredHint": "自动发现",
"clear": {
"button": "清除自定义路径"
},
@ -39,6 +40,7 @@
"error": {
"description": "在 Windows 上运行智能体需要 Git Bash。没有它智能体无法运行。请从以下地址安装 Git for Windows",
"recheck": "重新检测 Git Bash 安装",
"required": "在 Windows 上需要配置 Git Bash 路径",
"title": "需要 Git Bash"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "选择的文件不是有效的 Git Bash 可执行文件(bash.exe)。",
"title": "选择 Git Bash 可执行文件"
},
"success": "成功检测到 Git Bash!"
"placeholder": "选择 bash.exe 路径",
"success": "成功检测到 Git Bash!",
"tooltip": "在 Windows 上运行智能体需要 Git Bash。如果未安装,请从 git-scm.com 下载安装。"
},
"input": {
"placeholder": "在这里输入消息,按 {{key}} 发送 - @ 选择路径, / 选择命令"
@ -544,14 +548,23 @@
"more": "助手设置",
"prompt": "提示词设置",
"reasoning_effort": {
"auto": "自动",
"auto_description": "灵活决定推理力度",
"default": "默认",
"default_description": "依赖模型默认行为,不作任何配置",
"high": "沉思",
"high_description": "高强度推理",
"label": "思维链长度",
"low": "浮想",
"low_description": "低强度推理",
"medium": "斟酌",
"medium_description": "中强度推理",
"minimal": "微念",
"minimal_description": "最小程度的思考",
"off": "关闭",
"xhigh": "穷究"
"off_description": "禁用推理",
"xhigh": "穷究",
"xhigh_description": "超高强度推理"
},
"regular_phrases": {
"add": "添加短语",
@ -2630,6 +2643,7 @@
"lanyun": "蓝耘科技",
"lmstudio": "LM Studio",
"longcat": "龙猫",
"mimo": "Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope 魔搭",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "自动安装 MCP 服务(测试版)",
"memory": "基于本地知识图谱的持久性记忆基础实现。这使得模型能够在不同对话间记住用户的相关信息。需要配置 MEMORY_FILE_PATH 环境变量。",
"no": "无描述",
"nowledge_mem": "需要本地运行 Nowledge Mem 应用。将 AI 对话、工具、笔记、智能体和文件保存在本地计算机的私有记忆中。请从 https://mem.nowledge.co/ 下载",
"python": "在安全的沙盒环境中执行 Python 代码。使用 Pyodide 运行 Python,支持大多数标准库和科学计算包",
"sequentialthinking": "一个 MCP 服务器实现,提供了通过结构化思维过程进行动态和反思性问题解决的工具"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "删除无效密钥",
"search": "搜索模型平台...",
"search_placeholder": "搜索模型 ID 或名称",
"streaming": {
"description": "配置长时间流式请求(例如 OpenAI Responses SSE)的客户端限制。",
"label": "流式与超时",
"max_tool_steps": {
"help": "AI SDK 的工具调用会多轮循环。该值用于防止无限循环;若工作流需要更多轮工具调用请调大。",
"label": "最大工具步数"
},
"request_timeout": {
"help": "超过该时间将由客户端中止请求。设为 0 表示不额外限制。",
"label": "请求硬超时(分钟)"
},
"reset": "恢复默认",
"sse_idle_timeout": {
"help": "在该时间内未收到任何流式事件则中止请求。长时间无输出/工具执行较慢时可调大;设为 0 关闭。",
"label": "SSE 空闲超时(分钟)"
},
"title": "流式与超时"
},
"title": "模型服务",
"vertex_ai": {
"api_host_help": "Vertex AI 的 API 地址,不建议填写,通常适用于反向代理",

@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "使用自動偵測的 Git Bash",
"autoDiscoveredHint": "自動發現",
"clear": {
"button": "清除自訂路徑"
},
@ -39,6 +40,7 @@
"error": {
"description": "在 Windows 上執行 Agent 需要 Git Bash。沒有它 Agent 無法運作。請從以下網址安裝 Git for Windows",
"recheck": "重新偵測 Git Bash 安裝",
"required": "在 Windows 上需要設定 Git Bash 路徑",
"title": "需要 Git Bash"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "選擇的檔案不是有效的 Git Bash 可執行檔(bash.exe)。",
"title": "選擇 Git Bash 可執行檔"
},
"success": "成功偵測到 Git Bash!"
"placeholder": "選擇 bash.exe 路徑",
"success": "成功偵測到 Git Bash!",
"tooltip": "在 Windows 上執行 Agent 需要 Git Bash。如未安裝,請從 git-scm.com 下載安裝。"
},
"input": {
"placeholder": "在這裡輸入您的訊息,使用 {{key}} 傳送 - @ 選擇路徑,/ 選擇命令"
@ -544,14 +548,23 @@
"more": "助手設定",
"prompt": "提示詞設定",
"reasoning_effort": {
"auto": "自動",
"auto_description": "彈性決定推理投入的心力",
"default": "預設",
"default_description": "依賴模型的預設行為,無需任何配置。",
"high": "盡力思考",
"high_description": "高級推理",
"label": "思維鏈長度",
"low": "稍微思考",
"low_description": "低階推理",
"medium": "正常思考",
"medium_description": "中等程度推理",
"minimal": "最少思考",
"minimal_description": "最少推理",
"off": "關閉",
"xhigh": "極力思考"
"off_description": "禁用推理",
"xhigh": "極力思考",
"xhigh_description": "超高階推理"
},
"regular_phrases": {
"add": "新增短語",
@ -2630,6 +2643,7 @@
"lanyun": "藍耘",
"lmstudio": "LM Studio",
"longcat": "龍貓",
"mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope 魔搭",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "自動安裝 MCP 服務(測試版)",
"memory": "基於本機知識圖譜的持久性記憶基礎實做。這使得模型能夠在不同對話間記住使用者的相關資訊。需要設定 MEMORY_FILE_PATH 環境變數。",
"no": "無描述",
"nowledge_mem": "需要本機執行 Nowledge Mem 應用程式。將 AI 對話、工具、筆記、代理和檔案保存在電腦上的私人記憶體中。請從 https://mem.nowledge.co/ 下載",
"python": "在安全的沙盒環境中執行 Python 程式碼。使用 Pyodide 執行 Python,支援大多數標準函式庫和科學計算套件",
"sequentialthinking": "一個 MCP 伺服器實做,提供了透過結構化思維過程進行動態和反思性問題解決的工具"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "刪除無效金鑰",
"search": "搜尋模型平臺...",
"search_placeholder": "搜尋模型 ID 或名稱",
"streaming": {
"description": "設定長時間串流請求(例如 OpenAI Responses SSE)的用戶端限制。",
"label": "串流與逾時",
"max_tool_steps": {
"help": "AI SDK 的工具呼叫會多輪循環。此值用於避免無限循環;若工作流程需要更多輪工具呼叫請調大。",
"label": "最大工具步數"
},
"request_timeout": {
"help": "超過此時間用戶端將中止請求。設為 0 表示不額外限制。",
"label": "請求硬逾時(分鐘)"
},
"reset": "還原預設值",
"sse_idle_timeout": {
"help": "在此時間內未收到任何串流事件則中止請求。長時間無輸出/工具執行較慢時可調大;設為 0 關閉。",
"label": "SSE 閒置逾時(分鐘)"
},
"title": "串流與逾時"
},
"title": "模型提供者",
"vertex_ai": {
"api_host_help": "Vertex AI 的 API 位址,不建議填寫,通常適用於反向代理",

@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "Automatisch ermitteltes Git Bash wird verwendet",
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
"clear": {
"button": "Benutzerdefinierten Pfad löschen"
},
@ -39,6 +40,7 @@
"error": {
"description": "Git Bash ist erforderlich, um Agents unter Windows auszuführen. Der Agent kann ohne es nicht funktionieren. Bitte installieren Sie Git für Windows von",
"recheck": "Überprüfe die Git Bash-Installation erneut",
"required": "[to be translated]:Git Bash path is required on Windows",
"title": "Git Bash erforderlich"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "Die ausgewählte Datei ist keine gültige Git Bash ausführbare Datei (bash.exe).",
"title": "Git Bash ausführbare Datei auswählen"
},
"success": "Git Bash erfolgreich erkannt!"
"placeholder": "[to be translated]:Select bash.exe path",
"success": "Git Bash erfolgreich erkannt!",
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "Gib hier deine Nachricht ein, senden mit {{key}} – @ Pfad auswählen, / Befehl auswählen"
@ -544,14 +548,23 @@
"more": "Assistenteneinstellungen",
"prompt": "Prompt-Einstellungen",
"reasoning_effort": {
"auto": "Auto",
"auto_description": "Denkaufwand flexibel bestimmen",
"default": "Standard",
"default_description": "Vom Standardverhalten des Modells abhängen, ohne Konfiguration.",
"high": "Tiefes Nachdenken",
"high_description": "Ganzheitliches Denken",
"label": "Gedankenkettenlänge",
"low": "Spontan",
"low_description": "Geringfügige Argumentation",
"medium": "Überlegt",
"medium_description": "Denken auf mittlerem Niveau",
"minimal": "Minimal",
"minimal_description": "Minimales Denken",
"off": "Aus",
"xhigh": "Extra hoch"
"off_description": "Denken deaktivieren",
"xhigh": "Extra hoch",
"xhigh_description": "Extra hohes Denkvermögen"
},
"regular_phrases": {
"add": "Phrase hinzufügen",
@ -2630,6 +2643,7 @@
"lanyun": "Lanyun Technologie",
"lmstudio": "LM Studio",
"longcat": "Meißner Riesenhamster",
"mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "MCP-Service automatisch installieren (Beta-Version)",
"memory": "MCP-Server mit persistenter Erinnerungsbasis auf lokalem Wissensgraphen, der Informationen über verschiedene Dialoge hinweg speichert. MEMORY_FILE_PATH-Umgebungsvariable muss konfiguriert werden",
"no": "Keine Beschreibung",
"nowledge_mem": "Erfordert lokal laufende Nowledge Mem App. Speichert KI-Chats, Tools, Notizen, Agenten und Dateien in einem privaten Speicher auf Ihrem Computer. Download unter https://mem.nowledge.co/",
"python": "Python-Code in einem sicheren Sandbox-Umgebung ausführen. Verwendung von Pyodide für Python, Unterstützung für die meisten Standardbibliotheken und wissenschaftliche Pakete",
"sequentialthinking": "MCP-Server-Implementierung mit strukturiertem Denkprozess, der dynamische und reflektierende Problemlösungen ermöglicht"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "Ungültige Schlüssel löschen",
"search": "Modellplattform suchen...",
"search_placeholder": "Modell-ID oder Name suchen",
"streaming": {
"description": "[to be translated]:Configure client-side limits for long-running streaming requests (e.g., OpenAI Responses SSE).",
"label": "[to be translated]:Streaming & Timeouts",
"max_tool_steps": {
"help": "[to be translated]:Maximum tool-calling steps for agent workflows. Increase this if your workflow requires more than 20 steps.",
"label": "[to be translated]:Max tool steps"
},
"request_timeout": {
"help": "[to be translated]:Abort the request after this time. Set to 0 to disable.",
"label": "[to be translated]:Request hard timeout (minutes)"
},
"reset": "[to be translated]:Reset to defaults",
"sse_idle_timeout": {
"help": "[to be translated]:Abort if no stream events are received for this time. Increase it for long silent reasoning/tool runs; set to 0 to disable.",
"label": "[to be translated]:SSE idle timeout (minutes)"
},
"title": "[to be translated]:Streaming & Timeouts"
},
"title": "Modelldienst",
"vertex_ai": {
"api_host_help": "Vertex AI-API-Adresse, nicht empfohlen, normalerweise für Reverse-Proxy geeignet",

@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "Χρησιμοποιείται αυτόματα εντοπισμένο Git Bash",
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
"clear": {
"button": "Διαγραφή προσαρμοσμένης διαδρομής"
},
@ -39,6 +40,7 @@
"error": {
"description": "Το Git Bash απαιτείται για την εκτέλεση πρακτόρων στα Windows. Ο πράκτορας δεν μπορεί να λειτουργήσει χωρίς αυτό. Παρακαλούμε εγκαταστήστε το Git για Windows από",
"recheck": "Επανέλεγχος Εγκατάστασης του Git Bash",
"required": "[to be translated]:Git Bash path is required on Windows",
"title": "Απαιτείται Git Bash"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "Το επιλεγμένο αρχείο δεν είναι έγκυρο εκτελέσιμο Git Bash (bash.exe).",
"title": "Επιλογή εκτελέσιμου Git Bash"
},
"success": "Το Git Bash εντοπίστηκε με επιτυχία!"
"placeholder": "[to be translated]:Select bash.exe path",
"success": "Το Git Bash εντοπίστηκε με επιτυχία!",
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "Εισάγετε το μήνυμά σας εδώ, στείλτε με {{key}} - @ επιλέξτε διαδρομή, / επιλέξτε εντολή"
@ -544,14 +548,23 @@
"more": "Ρυθμίσεις Βοηθού",
"prompt": "Ρυθμίσεις προκαλύμματος",
"reasoning_effort": {
"auto": "Αυτοκίνητο",
"auto_description": "Ευέλικτος καθορισμός της προσπάθειας συλλογισμού",
"default": "Προεπιλογή",
"default_description": "Εξαρτηθείτε από την προεπιλεγμένη συμπεριφορά του μοντέλου, χωρίς καμία διαμόρφωση.",
"high": "Μεγάλο",
"high_description": "Υψηλού επιπέδου συλλογισμός",
"label": "Μήκος λογισμικού αλυσίδας",
"low": "Μικρό",
"low_description": "Χαμηλού επιπέδου συλλογιστική",
"medium": "Μεσαίο",
"medium_description": "Αιτιολόγηση μεσαίου επιπέδου",
"minimal": "ελάχιστος",
"minimal_description": "Ελάχιστος συλλογισμός",
"off": "Απενεργοποίηση",
"xhigh": "Εξαιρετικά Υψηλή"
"off_description": "Απενεργοποίηση λογικής",
"xhigh": "Εξαιρετικά Υψηλή",
"xhigh_description": "Εξαιρετικά υψηλού επιπέδου συλλογισμός"
},
"regular_phrases": {
"add": "Προσθήκη φράσης",
@ -2630,6 +2643,7 @@
"lanyun": "Λανιούν Τεχνολογία",
"lmstudio": "LM Studio",
"longcat": "Τσίρο",
"mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope Magpie",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "Αυτόματη εγκατάσταση υπηρεσίας MCP (προβολή)",
"memory": "Βασική υλοποίηση μόνιμης μνήμης με βάση τοπικό γράφημα γνώσης. Αυτό επιτρέπει στο μοντέλο να θυμάται πληροφορίες σχετικές με τον χρήστη ανάμεσα σε διαφορετικές συνομιλίες. Απαιτείται η ρύθμιση της μεταβλητής περιβάλλοντος MEMORY_FILE_PATH.",
"no": "Χωρίς περιγραφή",
"nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Εκτελέστε κώδικα Python σε ένα ασφαλές περιβάλλον sandbox. Χρησιμοποιήστε το Pyodide για να εκτελέσετε Python, υποστηρίζοντας την πλειονότητα των βιβλιοθηκών της τυπικής βιβλιοθήκης και των πακέτων επιστημονικού υπολογισμού",
"sequentialthinking": "ένας εξυπηρετητής MCP που υλοποιείται, παρέχοντας εργαλεία για δυναμική και αναστοχαστική επίλυση προβλημάτων μέσω δομημένων διαδικασιών σκέψης"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "Διαγραφή Ακυρωμένων Κλειδιών",
"search": "Αναζήτηση πλατφόρμας μονάδων...",
"search_placeholder": "Αναζήτηση ID ή ονόματος μονάδας",
"streaming": {
"description": "[to be translated]:Configure client-side limits for long-running streaming requests (e.g., OpenAI Responses SSE).",
"label": "[to be translated]:Streaming & Timeouts",
"max_tool_steps": {
"help": "[to be translated]:Maximum tool-calling steps for agent workflows. Increase this if your workflow requires more than 20 steps.",
"label": "[to be translated]:Max tool steps"
},
"request_timeout": {
"help": "[to be translated]:Abort the request after this time. Set to 0 to disable.",
"label": "[to be translated]:Request hard timeout (minutes)"
},
"reset": "[to be translated]:Reset to defaults",
"sse_idle_timeout": {
"help": "[to be translated]:Abort if no stream events are received for this time. Increase it for long silent reasoning/tool runs; set to 0 to disable.",
"label": "[to be translated]:SSE idle timeout (minutes)"
},
"title": "[to be translated]:Streaming & Timeouts"
},
"title": "Υπηρεσία μονάδων",
"vertex_ai": {
"api_host_help": "Η διεύθυνση API του Vertex AI, δεν συνιστάται να συμπληρωθεί, συνήθως κατάλληλη για αντίστροφη διαμεσολάβηση",

@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "Usando Git Bash detectado automáticamente",
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
"clear": {
"button": "Borrar ruta personalizada"
},
@ -39,6 +40,7 @@
"error": {
"description": "Se requiere Git Bash para ejecutar agentes en Windows. El agente no puede funcionar sin él. Instale Git para Windows desde",
"recheck": "Volver a verificar la instalación de Git Bash",
"required": "[to be translated]:Git Bash path is required on Windows",
"title": "Git Bash Requerido"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "El archivo seleccionado no es un ejecutable válido de Git Bash (bash.exe).",
"title": "Seleccionar ejecutable de Git Bash"
},
"success": "¡Git Bash detectado con éxito!"
"placeholder": "[to be translated]:Select bash.exe path",
"success": "¡Git Bash detectado con éxito!",
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "Introduce tu mensaje aquí, envía con {{key}} - @ seleccionar ruta, / seleccionar comando"
@ -544,14 +548,23 @@
"more": "Configuración del Asistente",
"prompt": "Configuración de Palabras Clave",
"reasoning_effort": {
"auto": "Automóvil",
"auto_description": "Determinar flexiblemente el esfuerzo de razonamiento",
"default": "Por defecto",
"default_description": "Depender del comportamiento predeterminado del modelo, sin ninguna configuración.",
"high": "Largo",
"high_description": "Razonamiento de alto nivel",
"label": "Longitud de Cadena de Razonamiento",
"low": "Corto",
"low_description": "Razonamiento de bajo nivel",
"medium": "Medio",
"medium_description": "Razonamiento de nivel medio",
"minimal": "minimal",
"minimal_description": "Razonamiento mínimo",
"off": "Apagado",
"xhigh": "Extra Alta"
"off_description": "Deshabilitar razonamiento",
"xhigh": "Extra Alta",
"xhigh_description": "Razonamiento de extra alto nivel"
},
"regular_phrases": {
"add": "Agregar frase",
@ -2630,6 +2643,7 @@
"lanyun": "Tecnología Lanyun",
"lmstudio": "Estudio LM",
"longcat": "Totoro",
"mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "Minimax",
"mistral": "Mistral",
"modelscope": "ModelScope Módulo",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "Instalación automática del servicio MCP (versión beta)",
"memory": "Implementación básica de memoria persistente basada en un grafo de conocimiento local. Esto permite que el modelo recuerde información relevante del usuario entre diferentes conversaciones. Es necesario configurar la variable de entorno MEMORY_FILE_PATH.",
"no": "sin descripción",
"nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Ejecuta código Python en un entorno sandbox seguro. Usa Pyodide para ejecutar Python, compatible con la mayoría de las bibliotecas estándar y paquetes de cálculo científico.",
"sequentialthinking": "Una implementación de servidor MCP que proporciona herramientas para la resolución dinámica y reflexiva de problemas mediante un proceso de pensamiento estructurado"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "Eliminar claves inválidas",
"search": "Buscar plataforma de modelos...",
"search_placeholder": "Buscar ID o nombre del modelo",
"streaming": {
"description": "[to be translated]:Configure client-side limits for long-running streaming requests (e.g., OpenAI Responses SSE).",
"label": "[to be translated]:Streaming & Timeouts",
"max_tool_steps": {
"help": "[to be translated]:Maximum tool-calling steps for agent workflows. Increase this if your workflow requires more than 20 steps.",
"label": "[to be translated]:Max tool steps"
},
"request_timeout": {
"help": "[to be translated]:Abort the request after this time. Set to 0 to disable.",
"label": "[to be translated]:Request hard timeout (minutes)"
},
"reset": "[to be translated]:Reset to defaults",
"sse_idle_timeout": {
"help": "[to be translated]:Abort if no stream events are received for this time. Increase it for long silent reasoning/tool runs; set to 0 to disable.",
"label": "[to be translated]:SSE idle timeout (minutes)"
},
"title": "[to be translated]:Streaming & Timeouts"
},
"title": "Servicio de modelos",
"vertex_ai": {
"api_host_help": "Dirección de la API de Vertex AI, no se recomienda completar, normalmente aplicable al proxy inverso",

@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "Utilisation de Git Bash détecté automatiquement",
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
"clear": {
"button": "Effacer le chemin personnalisé"
},
@ -39,6 +40,7 @@
"error": {
"description": "Git Bash est requis pour exécuter des agents sur Windows. L'agent ne peut pas fonctionner sans. Veuillez installer Git pour Windows depuis",
"recheck": "Revérifier l'installation de Git Bash",
"required": "[to be translated]:Git Bash path is required on Windows",
"title": "Git Bash requis"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "Le fichier sélectionné n'est pas un exécutable Git Bash valide (bash.exe).",
"title": "Sélectionner l'exécutable Git Bash"
},
"success": "Git Bash détecté avec succès !"
"placeholder": "[to be translated]:Select bash.exe path",
"success": "Git Bash détecté avec succès !",
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "Entrez votre message ici, envoyez avec {{key}} - @ sélectionner le chemin, / sélectionner la commande"
@ -544,14 +548,23 @@
"more": "Paramètres de l'assistant",
"prompt": "Paramètres de l'invite",
"reasoning_effort": {
"auto": "Auto",
"auto_description": "Déterminer de manière flexible l'effort de raisonnement",
"default": "Par défaut",
"default_description": "Dépendre du comportement par défaut du modèle, sans aucune configuration.",
"high": "Long",
"high_description": "Raisonnement de haut niveau",
"label": "Longueur de la chaîne de raisonnement",
"low": "Court",
"low_description": "Raisonnement de bas niveau",
"medium": "Moyen",
"medium_description": "Raisonnement de niveau moyen",
"minimal": "minimal",
"minimal_description": "Réflexion minimale",
"off": "Off",
"xhigh": "Très élevée"
"off_description": "Désactiver le raisonnement",
"xhigh": "Très élevée",
"xhigh_description": "Raisonnement de très haut niveau"
},
"regular_phrases": {
"add": "Добавить фразу",
@ -2630,6 +2643,7 @@
"lanyun": "Technologie Lan Yun",
"lmstudio": "Studio LM",
"longcat": "Mon voisin Totoro",
"mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope MoDa",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "Installation automatique du service MCP (version bêta)",
"memory": "Implémentation de base de mémoire persistante basée sur un graphe de connaissances local. Cela permet au modèle de se souvenir des informations relatives à l'utilisateur entre différentes conversations. Nécessite la configuration de la variable d'environnement MEMORY_FILE_PATH.",
"no": "sans description",
"nowledge_mem": "[to be translated]:Requires Nowledge Mem app running locally. Keeps AI chats, tools, notes, agents, and files in private memory on your computer. Download from https://mem.nowledge.co/",
"python": "Exécutez du code Python dans un environnement bac à sable sécurisé. Utilisez Pyodide pour exécuter Python, prenant en charge la plupart des bibliothèques standard et des packages de calcul scientifique.",
"sequentialthinking": "Un serveur MCP qui fournit des outils permettant une résolution dynamique et réflexive des problèmes à travers un processus de pensée structuré"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "Supprimer les clés invalides",
"search": "Rechercher une plateforme de modèles...",
"search_placeholder": "Rechercher un ID ou un nom de modèle",
"streaming": {
"description": "[to be translated]:Configure client-side limits for long-running streaming requests (e.g., OpenAI Responses SSE).",
"label": "[to be translated]:Streaming & Timeouts",
"max_tool_steps": {
"help": "[to be translated]:Maximum tool-calling steps for agent workflows. Increase this if your workflow requires more than 20 steps.",
"label": "[to be translated]:Max tool steps"
},
"request_timeout": {
"help": "[to be translated]:Abort the request after this time. Set to 0 to disable.",
"label": "[to be translated]:Request hard timeout (minutes)"
},
"reset": "[to be translated]:Reset to defaults",
"sse_idle_timeout": {
"help": "[to be translated]:Abort if no stream events are received for this time. Increase it for long silent reasoning/tool runs; set to 0 to disable.",
"label": "[to be translated]:SSE idle timeout (minutes)"
},
"title": "[to be translated]:Streaming & Timeouts"
},
"title": "Services de modèles",
"vertex_ai": {
"api_host_help": "Adresse API de Vertex AI, il n'est pas recommandé de la remplir, généralement utilisée pour un proxy inverse",

@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "自動検出されたGit Bashを使用中",
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
"clear": {
"button": "カスタムパスをクリア"
},
@ -39,6 +40,7 @@
"error": {
"description": "Windowsでエージェントを実行するにはGit Bashが必要です。これがないとエージェントは動作しません。以下からGit for Windowsをインストールしてください。",
"recheck": "Git Bashのインストールを再確認してください",
"required": "[to be translated]:Git Bash path is required on Windows",
"title": "Git Bashが必要です"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "選択されたファイルは有効なGit Bash実行ファイル(bash.exe)ではありません。",
"title": "Git Bash実行ファイルを選択"
},
"success": "Git Bashが正常に検出されました!"
"placeholder": "[to be translated]:Select bash.exe path",
"success": "Git Bashが正常に検出されました!",
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "メッセージをここに入力し、{{key}}で送信 - @でパスを選択、/でコマンドを選択"
@ -544,14 +548,23 @@
"more": "アシスタント設定",
"prompt": "プロンプト設定",
"reasoning_effort": {
"auto": "自動",
"auto_description": "推論にかける労力を柔軟に調整する",
"default": "デフォルト",
"default_description": "設定なしで、モデルの既定の動作に依存する。",
"high": "最大限の思考",
"high_description": "高度な推論",
"label": "思考連鎖の長さ",
"low": "少しの思考",
"low_description": "低レベル推論",
"medium": "普通の思考",
"medium_description": "中レベル推論",
"minimal": "最小限の思考",
"minimal_description": "最小限の推論",
"off": "オフ",
"xhigh": "超高"
"off_description": "推論を無効にする",
"xhigh": "超高",
"xhigh_description": "超高度な推論"
},
"regular_phrases": {
"add": "プロンプトを追加",
@ -2630,6 +2643,7 @@
"lanyun": "LANYUN",
"lmstudio": "LM Studio",
"longcat": "トトロ",
"mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "MCPサービスの自動インストール(ベータ版)",
"memory": "ローカルのナレッジグラフに基づく永続的なメモリの基本的な実装です。これにより、モデルは異なる会話間でユーザーの関連情報を記憶できるようになります。MEMORY_FILE_PATH 環境変数の設定が必要です。",
"no": "説明なし",
"nowledge_mem": "Nowledge Mem アプリをローカルで実行する必要があります。AI チャット、ツール、ノート、エージェント、ファイルをコンピューター上のプライベートメモリに保存します。https://mem.nowledge.co/ からダウンロードしてください",
"python": "安全なサンドボックス環境でPythonコードを実行します。Pyodideを使用してPythonを実行し、ほとんどの標準ライブラリと科学計算パッケージをサポートしています。",
"sequentialthinking": "構造化された思考プロセスを通じて動的かつ反省的な問題解決を行うためのツールを提供するMCPサーバーの実装"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "無効なキーを削除",
"search": "プロバイダーを検索...",
"search_placeholder": "モデルIDまたは名前を検索",
"streaming": {
"description": "[to be translated]:Configure client-side limits for long-running streaming requests (e.g., OpenAI Responses SSE).",
"label": "[to be translated]:Streaming & Timeouts",
"max_tool_steps": {
"help": "[to be translated]:Maximum tool-calling steps for agent workflows. Increase this if your workflow requires more than 20 steps.",
"label": "[to be translated]:Max tool steps"
},
"request_timeout": {
"help": "[to be translated]:Abort the request after this time. Set to 0 to disable.",
"label": "[to be translated]:Request hard timeout (minutes)"
},
"reset": "[to be translated]:Reset to defaults",
"sse_idle_timeout": {
"help": "[to be translated]:Abort if no stream events are received for this time. Increase it for long silent reasoning/tool runs; set to 0 to disable.",
"label": "[to be translated]:SSE idle timeout (minutes)"
},
"title": "[to be translated]:Streaming & Timeouts"
},
"title": "モデルプロバイダー",
"vertex_ai": {
"api_host_help": "Vertex AIのAPIアドレス。逆プロキシに適しています。",

@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "Usando Git Bash detectado automaticamente",
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
"clear": {
"button": "Limpar caminho personalizado"
},
@ -39,6 +40,7 @@
"error": {
"description": "O Git Bash é necessário para executar agentes no Windows. O agente não pode funcionar sem ele. Por favor, instale o Git para Windows a partir de",
"recheck": "Reverificar a Instalação do Git Bash",
"required": "[to be translated]:Git Bash path is required on Windows",
"title": "Git Bash Necessário"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "O arquivo selecionado não é um executável válido do Git Bash (bash.exe).",
"title": "Selecionar executável do Git Bash"
},
"success": "Git Bash detectado com sucesso!"
"placeholder": "[to be translated]:Select bash.exe path",
"success": "Git Bash detectado com sucesso!",
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "Digite sua mensagem aqui, envie com {{key}} - @ selecionar caminho, / selecionar comando"
@ -544,14 +548,23 @@
"more": "Configurações do Assistente",
"prompt": "Configurações de Prompt",
"reasoning_effort": {
"auto": "Automóvel",
"auto_description": "Determinar flexivelmente o esforço de raciocínio",
"default": "Padrão",
"default_description": "Depender do comportamento padrão do modelo, sem qualquer configuração.",
"high": "Longo",
"high_description": "Raciocínio de alto nível",
"label": "Comprimento da Cadeia de Raciocínio",
"low": "Curto",
"low_description": "Raciocínio de baixo nível",
"medium": "Médio",
"medium_description": "Raciocínio de nível médio",
"minimal": "mínimo",
"minimal_description": "Raciocínio mínimo",
"off": "Desligado",
"xhigh": "Extra Alta"
"off_description": "Desabilitar raciocínio",
"xhigh": "Extra Alta",
"xhigh_description": "Raciocínio de altíssimo nível"
},
"regular_phrases": {
"add": "Adicionar Frase",
@ -2630,6 +2643,7 @@
"lanyun": "Lanyun Tecnologia",
"lmstudio": "Estúdio LM",
"longcat": "Totoro",
"mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "Minimax",
"mistral": "Mistral",
"modelscope": "ModelScope MôDá",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "Instalação automática do serviço MCP (beta)",
"memory": "Implementação base de memória persistente baseada em grafos de conhecimento locais. Isso permite que o modelo lembre informações relevantes do utilizador entre diferentes conversas. É necessário configurar a variável de ambiente MEMORY_FILE_PATH.",
"no": "sem descrição",
"nowledge_mem": "Requer a aplicação Nowledge Mem em execução localmente. Mantém conversas de IA, ferramentas, notas, agentes e ficheiros numa memória privada no seu computador. Transfira de https://mem.nowledge.co/",
"python": "Executar código Python num ambiente sandbox seguro. Utilizar Pyodide para executar Python, suportando a maioria das bibliotecas padrão e pacotes de computação científica",
"sequentialthinking": "Uma implementação de servidor MCP que fornece ferramentas para resolução dinâmica e reflexiva de problemas através de um processo de pensamento estruturado"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "Remover chaves inválidas",
"search": "Procurar plataforma de modelos...",
"search_placeholder": "Procurar ID ou nome do modelo",
"streaming": {
"description": "[to be translated]:Configure client-side limits for long-running streaming requests (e.g., OpenAI Responses SSE).",
"label": "[to be translated]:Streaming & Timeouts",
"max_tool_steps": {
"help": "[to be translated]:Maximum tool-calling steps for agent workflows. Increase this if your workflow requires more than 20 steps.",
"label": "[to be translated]:Max tool steps"
},
"request_timeout": {
"help": "[to be translated]:Abort the request after this time. Set to 0 to disable.",
"label": "[to be translated]:Request hard timeout (minutes)"
},
"reset": "[to be translated]:Reset to defaults",
"sse_idle_timeout": {
"help": "[to be translated]:Abort if no stream events are received for this time. Increase it for long silent reasoning/tool runs; set to 0 to disable.",
"label": "[to be translated]:SSE idle timeout (minutes)"
},
"title": "[to be translated]:Streaming & Timeouts"
},
"title": "Serviços de Modelos",
"vertex_ai": {
"api_host_help": "O endereço da API do Vertex AI, não é recomendado preencher, normalmente aplicável a proxy reverso",

@ -32,6 +32,7 @@
},
"gitBash": {
"autoDetected": "Используется автоматически обнаруженный Git Bash",
"autoDiscoveredHint": "[to be translated]:Auto-discovered",
"clear": {
"button": "Очистить пользовательский путь"
},
@ -39,6 +40,7 @@
"error": {
"description": "Для запуска агентов в Windows требуется Git Bash. Без него агент не может работать. Пожалуйста, установите Git для Windows с",
"recheck": "Повторная проверка установки Git Bash",
"required": "[to be translated]:Git Bash path is required on Windows",
"title": "Требуется Git Bash"
},
"found": {
@ -51,7 +53,9 @@
"invalidPath": "Выбранный файл не является допустимым исполняемым файлом Git Bash (bash.exe).",
"title": "Выберите исполняемый файл Git Bash"
},
"success": "Git Bash успешно обнаружен!"
"placeholder": "[to be translated]:Select bash.exe path",
"success": "Git Bash успешно обнаружен!",
"tooltip": "[to be translated]:Git Bash is required to run agents on Windows. Install from git-scm.com if not available."
},
"input": {
"placeholder": "Введите ваше сообщение здесь, отправьте с помощью {{key}} — @ выбрать путь, / выбрать команду"
@ -544,14 +548,23 @@
"more": "Настройки ассистента",
"prompt": "Настройки промптов",
"reasoning_effort": {
"auto": "Авто",
"auto_description": "Гибко определяйте усилие на рассуждение",
"default": "По умолчанию",
"default_description": "Полагаться на поведение модели по умолчанию, без какой-либо конфигурации.",
"high": "Стараюсь думать",
"high_description": "Высокоуровневое рассуждение",
"label": "Настройки размышлений",
"low": "Меньше думать",
"low_description": "Низкоуровневое рассуждение",
"medium": "Среднее",
"medium_description": "Средний уровень рассуждения",
"minimal": "минимальный",
"minimal_description": "Минимальное рассуждение",
"off": "Выключить",
"xhigh": "Сверхвысокое"
"off_description": "Отключить рассуждение",
"xhigh": "Сверхвысокое",
"xhigh_description": "Высочайший уровень рассуждений"
},
"regular_phrases": {
"add": "Добавить подсказку",
@ -2630,6 +2643,7 @@
"lanyun": "LANYUN",
"lmstudio": "LM Studio",
"longcat": "Тоторо",
"mimo": "[to be translated]:Xiaomi MiMo",
"minimax": "MiniMax",
"mistral": "Mistral",
"modelscope": "ModelScope",
@ -3926,6 +3940,7 @@
"mcp_auto_install": "Автоматическая установка службы MCP (бета-версия)",
"memory": "реализация постоянной памяти на основе локального графа знаний. Это позволяет модели запоминать информацию о пользователе между различными диалогами. Требуется настроить переменную среды MEMORY_FILE_PATH.",
"no": "без описания",
"nowledge_mem": "Требуется запущенное локально приложение Nowledge Mem. Хранит чаты ИИ, инструменты, заметки, агентов и файлы в приватной памяти на вашем компьютере. Скачать можно на https://mem.nowledge.co/",
"python": "Выполняйте код Python в безопасной песочнице. Запускайте Python с помощью Pyodide, поддерживается большинство стандартных библиотек и пакетов для научных вычислений",
"sequentialthinking": "MCP серверная реализация, предоставляющая инструменты для динамического и рефлексивного решения проблем посредством структурированного мыслительного процесса"
},
@ -4564,6 +4579,24 @@
"remove_invalid_keys": "Удалить недействительные ключи",
"search": "Поиск поставщиков...",
"search_placeholder": "Поиск по ID или имени модели",
"streaming": {
"description": "[to be translated]:Configure client-side limits for long-running streaming requests (e.g., OpenAI Responses SSE).",
"label": "[to be translated]:Streaming & Timeouts",
"max_tool_steps": {
"help": "[to be translated]:Maximum tool-calling steps for agent workflows. Increase this if your workflow requires more than 20 steps.",
"label": "[to be translated]:Max tool steps"
},
"request_timeout": {
"help": "[to be translated]:Abort the request after this time. Set to 0 to disable.",
"label": "[to be translated]:Request hard timeout (minutes)"
},
"reset": "[to be translated]:Reset to defaults",
"sse_idle_timeout": {
"help": "[to be translated]:Abort if no stream events are received for this time. Increase it for long silent reasoning/tool runs; set to 0 to disable.",
"label": "[to be translated]:SSE idle timeout (minutes)"
},
"title": "[to be translated]:Streaming & Timeouts"
},
"title": "Провайдеры моделей",
"vertex_ai": {
"api_host_help": "API-адрес Vertex AI, не рекомендуется заполнять, обычно применим к обратным прокси",

@ -6,7 +6,8 @@ import {
MdiLightbulbOn30,
MdiLightbulbOn50,
MdiLightbulbOn80,
MdiLightbulbOn90
MdiLightbulbOn90,
MdiLightbulbQuestion
} from '@renderer/components/Icons/SVGIcon'
import { QuickPanelReservedSymbol, useQuickPanel } from '@renderer/components/QuickPanel'
import {
@ -18,7 +19,6 @@ import {
MODEL_SUPPORTED_OPTIONS
} from '@renderer/config/models'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { getReasoningEffortOptionsLabel } from '@renderer/i18n/label'
import type { ToolQuickPanelApi } from '@renderer/pages/home/Inputbar/types'
import type { Model, ThinkingOption } from '@renderer/types'
import { Tooltip } from 'antd'
@ -88,19 +88,48 @@ const ThinkingButton: FC<Props> = ({ quickPanel, model, assistantId }): ReactEle
[updateAssistantSettings, assistant.enableWebSearch, model, t]
)

const reasoningEffortOptionLabelMap = {
default: t('assistants.settings.reasoning_effort.default'),
none: t('assistants.settings.reasoning_effort.off'),
minimal: t('assistants.settings.reasoning_effort.minimal'),
high: t('assistants.settings.reasoning_effort.high'),
low: t('assistants.settings.reasoning_effort.low'),
medium: t('assistants.settings.reasoning_effort.medium'),
auto: t('assistants.settings.reasoning_effort.auto'),
xhigh: t('assistants.settings.reasoning_effort.xhigh')
} as const satisfies Record<ThinkingOption, string>

const reasoningEffortDescriptionMap = {
default: t('assistants.settings.reasoning_effort.default_description'),
none: t('assistants.settings.reasoning_effort.off_description'),
minimal: t('assistants.settings.reasoning_effort.minimal_description'),
low: t('assistants.settings.reasoning_effort.low_description'),
medium: t('assistants.settings.reasoning_effort.medium_description'),
high: t('assistants.settings.reasoning_effort.high_description'),
xhigh: t('assistants.settings.reasoning_effort.xhigh_description'),
auto: t('assistants.settings.reasoning_effort.auto_description')
} as const satisfies Record<ThinkingOption, string>

const panelItems = useMemo(() => {
// Build the UI items from the options defined in the table
return supportedOptions.map((option) => ({
level: option,
label: getReasoningEffortOptionsLabel(option),
description: '',
label: reasoningEffortOptionLabelMap[option],
description: reasoningEffortDescriptionMap[option],
icon: ThinkingIcon({ option }),
isSelected: currentReasoningEffort === option,
action: () => onThinkingChange(option)
}))
}, [currentReasoningEffort, supportedOptions, onThinkingChange])
}, [
supportedOptions,
reasoningEffortOptionLabelMap,
reasoningEffortDescriptionMap,
currentReasoningEffort,
onThinkingChange
])

const isThinkingEnabled = currentReasoningEffort !== undefined && currentReasoningEffort !== 'none'
const isThinkingEnabled =
currentReasoningEffort !== undefined && currentReasoningEffort !== 'none' && currentReasoningEffort !== 'default'

const disableThinking = useCallback(() => {
onThinkingChange('none')
@ -197,8 +226,9 @@ const ThinkingIcon = (props: { option?: ThinkingOption; isFixedReasoning?: boole
case 'none':
IconComponent = MdiLightbulbOffOutline
break
case 'default':
default:
IconComponent = MdiLightbulbOffOutline
IconComponent = MdiLightbulbQuestion
break
}
}

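The two maps above use `as const satisfies Record<ThinkingOption, string>`, which keeps the literal value types while forcing every ThinkingOption to be covered. A standalone sketch of that check (the option union is inferred from the maps in this diff; the label strings are illustrative):

// Sketch: exhaustiveness via `satisfies` without widening the object type.
type ThinkingOption = 'default' | 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh' | 'auto'

const labels = {
  default: 'Default',
  none: 'Off',
  minimal: 'Minimal',
  low: 'Low',
  medium: 'Medium',
  high: 'High',
  xhigh: 'Extra High',
  auto: 'Auto'
} as const satisfies Record<ThinkingOption, string>
// Dropping any key (e.g. 'default') now becomes a compile-time error.
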
@ -36,7 +36,7 @@ import {
import { Button, Divider, Flex, Input, Select, Space, Switch, Tooltip } from 'antd'
import Link from 'antd/es/typography/Link'
import { debounce, isEmpty } from 'lodash'
import { Bolt, Check, Settings2, SquareArrowOutUpRight, TriangleAlert } from 'lucide-react'
import { Bolt, Check, Settings2, SquareArrowOutUpRight, Timer, TriangleAlert } from 'lucide-react'
import type { FC } from 'react'
import { useCallback, useEffect, useMemo, useState } from 'react'
import { useTranslation } from 'react-i18next'
@ -61,6 +61,7 @@ import LMStudioSettings from './LMStudioSettings'
import OVMSSettings from './OVMSSettings'
import ProviderOAuth from './ProviderOAuth'
import SelectProviderModelPopup from './SelectProviderModelPopup'
import StreamingSettingsPopup from './StreamingSettings/StreamingSettingsPopup'
import VertexAISettings from './VertexAISettings'

interface Props {
@ -80,7 +81,8 @@ const ANTHROPIC_COMPATIBLE_PROVIDER_IDS = [
SystemProviderIds.minimax,
SystemProviderIds.silicon,
SystemProviderIds.qiniu,
SystemProviderIds.dmxapi
SystemProviderIds.dmxapi,
SystemProviderIds.mimo
] as const
type AnthropicCompatibleProviderId = (typeof ANTHROPIC_COMPATIBLE_PROVIDER_IDS)[number]

@ -394,6 +396,14 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
<Button type="text" size="small" icon={<SquareArrowOutUpRight size={14} />} />
</Link>
)}
<Tooltip title={t('settings.provider.streaming.label')}>
<Button
type="text"
icon={<Timer size={14} />}
size="small"
onClick={() => StreamingSettingsPopup.show({ providerId: provider.id })}
/>
</Tooltip>
{!isSystemProvider(provider) && (
<Tooltip title={t('settings.provider.api.options.label')}>
<Button
@ -417,6 +427,19 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
/>
</SettingTitle>
<Divider style={{ width: '100%', margin: '10px 0' }} />
<SettingSubtitle style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between', marginTop: 5 }}>
<div className="flex items-center gap-1">{t('settings.provider.streaming.title')}</div>
<Tooltip title={t('settings.provider.streaming.label')} mouseEnterDelay={0.3}>
<Button
type="text"
onClick={() => StreamingSettingsPopup.show({ providerId: provider.id })}
icon={<Timer size={16} />}
/>
</Tooltip>
</SettingSubtitle>
<SettingHelpTextRow style={{ paddingTop: 0 }}>
<SettingHelpText>{t('settings.provider.streaming.description')}</SettingHelpText>
</SettingHelpTextRow>
{isProviderSupportAuth(provider) && <ProviderOAuth providerId={provider.id} />}
{provider.id === 'openai' && <OpenAIAlert />}
{provider.id === 'ovms' && <OVMSSettings />}

@ -0,0 +1,116 @@
import { DEFAULT_MAX_TOOL_STEPS, MAX_MAX_TOOL_STEPS } from '@renderer/aiCore/utils/streamingTimeout'
import { HStack } from '@renderer/components/Layout'
import { InfoTooltip } from '@renderer/components/TooltipIcons'
import { useProvider } from '@renderer/hooks/useProvider'
import type { Provider } from '@renderer/types'
import { Button, Flex, InputNumber } from 'antd'
import { startTransition, useCallback } from 'react'
import { useTranslation } from 'react-i18next'

import { SettingHelpText, SettingHelpTextRow, SettingSubtitle } from '../..'

type Props = {
providerId: string
}

const StreamingSettings = ({ providerId }: Props) => {
const { t } = useTranslation()
const { provider, updateProvider } = useProvider(providerId)

const updateProviderTransition = useCallback(
(updates: Partial<Provider>) => {
startTransition(() => {
updateProvider(updates)
})
},
[updateProvider]
)

const requestTimeoutMinutes = provider.requestTimeoutMinutes ?? 0
const sseIdleTimeoutMinutes = provider.sseIdleTimeoutMinutes ?? 0
const maxToolSteps = provider.maxToolSteps ?? DEFAULT_MAX_TOOL_STEPS

return (
<Flex vertical gap="middle">
<SettingSubtitle style={{ marginTop: 0 }}>{t('settings.provider.streaming.title')}</SettingSubtitle>
<SettingHelpTextRow style={{ paddingTop: 0 }}>
<SettingHelpText>{t('settings.provider.streaming.description')}</SettingHelpText>
</SettingHelpTextRow>

<HStack justifyContent="space-between" alignItems="center">
<HStack alignItems="center" gap={6}>
<label style={{ cursor: 'pointer' }} htmlFor="provider-request-timeout-minutes">
{t('settings.provider.streaming.request_timeout.label')}
</label>
<InfoTooltip title={t('settings.provider.streaming.request_timeout.help')}></InfoTooltip>
</HStack>
<InputNumber
id="provider-request-timeout-minutes"
min={0}
max={720}
step={1}
value={requestTimeoutMinutes}
onChange={(value) => {
updateProviderTransition({ requestTimeoutMinutes: value ?? 0 })
}}
style={{ width: 160 }}
/>
</HStack>

<HStack justifyContent="space-between" alignItems="center">
<HStack alignItems="center" gap={6}>
<label style={{ cursor: 'pointer' }} htmlFor="provider-sse-idle-timeout-minutes">
{t('settings.provider.streaming.sse_idle_timeout.label')}
</label>
<InfoTooltip title={t('settings.provider.streaming.sse_idle_timeout.help')}></InfoTooltip>
</HStack>
<InputNumber
id="provider-sse-idle-timeout-minutes"
min={0}
max={720}
step={1}
value={sseIdleTimeoutMinutes}
onChange={(value) => {
updateProviderTransition({ sseIdleTimeoutMinutes: value ?? 0 })
}}
style={{ width: 160 }}
/>
</HStack>

<HStack justifyContent="space-between" alignItems="center">
<HStack alignItems="center" gap={6}>
<label style={{ cursor: 'pointer' }} htmlFor="provider-max-tool-steps">
{t('settings.provider.streaming.max_tool_steps.label')}
</label>
<InfoTooltip title={t('settings.provider.streaming.max_tool_steps.help')}></InfoTooltip>
</HStack>
<InputNumber
id="provider-max-tool-steps"
min={1}
max={MAX_MAX_TOOL_STEPS}
step={1}
value={maxToolSteps}
onChange={(value) => {
updateProviderTransition({ maxToolSteps: value ?? DEFAULT_MAX_TOOL_STEPS })
}}
style={{ width: 160 }}
/>
</HStack>

<HStack justifyContent="flex-end">
<Button
onClick={() => {
updateProviderTransition({
requestTimeoutMinutes: 0,
sseIdleTimeoutMinutes: 0,
maxToolSteps: DEFAULT_MAX_TOOL_STEPS
})
}}>
{t('settings.provider.streaming.reset')}
</Button>
</HStack>
</Flex>
)
}

export default StreamingSettings
@ -0,0 +1,66 @@
import { TopView } from '@renderer/components/TopView'
import { Modal } from 'antd'
import { useState } from 'react'
import { useTranslation } from 'react-i18next'

import StreamingSettings from './StreamingSettings'

interface ShowParams {
  providerId: string
}

interface Props extends ShowParams {
  resolve: (data: any) => void
}

const PopupContainer: React.FC<Props> = ({ providerId, resolve }) => {
  const { t } = useTranslation()
  const [open, setOpen] = useState(true)

  const onCancel = () => {
    setOpen(false)
  }

  const onClose = () => {
    resolve({})
  }

  StreamingSettingsPopup.hide = onCancel

  return (
    <Modal
      title={t('settings.provider.streaming.title')}
      open={open}
      onCancel={onCancel}
      afterClose={onClose}
      transitionName="animation-move-down"
      styles={{ body: { padding: '20px 16px' } }}
      footer={null}
      centered>
      <StreamingSettings providerId={providerId} />
    </Modal>
  )
}

const TopViewKey = 'StreamingSettingsPopup'

export default class StreamingSettingsPopup {
  static topviewId = 0
  static hide() {
    TopView.hide(TopViewKey)
  }
  static show(props: ShowParams) {
    return new Promise<any>((resolve) => {
      TopView.show(
        <PopupContainer
          {...props}
          resolve={(v) => {
            resolve(v)
            TopView.hide(TopViewKey)
          }}
        />,
        TopViewKey
      )
    })
  }
}

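The popup follows the renderer's TopView pattern: the static show() mounts PopupContainer and returns a Promise that resolves only after the modal has fully closed (afterClose calls resolve). A minimal caller, mirroring the onClick wiring in the settings-header hunk above:

// Usage sketch -- the Promise resolves with {} once the modal closes.
const openStreamingSettings = async (providerId: string) => {
  await StreamingSettingsPopup.show({ providerId })
  // Settings are persisted live via useProvider, so there is nothing to read back here.
}
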
@ -187,7 +187,8 @@ export async function fetchChatCompletion({
    params: aiSdkParams,
    modelId,
    capabilities,
    webSearchPluginConfig
    webSearchPluginConfig,
    streamingConfig
  } = await buildStreamTextParams(messages, assistant, provider, {
    mcpTools: mcpTools,
    webSearchProviderId: assistant.webSearchProviderId,
@ -221,7 +222,8 @@ export async function fetchChatCompletion({
      assistant,
      topicId,
      callType: 'chat',
      uiMessages
      uiMessages,
      streamingConfig
    })
}

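buildStreamTextParams now also returns a streamingConfig, which fetchChatCompletion threads through unchanged to the stream call. The real type is defined beside buildStreamTextParams and is not part of this diff; assuming it simply bundles the three per-provider knobs added further down, it would look roughly like:

// Assumed shape only -- the actual definition lives next to buildStreamTextParams.
type StreamingConfig = {
  requestTimeoutMinutes?: number // 0/undefined: no client-side hard timeout
  sseIdleTimeoutMinutes?: number // 0/undefined: idle watchdog disabled
  maxToolSteps?: number // undefined: app default (DEFAULT_MAX_TOOL_STEPS)
}
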
@ -38,7 +38,8 @@ export const DEFAULT_ASSISTANT_SETTINGS = {
  enableTopP: false,
  // It would gracefully fallback to prompt if not supported by model.
  toolUseMode: 'function',
  customParameters: []
  customParameters: [],
  reasoning_effort: 'default'
} as const satisfies AssistantSettings

export function getDefaultAssistant(): Assistant {
@ -186,7 +187,7 @@ export const getAssistantSettings = (assistant: Assistant): AssistantSettings =>
    streamOutput: assistant?.settings?.streamOutput ?? true,
    toolUseMode: assistant?.settings?.toolUseMode ?? 'function',
    defaultModel: assistant?.defaultModel ?? undefined,
    reasoning_effort: assistant?.settings?.reasoning_effort ?? undefined,
    reasoning_effort: assistant?.settings?.reasoning_effort ?? 'default',
    customParameters: assistant?.settings?.customParameters ?? []
  }
}

@ -67,7 +67,7 @@ const persistedReducer = persistReducer(
  {
    key: 'cherry-studio',
    storage,
    version: 186,
    version: 187,
    blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'],
    migrate
  },

@ -183,6 +183,16 @@ export const builtinMCPServers: BuiltinMCPServer[] = [
    provider: 'CherryAI',
    installSource: 'builtin',
    isTrusted: true
  },
  {
    id: nanoid(),
    name: BuiltinMCPServerNames.nowledgeMem,
    reference: 'https://mem.nowledge.co/',
    type: 'inMemory',
    isActive: false,
    provider: 'Nowledge',
    installSource: 'builtin',
    isTrusted: true
  }
] as const

@ -3038,6 +3038,21 @@ const migrateConfig = {
      logger.error('migrate 186 error', error as Error)
      return state
    }
  },
  '187': (state: RootState) => {
    try {
      state.assistants.assistants.forEach((assistant) => {
        if (assistant.settings && assistant.settings.reasoning_effort === undefined) {
          assistant.settings.reasoning_effort = 'default'
        }
      })
      addProvider(state, 'mimo')
      logger.info('migrate 187 success')
      return state
    } catch (error) {
      logger.error('migrate 187 error', error as Error)
      return state
    }
  }
}

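The version bump to 187 in the persistReducer hunk above is what triggers this entry: on rehydration, redux-persist runs every migration keyed above the stored version, in ascending order. A minimal sketch of the standard wiring, assuming the stock createMigrate helper is used for the `migrate` option:

// Sketch -- createMigrate is the standard redux-persist helper.
import { createMigrate } from 'redux-persist'

const migrate = createMigrate(migrateConfig as any, { debug: false })
// then: persistReducer({ key: 'cherry-studio', storage, version: 187, migrate, ... }, rootReducer)
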
@ -102,13 +102,14 @@ const ThinkModelTypes = [
  'doubao',
  'doubao_no_auto',
  'doubao_after_251015',
  'mimo',
  'hunyuan',
  'zhipu',
  'perplexity',
  'deepseek_hybrid'
] as const

export type ReasoningEffortOption = NonNullable<OpenAI.ReasoningEffort> | 'auto'
export type ReasoningEffortOption = NonNullable<OpenAI.ReasoningEffort> | 'auto' | 'default'
export type ThinkingOption = ReasoningEffortOption
export type ThinkingModelType = (typeof ThinkModelTypes)[number]
export type ThinkingOptionConfig = Record<ThinkingModelType, ThinkingOption[]>
@ -120,6 +121,8 @@ export function isThinkModelType(type: string): type is ThinkingModelType {
}

export const EFFORT_RATIO: EffortRatio = {
  // 'default' is not expected to be used.
  default: 0,
  none: 0.01,
  minimal: 0.05,
  low: 0.05,
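EFFORT_RATIO maps each ReasoningEffortOption to a fraction of the model's token limit, with the new 'default' entry pinned to 0 since it is never meant to reach a request. A sketch of how such a ratio table is typically consumed (illustrative only, not the repo's actual call site):

// Hypothetical budget derivation -- demonstrates the ratio table only.
function thinkingBudget(maxTokens: number, effort: ReasoningEffortOption): number {
  const ratio = (EFFORT_RATIO as Record<string, number>)[effort] ?? 0
  return Math.floor(maxTokens * ratio) // 'default' yields 0, i.e. no explicit budget
}
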
@ -140,12 +143,11 @@ export type AssistantSettings = {
  streamOutput: boolean
  defaultModel?: Model
  customParameters?: AssistantSettingCustomParameters[]
  reasoning_effort?: ReasoningEffortOption
  /** Preserve the reasoning effort from the last use of a thinking model, and restore it when switching from a non-thinking model back to a thinking one.
   *
   * TODO: reasoning_effort === undefined currently has two meanings: in some scenarios it explicitly disables thinking, in others the parameter is simply omitted.
   * Thinking control should be refactored to separate enabling/disabling thinking from the thinking option, so this cache is no longer needed.
   *
  reasoning_effort: ReasoningEffortOption
  /**
   * Preserve the effective reasoning effort (not 'default') from the last use of a thinking model that supports thinking control,
   * and restore it when switching back from a non-thinking or fixed-reasoning model.
   * FIXME: This should be managed by an external cache service instead of being stored on the assistant.
   */
  reasoning_effort_cache?: ReasoningEffortOption
  qwenThinkMode?: boolean
@ -750,7 +752,8 @@ export const BuiltinMCPServerNames = {
  difyKnowledge: '@cherry/dify-knowledge',
  python: '@cherry/python',
  didiMCP: '@cherry/didi-mcp',
  browser: '@cherry/browser'
  browser: '@cherry/browser',
  nowledgeMem: '@cherry/nowledge-mem'
} as const

export type BuiltinMCPServerName = (typeof BuiltinMCPServerNames)[keyof typeof BuiltinMCPServerNames]

@ -114,6 +114,25 @@ export type Provider = {
  serviceTier?: ServiceTier
  verbosity?: OpenAIVerbosity

  /**
   * Client-side hard timeout for a single model request.
   * - `0`/`undefined`: no additional client-enforced timeout (recommended when upstream supports long-running streams).
   * - `> 0`: abort the request after N minutes.
   */
  requestTimeoutMinutes?: number
  /**
   * Client-side idle timeout for SSE streaming.
   * - `0`/`undefined`: disabled.
   * - `> 0`: abort the request if no stream events are received for N minutes.
   */
  sseIdleTimeoutMinutes?: number
  /**
   * Max tool/agent steps for AI SDK multi-step tool calling loop.
   * - `undefined`: uses the app default (currently 20).
   * - `> 0`: stop after N steps to avoid infinite loops.
   */
  maxToolSteps?: number

  /** @deprecated */
  isNotSupportArrayContent?: boolean
  /** @deprecated */
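The doc comments pin down the timeout semantics precisely; a minimal sketch of honoring them with standard Web APIs (helper name and wiring are illustrative, not the repo's implementation):

// Illustrative only: translate the minute-based setting into an AbortSignal.
function requestAbortSignal(provider: Provider): AbortSignal | undefined {
  const minutes = provider.requestTimeoutMinutes ?? 0
  if (minutes <= 0) return undefined // 0/undefined: no client-enforced timeout
  return AbortSignal.timeout(minutes * 60_000)
}
// sseIdleTimeoutMinutes needs a resettable timer instead: restart it on every
// received stream event and abort the request when it fires.
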
@ -189,7 +208,8 @@ export const SystemProviderIdSchema = z.enum([
  'huggingface',
  'sophnet',
  'gateway',
  'cerebras'
  'cerebras',
  'mimo'
])

export type SystemProviderId = z.infer<typeof SystemProviderIdSchema>
@ -258,7 +278,8 @@ export const SystemProviderIds = {
  longcat: 'longcat',
  huggingface: 'huggingface',
  gateway: 'gateway',
  cerebras: 'cerebras'
  cerebras: 'cerebras',
  mimo: 'mimo'
} as const satisfies Record<SystemProviderId, SystemProviderId>

type SystemProviderIdTypeMap = typeof SystemProviderIds

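The `satisfies Record<SystemProviderId, SystemProviderId>` constraint keeps the map in lockstep with the zod enum: adding 'mimo' to SystemProviderIdSchema without mirroring it in SystemProviderIds fails to compile. A reduced illustration of the pattern:

// Reduced example of the exhaustiveness guarantee used above.
type Id = 'cerebras' | 'mimo'
// Dropping the 'mimo' key would be a compile error:
// "Property 'mimo' is missing in type '{ cerebras: "cerebras"; }'"
const ids = { cerebras: 'cerebras', mimo: 'mimo' } as const satisfies Record<Id, Id>
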
@ -1,8 +1,15 @@
import '@testing-library/jest-dom/vitest'

import { createRequire } from 'node:module'
import { styleSheetSerializer } from 'jest-styled-components/serializer'
import { expect, vi } from 'vitest'

const require = createRequire(import.meta.url)
const bufferModule = require('buffer')
if (!bufferModule.SlowBuffer) {
  bufferModule.SlowBuffer = bufferModule.Buffer
}

expect.addSnapshotSerializer(styleSheetSerializer)

// Mock LoggerService globally for renderer tests
@ -48,3 +55,29 @@ vi.stubGlobal('api', {
    writeWithId: vi.fn().mockResolvedValue(undefined)
  }
})

if (typeof globalThis.localStorage === 'undefined' || typeof (globalThis.localStorage as any).getItem !== 'function') {
  let store = new Map<string, string>()

  const localStorageMock = {
    getItem: (key: string) => store.get(key) ?? null,
    setItem: (key: string, value: string) => {
      store.set(key, String(value))
    },
    removeItem: (key: string) => {
      store.delete(key)
    },
    clear: () => {
      store.clear()
    },
    key: (index: number) => Array.from(store.keys())[index] ?? null,
    get length() {
      return store.size
    }
  }

  vi.stubGlobal('localStorage', localStorageMock)
  if (typeof window !== 'undefined') {
    Object.defineProperty(window, 'localStorage', { value: localStorageMock })
  }
}

yarn.lock
@ -11246,7 +11246,7 @@ __metadata:
  languageName: node
  linkType: hard

"buffer-equal-constant-time@npm:1.0.1":
"buffer-equal-constant-time@npm:^1.0.1":
  version: 1.0.1
  resolution: "buffer-equal-constant-time@npm:1.0.1"
  checksum: 10c0/fb2294e64d23c573d0dd1f1e7a466c3e978fe94a4e0f8183937912ca374619773bef8e2aceb854129d2efecbbc515bbd0cc78d2734a3e3031edb0888531bbc8e
@ -17178,24 +17178,24 @@ __metadata:
  languageName: node
  linkType: hard

"jwa@npm:^2.0.0":
  version: 2.0.0
  resolution: "jwa@npm:2.0.0"
"jwa@npm:^2.0.1":
  version: 2.0.1
  resolution: "jwa@npm:2.0.1"
  dependencies:
    buffer-equal-constant-time: "npm:1.0.1"
    buffer-equal-constant-time: "npm:^1.0.1"
    ecdsa-sig-formatter: "npm:1.0.11"
    safe-buffer: "npm:^5.0.1"
  checksum: 10c0/6baab823b93c038ba1d2a9e531984dcadbc04e9eb98d171f4901b7a40d2be15961a359335de1671d78cb6d987f07cbe5d350d8143255977a889160c4d90fcc3c
  checksum: 10c0/ab3ebc6598e10dc11419d4ed675c9ca714a387481466b10e8a6f3f65d8d9c9237e2826f2505280a739cf4cbcf511cb288eeec22b5c9c63286fc5a2e4f97e78cf
  languageName: node
  linkType: hard

"jws@npm:^4.0.0":
  version: 4.0.0
  resolution: "jws@npm:4.0.0"
  version: 4.0.1
  resolution: "jws@npm:4.0.1"
  dependencies:
    jwa: "npm:^2.0.0"
    jwa: "npm:^2.0.1"
    safe-buffer: "npm:^5.0.1"
  checksum: 10c0/f1ca77ea5451e8dc5ee219cb7053b8a4f1254a79cb22417a2e1043c1eb8a569ae118c68f24d72a589e8a3dd1824697f47d6bd4fb4bebb93a3bdf53545e721661
  checksum: 10c0/6be1ed93023aef570ccc5ea8d162b065840f3ef12f0d1bb3114cade844de7a357d5dc558201d9a65101e70885a6fa56b17462f520e6b0d426195510618a154d0
  languageName: node
  linkType: hard