Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2026-01-01 01:30:51 +08:00)

Commit c19659daa5: Merge branch 'feat/agents-new' of github.com:CherryHQ/cherry-studio into feat/agents-new
.github/workflows/delete-branch.yml (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
name: Delete merged branch
on:
  pull_request:
    types:
      - closed

jobs:
  delete-branch:
    runs-on: ubuntu-latest
    permissions:
      contents: write
    if: github.event.pull_request.merged == true && github.event.pull_request.head.repo.full_name == github.repository
    steps:
      - name: Delete merged branch
        uses: actions/github-script@v7
        with:
          script: |
            github.rest.git.deleteRef({
              owner: context.repo.owner,
              repo: context.repo.repo,
              ref: `heads/${context.payload.pull_request.head.ref}`,
            })
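Note that github.rest.git.deleteRef rejects the call when the ref is already gone (commonly a 422 "Reference does not exist", for example when GitHub's "automatically delete head branches" setting removed it first). A hypothetical variant of the script above that tolerates that case, not part of this commit, could look like this:

            try {
              await github.rest.git.deleteRef({
                owner: context.repo.owner,
                repo: context.repo.repo,
                ref: `heads/${context.payload.pull_request.head.ref}`,
              })
            } catch (error: any) {
              // Assumption: a 422 here means the branch no longer exists, which is fine to ignore.
              if (error.status !== 422) throw error
            }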
.github/workflows/pr-ci.yml (vendored, 2 lines changed)
@@ -10,12 +10,14 @@ on:
      - main
      - develop
      - v2
    types: [ready_for_review, synchronize, opened]

jobs:
  build:
    runs-on: ubuntu-latest
    env:
      PRCI: true
    if: github.event.pull_request.draft == false

    steps:
      - name: Check out Git repository
.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
diff --git a/dist/index.mjs b/dist/index.mjs
index 110f37ec18c98b1d55ae2b73cc716194e6f9094d..91d0f336b318833c6cee9599fe91370c0ff75323 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -447,7 +447,10 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
 }
 
 // src/get-model-path.ts
-function getModelPath(modelId) {
+function getModelPath(modelId, baseURL) {
+  if (baseURL?.includes('cherryin')) {
+    return `models/${modelId}`;
+  }
   return modelId.includes("/") ? modelId : `models/${modelId}`;
 }
 
@@ -856,7 +859,8 @@ var GoogleGenerativeAILanguageModel = class {
     rawValue: rawResponse
   } = await postJsonToApi2({
     url: `${this.config.baseURL}/${getModelPath(
-      this.modelId
+      this.modelId,
+      this.config.baseURL
     )}:generateContent`,
     headers: mergedHeaders,
     body: args,
@@ -962,7 +966,8 @@ var GoogleGenerativeAILanguageModel = class {
   );
   const { responseHeaders, value: response } = await postJsonToApi2({
     url: `${this.config.baseURL}/${getModelPath(
-      this.modelId
+      this.modelId,
+      this.config.baseURL
     )}:streamGenerateContent?alt=sse`,
     headers,
     body: args,
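For context, a minimal sketch of what the patched path resolution does (plain TypeScript with hypothetical model ids; the actual patch above is applied to the compiled dist/index.mjs):

// Default @ai-sdk/google behavior: ids containing "/" are passed through unchanged;
// the patch forces the `models/` prefix whenever the provider baseURL points at CherryIN.
function getModelPath(modelId: string, baseURL?: string): string {
  if (baseURL?.includes('cherryin')) {
    return `models/${modelId}`
  }
  return modelId.includes('/') ? modelId : `models/${modelId}`
}

// getModelPath('gemini-2.0-flash')                 -> 'models/gemini-2.0-flash'
// getModelPath('tunedModels/example')              -> 'tunedModels/example'
// getModelPath('tunedModels/example', 'https://some-cherryin-gateway/v1beta') -> 'models/tunedModels/example'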
@@ -125,16 +125,59 @@ afterSign: scripts/notarize.js
artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
  releaseNotes: |
    ✨ 新功能:
    - 新增 CherryIN 服务商
    - 新增 AiOnly AI 服务商
    - 更新 MCP 服务器卡片布局和样式,改为列表视图
    <!--LANG:en-->
    🚀 New Features:
    - Refactored AI core engine for more efficient and stable content generation
    - Added support for multiple AI model providers: CherryIN, AiOnly
    - Added API server functionality for external application integration
    - Added PaddleOCR document recognition for enhanced document processing
    - Added Anthropic OAuth authentication support
    - Added data storage space limit notifications
    - Added font settings for global and code fonts customization
    - Added auto-copy feature after translation completion
    - Added keyboard shortcuts: rename topic, edit last message, etc.
    - Added text attachment preview for viewing file contents in messages
    - Added custom window control buttons (minimize, maximize, close)
    - Support for Qwen long-text (qwen-long) and document analysis (qwen-doc) models with native file uploads
    - Support for Qwen image recognition models (Qwen-Image)
    - Added iFlow CLI support
    - Converted knowledge base and web search to tool-calling approach for better flexibility

    🐛 问题修复:
    - 修复 QwenMT 模型的翻译内容处理逻辑
    - 修复无法将外部笔记添加到知识库的问题
    🎨 UI Improvements & Bug Fixes:
    - Integrated HeroUI and Tailwind CSS framework
    - Optimized message notification styles with unified toast component
    - Moved free models to bottom with fixed position for easier access
    - Refactored quick panel and input bar tools for smoother operation
    - Optimized responsive design for navbar and sidebar
    - Improved scrollbar component with horizontal scrolling support
    - Fixed multiple translation issues: paste handling, file processing, state management
    - Various UI optimizations and bug fixes
    <!--LANG:zh-CN-->
    🚀 新功能:
    - 重构 AI 核心引擎,提供更高效稳定的内容生成
    - 新增多个 AI 模型提供商支持:CherryIN、AiOnly
    - 新增 API 服务器功能,支持外部应用集成
    - 新增 PaddleOCR 文档识别,增强文档处理能力
    - 新增 Anthropic OAuth 认证支持
    - 新增数据存储空间限制提醒
    - 新增字体设置,支持全局字体和代码字体自定义
    - 新增翻译完成后自动复制功能
    - 新增键盘快捷键:重命名主题、编辑最后一条消息等
    - 新增文本附件预览,可查看消息中的文件内容
    - 新增自定义窗口控制按钮(最小化、最大化、关闭)
    - 支持通义千问长文本(qwen-long)和文档分析(qwen-doc)模型,原生文件上传
    - 支持通义千问图像识别模型(Qwen-Image)
    - 新增 iFlow CLI 支持
    - 知识库和网页搜索转换为工具调用方式,提升灵活性

    🎨 界面改进与问题修复:
    - 集成 HeroUI 和 Tailwind CSS 框架
    - 优化消息通知样式,统一 toast 组件
    - 免费模型移至底部固定位置,便于访问
    - 重构快捷面板和输入栏工具,操作更流畅
    - 优化导航栏和侧边栏响应式设计
    - 改进滚动条组件,支持水平滚动
    - 修复多个翻译问题:粘贴处理、文件处理、状态管理
    - 各种界面优化和问题修复
    <!--LANG:END-->

    🚀 性能优化:
    - 提升输入框响应速度
    - 优化模型切换性能
    - 改进翻译功能的引用和邮件格式处理
@@ -380,7 +380,8 @@
    "pkce-challenge@npm:^4.1.0": "patch:pkce-challenge@npm%3A4.1.0#~/.yarn/patches/pkce-challenge-npm-4.1.0-fbc51695a3.patch",
    "undici": "6.21.2",
    "vite": "npm:rolldown-vite@latest",
    "tesseract.js@npm:*": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch"
    "tesseract.js@npm:*": "patch:tesseract.js@npm%3A6.0.1#~/.yarn/patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
    "@ai-sdk/google@npm:2.0.14": "patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch"
  },
  "packageManager": "yarn@4.9.1",
  "lint-staged": {

@@ -39,7 +39,7 @@
    "@ai-sdk/anthropic": "^2.0.17",
    "@ai-sdk/azure": "^2.0.30",
    "@ai-sdk/deepseek": "^1.0.17",
    "@ai-sdk/google": "^2.0.14",
    "@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch",
    "@ai-sdk/openai": "^2.0.30",
    "@ai-sdk/openai-compatible": "^1.0.17",
    "@ai-sdk/provider": "^2.0.0",
@@ -368,16 +368,27 @@ export const WINDOWS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
  }
]

// Helper function to escape strings for AppleScript
const escapeForAppleScript = (str: string): string => {
  // In AppleScript strings, backslashes and double quotes need to be escaped
  // When passed through osascript -e with single quotes, we need:
  // 1. Backslash: \ -> \\
  // 2. Double quote: " -> \"
  return str
    .replace(/\\/g, '\\\\') // Escape backslashes first
    .replace(/"/g, '\\"') // Then escape double quotes
}

export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
  {
    id: terminalApps.systemDefault,
    name: 'Terminal',
    bundleId: 'com.apple.Terminal',
    command: (directory: string, fullCommand: string) => ({
    command: (_directory: string, fullCommand: string) => ({
      command: 'sh',
      args: [
        '-c',
        `open -na Terminal && sleep 0.5 && osascript -e 'tell application "Terminal" to activate' -e 'tell application "Terminal" to do script "cd '${directory.replace(/\\/g, '\\\\').replace(/'/g, "\\'")}' && clear && ${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}" in front window'`
        `open -na Terminal && sleep 0.5 && osascript -e 'tell application "Terminal" to activate' -e 'tell application "Terminal" to do script "${escapeForAppleScript(fullCommand)}" in front window'`
      ]
    })
  },
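A quick usage illustration of the new helper (the command string is hypothetical; it assumes escapeForAppleScript from the hunk above and only shows how the escaped result is embedded in the AppleScript passed to osascript):

const fullCommand = 'echo "done" && ls "My Folder"'
const escaped = escapeForAppleScript(fullCommand) // 'echo \\"done\\" && ls \\"My Folder\\"'
const script = `tell application "Terminal" to do script "${escaped}" in front window`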
@@ -385,11 +396,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
    id: terminalApps.iterm2,
    name: 'iTerm2',
    bundleId: 'com.googlecode.iterm2',
    command: (directory: string, fullCommand: string) => ({
    command: (_directory: string, fullCommand: string) => ({
      command: 'sh',
      args: [
        '-c',
        `open -na iTerm && sleep 0.8 && osascript -e 'on waitUntilRunning()\n repeat 50 times\n tell application "System Events"\n if (exists process "iTerm2") then exit repeat\n end tell\n delay 0.1\n end repeat\nend waitUntilRunning\n\nwaitUntilRunning()\n\ntell application "iTerm2"\n if (count of windows) = 0 then\n create window with default profile\n delay 0.3\n else\n tell current window\n create tab with default profile\n end tell\n delay 0.3\n end if\n tell current session of current window to write text "cd '${directory.replace(/\\/g, '\\\\').replace(/'/g, "\\'")}' && clear && ${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}"\n activate\nend tell'`
        `open -na iTerm && sleep 0.8 && osascript -e 'on waitUntilRunning()\n repeat 50 times\n tell application "System Events"\n if (exists process "iTerm2") then exit repeat\n end tell\n delay 0.1\n end repeat\nend waitUntilRunning\n\nwaitUntilRunning()\n\ntell application "iTerm2"\n if (count of windows) = 0 then\n create window with default profile\n delay 0.3\n else\n tell current window\n create tab with default profile\n end tell\n delay 0.3\n end if\n tell current session of current window to write text "${escapeForAppleScript(fullCommand)}"\n activate\nend tell'`
      ]
    })
  },
@@ -397,11 +408,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
    id: terminalApps.kitty,
    name: 'kitty',
    bundleId: 'net.kovidgoyal.kitty',
    command: (directory: string, fullCommand: string) => ({
    command: (_directory: string, fullCommand: string) => ({
      command: 'sh',
      args: [
        '-c',
        `cd "${directory}" && open -na kitty --args --directory="${directory}" sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "kitty" to activate'`
        `cd "${_directory}" && open -na kitty --args --directory="${_directory}" sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "kitty" to activate'`
      ]
    })
  },
@@ -409,11 +420,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
    id: terminalApps.alacritty,
    name: 'Alacritty',
    bundleId: 'org.alacritty',
    command: (directory: string, fullCommand: string) => ({
    command: (_directory: string, fullCommand: string) => ({
      command: 'sh',
      args: [
        '-c',
        `open -na Alacritty --args --working-directory "${directory}" -e sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "Alacritty" to activate'`
        `open -na Alacritty --args --working-directory "${_directory}" -e sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "Alacritty" to activate'`
      ]
    })
  },
@@ -421,11 +432,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
    id: terminalApps.wezterm,
    name: 'WezTerm',
    bundleId: 'com.github.wez.wezterm',
    command: (directory: string, fullCommand: string) => ({
    command: (_directory: string, fullCommand: string) => ({
      command: 'sh',
      args: [
        '-c',
        `open -na WezTerm --args start --new-tab --cwd "${directory}" -- sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "WezTerm" to activate'`
        `open -na WezTerm --args start --new-tab --cwd "${_directory}" -- sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "WezTerm" to activate'`
      ]
    })
  },
@@ -433,11 +444,11 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
    id: terminalApps.ghostty,
    name: 'Ghostty',
    bundleId: 'com.mitchellh.ghostty',
    command: (directory: string, fullCommand: string) => ({
    command: (_directory: string, fullCommand: string) => ({
      command: 'sh',
      args: [
        '-c',
        `cd "${directory}" && open -na Ghostty --args --working-directory="${directory}" -e sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "Ghostty" to activate'`
        `cd "${_directory}" && open -na Ghostty --args --working-directory="${_directory}" -e sh -c "${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}; exec \\$SHELL" && sleep 0.5 && osascript -e 'tell application "Ghostty" to activate'`
      ]
    })
  },
@@ -445,7 +456,7 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
    id: terminalApps.tabby,
    name: 'Tabby',
    bundleId: 'org.tabby',
    command: (directory: string, fullCommand: string) => ({
    command: (_directory: string, fullCommand: string) => ({
      command: 'sh',
      args: [
        '-c',
@@ -453,7 +464,7 @@ export const MACOS_TERMINALS_WITH_COMMANDS: TerminalConfigWithCommand[] = [
            open -na Tabby --args open && sleep 0.3
          else
            open -na Tabby --args open && sleep 2
          fi && osascript -e 'tell application "Tabby" to activate' -e 'set the clipboard to "cd \\"${directory.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}\\" && clear && ${fullCommand.replace(/\\/g, '\\\\').replace(/"/g, '\\"')}"' -e 'tell application "System Events" to tell process "Tabby" to keystroke "v" using {command down}' -e 'tell application "System Events" to key code 36'`
          fi && osascript -e 'tell application "Tabby" to activate' -e 'set the clipboard to "${escapeForAppleScript(fullCommand)}"' -e 'tell application "System Events" to tell process "Tabby" to keystroke "v" using {command down}' -e 'tell application "System Events" to key code 36'`
      ]
    })
  }
@@ -14,6 +14,13 @@ import { modelsRoutes } from './routes/models'

const logger = loggerService.withContext('ApiServer')

const LONG_POLL_TIMEOUT_MS = 120 * 60_000 // 120 minutes
const extendMessagesTimeout: express.RequestHandler = (req, res, next) => {
  req.setTimeout(LONG_POLL_TIMEOUT_MS)
  res.setTimeout(LONG_POLL_TIMEOUT_MS)
  next()
}

const app = express()
app.use(
  express.json({
@@ -26,7 +33,12 @@ app.use((req, res, next) => {
  const start = Date.now()
  res.on('finish', () => {
    const duration = Date.now() - start
    logger.info(`${req.method} ${req.path} - ${res.statusCode} - ${duration}ms`)
    logger.info('API request completed', {
      method: req.method,
      path: req.path,
      statusCode: res.statusCode,
      durationMs: duration
    })
  })
  next()
})
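A minimal, self-contained sketch of the same long-poll pattern (Express only; the route path and handler body are illustrative, and the constant mirrors LONG_POLL_TIMEOUT_MS above):

import express, { type RequestHandler } from 'express'

const LONG_POLL_TIMEOUT_MS = 120 * 60_000 // 120 minutes

// Extend the socket timeout for this request/response pair only,
// leaving the server-wide defaults untouched.
const extendTimeout: RequestHandler = (req, res, next) => {
  req.setTimeout(LONG_POLL_TIMEOUT_MS)
  res.setTimeout(LONG_POLL_TIMEOUT_MS)
  next()
}

const app = express()
app.post('/v1/messages', extendTimeout, (_req, res) => {
  // ...stream a long-running response here...
  res.end()
})

app.listen(0)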
@@ -113,12 +125,11 @@ app.get('/', (_req, res) => {
  })
})

// Provider-specific API routes with auth (must be before /v1 to avoid conflicts)
const providerRouter = express.Router({ mergeParams: true })
providerRouter.use(authMiddleware)
// Mount provider-specific messages route
providerRouter.use('/v1/messages', messagesProviderRoutes)
app.use('/:provider', providerRouter)
// Setup OpenAPI documentation before protected routes so docs remain public
setupOpenAPIDocumentation(app)

// Provider-specific messages route requires authentication
app.use('/:provider/v1/messages', authMiddleware, extendMessagesTimeout, messagesProviderRoutes)

// API v1 routes with auth
const apiRouter = express.Router()
@@ -126,14 +137,11 @@ apiRouter.use(authMiddleware)
// Mount routes
apiRouter.use('/chat', chatRoutes)
apiRouter.use('/mcps', mcpRoutes)
apiRouter.use('/messages', messagesRoutes)
apiRouter.use('/messages', extendMessagesTimeout, messagesRoutes)
apiRouter.use('/models', modelsRoutes)
apiRouter.use('/agents', agentsRoutes)
app.use('/v1', apiRouter)

// Setup OpenAPI documentation
setupOpenAPIDocumentation(app)

// Error handling (must be last)
app.use(errorHandler)

@@ -36,7 +36,7 @@ class ConfigManager {
    }
    return this._config
  } catch (error: any) {
    logger.warn('Failed to load config from Redux, using defaults:', error)
    logger.warn('Failed to load config from Redux, using defaults', { error })
    this._config = {
      enabled: false,
      port: defaultPort,

@@ -6,7 +6,7 @@ const logger = loggerService.withContext('ApiServerErrorHandler')

// oxlint-disable-next-line @typescript-eslint/no-unused-vars
export const errorHandler = (err: Error, _req: Request, res: Response, _next: NextFunction) => {
  logger.error('API Server Error:', err)
  logger.error('API server error', { error: err })

  // Don't expose internal errors in production
  const isDev = process.env.NODE_ENV === 'development'

@@ -197,10 +197,11 @@ export function setupOpenAPIDocumentation(app: Express) {
      })
    )

    logger.info('OpenAPI documentation setup complete')
    logger.info('Documentation available at /api-docs')
    logger.info('OpenAPI spec available at /api-docs.json')
    logger.info('OpenAPI documentation ready', {
      docsPath: '/api-docs',
      specPath: '/api-docs.json'
    })
  } catch (error) {
    logger.error('Failed to setup OpenAPI documentation:', error as Error)
    logger.error('Failed to setup OpenAPI documentation', { error })
  }
}
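Most of the remaining hunks in this commit apply one logging convention: keep the message a constant string and move all variable data into a structured context object. A minimal sketch of the pattern (the import path and helper name are assumptions for illustration, not code from the repository):

import { loggerService } from '@logger' // hypothetical import path

const logger = loggerService.withContext('ApiServerAgents')

function reportAgentLookupFailure(agentId: string, error: unknown): void {
  // Constant message plus structured fields, instead of string interpolation.
  logger.error('Error getting agent', { error, agentId })
}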
@@ -51,18 +51,18 @@ const modelValidationErrorBody = (error: AgentModelValidationError) => ({
 */
export const createAgent = async (req: Request, res: Response): Promise<Response> => {
  try {
    logger.info('Creating new agent')
    logger.debug('Agent data:', req.body)
    logger.debug('Creating agent')
    logger.debug('Agent payload', { body: req.body })

    const agent = await agentService.createAgent(req.body)

    try {
      logger.info(`Agent created successfully: ${agent.id}`)
      logger.info(`Creating default session for new agent: ${agent.id}`)
      logger.info('Agent created', { agentId: agent.id })
      logger.debug('Creating default session for agent', { agentId: agent.id })

      await sessionService.createSession(agent.id, {})

      logger.info(`Default session created for agent: ${agent.id}`)
      logger.info('Default session created for agent', { agentId: agent.id })
      return res.status(201).json(agent)
    } catch (sessionError: any) {
      logger.error('Failed to create default session for new agent, rolling back agent creation', {
@@ -89,7 +89,7 @@ export const createAgent = async (req: Request, res: Response): Promise<Response
    }
  } catch (error: any) {
    if (error instanceof AgentModelValidationError) {
      logger.warn('Agent model validation error during create:', {
      logger.warn('Agent model validation error during create', {
        agentType: error.context.agentType,
        field: error.context.field,
        model: error.context.model,
@@ -98,7 +98,7 @@ export const createAgent = async (req: Request, res: Response): Promise<Response
      return res.status(400).json(modelValidationErrorBody(error))
    }

    logger.error('Error creating agent:', error)
    logger.error('Error creating agent', { error })
    return res.status(500).json({
      error: {
        message: `Failed to create agent: ${error.message}`,
@@ -171,11 +171,16 @@ export const listAgents = async (req: Request, res: Response): Promise<Response>
    const limit = req.query.limit ? parseInt(req.query.limit as string) : 20
    const offset = req.query.offset ? parseInt(req.query.offset as string) : 0

    logger.info(`Listing agents with limit=${limit}, offset=${offset}`)
    logger.debug('Listing agents', { limit, offset })

    const result = await agentService.listAgents({ limit, offset })

    logger.info(`Retrieved ${result.agents.length} agents (total: ${result.total})`)
    logger.info('Agents listed', {
      returned: result.agents.length,
      total: result.total,
      limit,
      offset
    })
    return res.json({
      data: result.agents,
      total: result.total,
@@ -183,7 +188,7 @@ export const listAgents = async (req: Request, res: Response): Promise<Response>
      offset
    } satisfies ListAgentsResponse)
  } catch (error: any) {
    logger.error('Error listing agents:', error)
    logger.error('Error listing agents', { error })
    return res.status(500).json({
      error: {
        message: 'Failed to list agents',
@@ -231,12 +236,12 @@ export const listAgents = async (req: Request, res: Response): Promise<Response>
export const getAgent = async (req: Request, res: Response): Promise<Response> => {
  try {
    const { agentId } = req.params
    logger.info(`Getting agent: ${agentId}`)
    logger.debug('Getting agent', { agentId })

    const agent = await agentService.getAgent(agentId)

    if (!agent) {
      logger.warn(`Agent not found: ${agentId}`)
      logger.warn('Agent not found', { agentId })
      return res.status(404).json({
        error: {
          message: 'Agent not found',
@@ -246,10 +251,10 @@ export const getAgent = async (req: Request, res: Response): Promise<Response> =
      })
    }

    logger.info(`Agent retrieved successfully: ${agentId}`)
    logger.info('Agent retrieved', { agentId })
    return res.json(agent)
  } catch (error: any) {
    logger.error('Error getting agent:', error)
    logger.error('Error getting agent', { error, agentId: req.params.agentId })
    return res.status(500).json({
      error: {
        message: 'Failed to get agent',
@@ -309,8 +314,8 @@ export const getAgent = async (req: Request, res: Response): Promise<Response> =
export const updateAgent = async (req: Request, res: Response): Promise<Response> => {
  const { agentId } = req.params
  try {
    logger.info(`Updating agent: ${agentId}`)
    logger.debug('Update data:', req.body)
    logger.debug('Updating agent', { agentId })
    logger.debug('Replace payload', { body: req.body })

    const { validatedBody } = req as ValidationRequest
    const replacePayload = (validatedBody ?? {}) as ReplaceAgentRequest
@@ -318,7 +323,7 @@ export const updateAgent = async (req: Request, res: Response): Promise<Response
    const agent = await agentService.updateAgent(agentId, replacePayload, { replace: true })

    if (!agent) {
      logger.warn(`Agent not found for update: ${agentId}`)
      logger.warn('Agent not found for update', { agentId })
      return res.status(404).json({
        error: {
          message: 'Agent not found',
@@ -328,11 +333,11 @@ export const updateAgent = async (req: Request, res: Response): Promise<Response
      })
    }

    logger.info(`Agent updated successfully: ${agentId}`)
    logger.info('Agent updated', { agentId })
    return res.json(agent)
  } catch (error: any) {
    if (error instanceof AgentModelValidationError) {
      logger.warn('Agent model validation error during update:', {
      logger.warn('Agent model validation error during update', {
        agentId,
        agentType: error.context.agentType,
        field: error.context.field,
@@ -342,7 +347,7 @@ export const updateAgent = async (req: Request, res: Response): Promise<Response
      return res.status(400).json(modelValidationErrorBody(error))
    }

    logger.error('Error updating agent:', error)
    logger.error('Error updating agent', { error, agentId })
    return res.status(500).json({
      error: {
        message: 'Failed to update agent: ' + error.message,
@@ -455,8 +460,8 @@ export const updateAgent = async (req: Request, res: Response): Promise<Response
export const patchAgent = async (req: Request, res: Response): Promise<Response> => {
  const { agentId } = req.params
  try {
    logger.info(`Partially updating agent: ${agentId}`)
    logger.debug('Partial update data:', req.body)
    logger.debug('Partially updating agent', { agentId })
    logger.debug('Patch payload', { body: req.body })

    const { validatedBody } = req as ValidationRequest
    const updatePayload = (validatedBody ?? {}) as UpdateAgentRequest
@@ -464,7 +469,7 @@ export const patchAgent = async (req: Request, res: Response): Promise<Response>
    const agent = await agentService.updateAgent(agentId, updatePayload)

    if (!agent) {
      logger.warn(`Agent not found for partial update: ${agentId}`)
      logger.warn('Agent not found for partial update', { agentId })
      return res.status(404).json({
        error: {
          message: 'Agent not found',
@@ -474,11 +479,11 @@ export const patchAgent = async (req: Request, res: Response): Promise<Response>
      })
    }

    logger.info(`Agent partially updated successfully: ${agentId}`)
    logger.info('Agent patched', { agentId })
    return res.json(agent)
  } catch (error: any) {
    if (error instanceof AgentModelValidationError) {
      logger.warn('Agent model validation error during partial update:', {
      logger.warn('Agent model validation error during partial update', {
        agentId,
        agentType: error.context.agentType,
        field: error.context.field,
@@ -488,7 +493,7 @@ export const patchAgent = async (req: Request, res: Response): Promise<Response>
      return res.status(400).json(modelValidationErrorBody(error))
    }

    logger.error('Error partially updating agent:', error)
    logger.error('Error partially updating agent', { error, agentId })
    return res.status(500).json({
      error: {
        message: `Failed to partially update agent: ${error.message}`,
@@ -532,12 +537,12 @@ export const patchAgent = async (req: Request, res: Response): Promise<Response>
export const deleteAgent = async (req: Request, res: Response): Promise<Response> => {
  try {
    const { agentId } = req.params
    logger.info(`Deleting agent: ${agentId}`)
    logger.debug('Deleting agent', { agentId })

    const deleted = await agentService.deleteAgent(agentId)

    if (!deleted) {
      logger.warn(`Agent not found for deletion: ${agentId}`)
      logger.warn('Agent not found for deletion', { agentId })
      return res.status(404).json({
        error: {
          message: 'Agent not found',
@@ -547,10 +552,10 @@ export const deleteAgent = async (req: Request, res: Response): Promise<Response
      })
    }

    logger.info(`Agent deleted successfully: ${agentId}`)
    logger.info('Agent deleted', { agentId })
    return res.status(204).send()
  } catch (error: any) {
    logger.error('Error deleting agent:', error)
    logger.error('Error deleting agent', { error, agentId: req.params.agentId })
    return res.status(500).json({
      error: {
        message: 'Failed to delete agent',
@@ -32,8 +32,8 @@ export const createMessage = async (req: Request, res: Response): Promise<void>

    const messageData = req.body

    logger.info(`Creating streaming message for session: ${sessionId}`)
    logger.debug('Streaming message data:', messageData)
    logger.info('Creating streaming message', { agentId, sessionId })
    logger.debug('Streaming message payload', { messageData })

    // Set SSE headers
    res.setHeader('Content-Type', 'text/event-stream')
@@ -68,7 +68,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
        // res.write('data: {"type":"finish"}\n\n')
        res.write('data: [DONE]\n\n')
      } catch (writeError) {
        logger.error('Error writing final sentinel to SSE stream:', { error: writeError as Error })
        logger.error('Error writing final sentinel to SSE stream', { error: writeError as Error })
      }
      res.end()
    }
@@ -94,7 +94,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
     */
    const handleDisconnect = () => {
      if (responseEnded) return
      logger.info(`Client disconnected from streaming message for session: ${sessionId}`)
      logger.info('Streaming client disconnected', { agentId, sessionId })
      responseEnded = true
      abortController.abort('Client disconnected')
      reader.cancel('Client disconnected').catch(() => {})
@@ -119,7 +119,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
        finalizeResponse()
      } catch (error) {
        if (responseEnded) return
        logger.error('Error reading agent stream:', { error })
        logger.error('Error reading agent stream', { error })
        try {
          res.write(
            `data: ${JSON.stringify({
@@ -132,7 +132,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
            })}\n\n`
          )
        } catch (writeError) {
          logger.error('Error writing stream error to SSE:', { error: writeError })
          logger.error('Error writing stream error to SSE', { error: writeError })
        }
        responseEnded = true
        res.end()
@@ -140,7 +140,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
    }

    pumpStream().catch((error) => {
      logger.error('Pump stream failure:', { error })
      logger.error('Pump stream failure', { error })
    })

    completion
@@ -150,7 +150,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
      })
      .catch((error) => {
        if (responseEnded) return
        logger.error(`Streaming message error for session: ${sessionId}:`, error)
        logger.error('Streaming message error', { agentId, sessionId, error })
        try {
          res.write(
            `data: ${JSON.stringify({
@@ -163,7 +163,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
            })}\n\n`
          )
        } catch (writeError) {
          logger.error('Error writing completion error to SSE stream:', { error: writeError })
          logger.error('Error writing completion error to SSE stream', { error: writeError })
        }
        responseEnded = true
        res.end()
@@ -173,7 +173,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
    const timeout = setTimeout(
      () => {
        if (!responseEnded) {
          logger.error(`Streaming message timeout for session: ${sessionId}`)
          logger.error('Streaming message timeout', { agentId, sessionId })
          try {
            res.write(
              `data: ${JSON.stringify({
@@ -186,7 +186,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
              })}\n\n`
            )
          } catch (writeError) {
            logger.error('Error writing timeout to SSE stream:', { error: writeError })
            logger.error('Error writing timeout to SSE stream', { error: writeError })
          }
          abortController.abort('stream timeout')
          reader.cancel('stream timeout').catch(() => {})
@@ -201,7 +201,11 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
    res.on('close', () => clearTimeout(timeout))
    res.on('finish', () => clearTimeout(timeout))
  } catch (error: any) {
    logger.error('Error in streaming message handler:', error)
    logger.error('Error in streaming message handler', {
      error,
      agentId: req.params.agentId,
      sessionId: req.params.sessionId
    })

    // Send error as SSE if possible
    if (!res.headersSent) {
@@ -222,7 +226,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>

      res.write(`data: ${JSON.stringify(errorResponse)}\n\n`)
    } catch (writeError) {
      logger.error('Error writing initial error to SSE stream:', { error: writeError })
      logger.error('Error writing initial error to SSE stream', { error: writeError })
    }

    res.end()
@@ -239,7 +243,7 @@ export const deleteMessage = async (req: Request, res: Response): Promise<Respon
    const deleted = await sessionMessageService.deleteSessionMessage(sessionId, messageId)

    if (!deleted) {
      logger.warn(`Message ${messageId} not found for session ${sessionId}`)
      logger.warn('Session message not found', { agentId, sessionId, messageId })
      return res.status(404).json({
        error: {
          message: 'Message not found for this session',
@@ -249,7 +253,7 @@ export const deleteMessage = async (req: Request, res: Response): Promise<Respon
      })
    }

    logger.info(`Message ${messageId} deleted successfully for session ${sessionId}`)
    logger.info('Session message deleted', { agentId, sessionId, messageId })
    return res.status(204).send()
  } catch (error: any) {
    if (error?.status === 404) {
@@ -268,7 +272,12 @@ export const deleteMessage = async (req: Request, res: Response): Promise<Respon
      })
    }

    logger.error('Error deleting session message:', error)
    logger.error('Error deleting session message', {
      error,
      agentId: req.params.agentId,
      sessionId: req.params.sessionId,
      messageId: Number(req.params.messageId)
    })
    return res.status(500).json({
      error: {
        message: 'Failed to delete session message',
@@ -20,16 +20,16 @@ export const createSession = async (req: Request, res: Response): Promise<Respon
  try {
    const sessionData = req.body

    logger.info(`Creating new session for agent: ${agentId}`)
    logger.debug('Session data:', sessionData)
    logger.debug('Creating new session', { agentId })
    logger.debug('Session payload', { sessionData })

    const session = await sessionService.createSession(agentId, sessionData)

    logger.info(`Session created successfully: ${session?.id}`)
    logger.info('Session created', { agentId, sessionId: session?.id })
    return res.status(201).json(session)
  } catch (error: any) {
    if (error instanceof AgentModelValidationError) {
      logger.warn('Session model validation error during create:', {
      logger.warn('Session model validation error during create', {
        agentId,
        agentType: error.context.agentType,
        field: error.context.field,
@@ -39,7 +39,7 @@ export const createSession = async (req: Request, res: Response): Promise<Respon
      return res.status(400).json(modelValidationErrorBody(error))
    }

    logger.error('Error creating session:', error)
    logger.error('Error creating session', { error, agentId })
    return res.status(500).json({
      error: {
        message: `Failed to create session: ${error.message}`,
@@ -51,17 +51,23 @@ export const createSession = async (req: Request, res: Response): Promise<Respon
}

export const listSessions = async (req: Request, res: Response): Promise<Response> => {
  const { agentId } = req.params
  try {
    const { agentId } = req.params
    const limit = req.query.limit ? parseInt(req.query.limit as string) : 20
    const offset = req.query.offset ? parseInt(req.query.offset as string) : 0
    const status = req.query.status as any

    logger.info(`Listing sessions for agent: ${agentId} with limit=${limit}, offset=${offset}, status=${status}`)
    logger.debug('Listing agent sessions', { agentId, limit, offset, status })

    const result = await sessionService.listSessions(agentId, { limit, offset })

    logger.info(`Retrieved ${result.sessions.length} sessions (total: ${result.total}) for agent: ${agentId}`)
    logger.info('Agent sessions listed', {
      agentId,
      returned: result.sessions.length,
      total: result.total,
      limit,
      offset
    })
    return res.json({
      data: result.sessions,
      total: result.total,
@@ -69,7 +75,7 @@ export const listSessions = async (req: Request, res: Response): Promise<Respons
      offset
    })
  } catch (error: any) {
    logger.error('Error listing sessions:', error)
    logger.error('Error listing sessions', { error, agentId })
    return res.status(500).json({
      error: {
        message: 'Failed to list sessions',
@@ -83,12 +89,12 @@ export const listSessions = async (req: Request, res: Response): Promise<Respons
export const getSession = async (req: Request, res: Response): Promise<Response> => {
  try {
    const { agentId, sessionId } = req.params
    logger.info(`Getting session: ${sessionId} for agent: ${agentId}`)
    logger.debug('Getting session', { agentId, sessionId })

    const session = await sessionService.getSession(agentId, sessionId)

    if (!session) {
      logger.warn(`Session not found: ${sessionId}`)
      logger.warn('Session not found', { agentId, sessionId })
      return res.status(404).json({
        error: {
          message: 'Session not found',
@@ -110,7 +116,7 @@ export const getSession = async (req: Request, res: Response): Promise<Response>
    // }

    // Fetch session messages
    logger.info(`Fetching messages for session: ${sessionId}`)
    logger.debug('Fetching session messages', { sessionId })
    const { messages } = await sessionMessageService.listSessionMessages(sessionId)

    // Add messages to session
@@ -119,10 +125,10 @@ export const getSession = async (req: Request, res: Response): Promise<Response>
      messages: messages
    }

    logger.info(`Session retrieved successfully: ${sessionId} with ${messages.length} messages`)
    logger.info('Session retrieved', { agentId, sessionId, messageCount: messages.length })
    return res.json(sessionWithMessages)
  } catch (error: any) {
    logger.error('Error getting session:', error)
    logger.error('Error getting session', { error, agentId: req.params.agentId, sessionId: req.params.sessionId })
    return res.status(500).json({
      error: {
        message: 'Failed to get session',
@@ -136,13 +142,13 @@ export const getSession = async (req: Request, res: Response): Promise<Response>
export const updateSession = async (req: Request, res: Response): Promise<Response> => {
  const { agentId, sessionId } = req.params
  try {
    logger.info(`Updating session: ${sessionId} for agent: ${agentId}`)
    logger.debug('Update data:', req.body)
    logger.debug('Updating session', { agentId, sessionId })
    logger.debug('Replace payload', { body: req.body })

    // First check if session exists and belongs to agent
    const existingSession = await sessionService.getSession(agentId, sessionId)
    if (!existingSession || existingSession.agent_id !== agentId) {
      logger.warn(`Session ${sessionId} not found for agent ${agentId}`)
      logger.warn('Session not found for update', { agentId, sessionId })
      return res.status(404).json({
        error: {
          message: 'Session not found for this agent',
@@ -158,7 +164,7 @@ export const updateSession = async (req: Request, res: Response): Promise<Respon
    const session = await sessionService.updateSession(agentId, sessionId, replacePayload)

    if (!session) {
      logger.warn(`Session not found for update: ${sessionId}`)
      logger.warn('Session missing during update', { agentId, sessionId })
      return res.status(404).json({
        error: {
          message: 'Session not found',
@@ -168,11 +174,11 @@ export const updateSession = async (req: Request, res: Response): Promise<Respon
      })
    }

    logger.info(`Session updated successfully: ${sessionId}`)
    logger.info('Session updated', { agentId, sessionId })
    return res.json(session satisfies UpdateSessionResponse)
  } catch (error: any) {
    if (error instanceof AgentModelValidationError) {
      logger.warn('Session model validation error during update:', {
      logger.warn('Session model validation error during update', {
        agentId,
        sessionId,
        agentType: error.context.agentType,
@@ -183,7 +189,7 @@ export const updateSession = async (req: Request, res: Response): Promise<Respon
      return res.status(400).json(modelValidationErrorBody(error))
    }

    logger.error('Error updating session:', error)
    logger.error('Error updating session', { error, agentId, sessionId })
    return res.status(500).json({
      error: {
        message: `Failed to update session: ${error.message}`,
@@ -197,13 +203,13 @@ export const updateSession = async (req: Request, res: Response): Promise<Respon
export const patchSession = async (req: Request, res: Response): Promise<Response> => {
  const { agentId, sessionId } = req.params
  try {
    logger.info(`Patching session: ${sessionId} for agent: ${agentId}`)
    logger.debug('Patch data:', req.body)
    logger.debug('Patching session', { agentId, sessionId })
    logger.debug('Patch payload', { body: req.body })

    // First check if session exists and belongs to agent
    const existingSession = await sessionService.getSession(agentId, sessionId)
    if (!existingSession || existingSession.agent_id !== agentId) {
      logger.warn(`Session ${sessionId} not found for agent ${agentId}`)
      logger.warn('Session not found for patch', { agentId, sessionId })
      return res.status(404).json({
        error: {
          message: 'Session not found for this agent',
@@ -217,7 +223,7 @@ export const patchSession = async (req: Request, res: Response): Promise<Respons
    const session = await sessionService.updateSession(agentId, sessionId, updateSession)

    if (!session) {
      logger.warn(`Session not found for patch: ${sessionId}`)
      logger.warn('Session missing while patching', { agentId, sessionId })
      return res.status(404).json({
        error: {
          message: 'Session not found',
@@ -227,11 +233,11 @@ export const patchSession = async (req: Request, res: Response): Promise<Respons
      })
    }

    logger.info(`Session patched successfully: ${sessionId}`)
    logger.info('Session patched', { agentId, sessionId })
    return res.json(session)
  } catch (error: any) {
    if (error instanceof AgentModelValidationError) {
      logger.warn('Session model validation error during patch:', {
      logger.warn('Session model validation error during patch', {
        agentId,
        sessionId,
        agentType: error.context.agentType,
@@ -242,7 +248,7 @@ export const patchSession = async (req: Request, res: Response): Promise<Respons
      return res.status(400).json(modelValidationErrorBody(error))
    }

    logger.error('Error patching session:', error)
    logger.error('Error patching session', { error, agentId, sessionId })
    return res.status(500).json({
      error: {
        message: `Failed to patch session, ${error.message}`,
@@ -256,12 +262,12 @@ export const patchSession = async (req: Request, res: Response): Promise<Respons
export const deleteSession = async (req: Request, res: Response): Promise<Response> => {
  try {
    const { agentId, sessionId } = req.params
    logger.info(`Deleting session: ${sessionId} for agent: ${agentId}`)
    logger.debug('Deleting session', { agentId, sessionId })

    // First check if session exists and belongs to agent
    const existingSession = await sessionService.getSession(agentId, sessionId)
    if (!existingSession || existingSession.agent_id !== agentId) {
      logger.warn(`Session ${sessionId} not found for agent ${agentId}`)
      logger.warn('Session not found for deletion', { agentId, sessionId })
      return res.status(404).json({
        error: {
          message: 'Session not found for this agent',
@@ -274,7 +280,7 @@ export const deleteSession = async (req: Request, res: Response): Promise<Respon
    const deleted = await sessionService.deleteSession(agentId, sessionId)

    if (!deleted) {
      logger.warn(`Session not found for deletion: ${sessionId}`)
      logger.warn('Session missing during delete', { agentId, sessionId })
      return res.status(404).json({
        error: {
          message: 'Session not found',
@@ -284,15 +290,15 @@ export const deleteSession = async (req: Request, res: Response): Promise<Respon
      })
    }

    logger.info(`Session deleted successfully: ${sessionId}`)
    logger.info('Session deleted', { agentId, sessionId })

    const { total } = await sessionService.listSessions(agentId, { limit: 1 })

    if (total === 0) {
      logger.info(`No remaining sessions for agent ${agentId}, creating default session`)
      logger.info('No remaining sessions, creating default', { agentId })
      try {
        const fallbackSession = await sessionService.createSession(agentId, {})
        logger.info('Default session created after deleting last session', {
        logger.info('Default session created after delete', {
          agentId,
          sessionId: fallbackSession?.id
        })
@@ -313,7 +319,7 @@ export const deleteSession = async (req: Request, res: Response): Promise<Respon

    return res.status(204).send()
  } catch (error: any) {
    logger.error('Error deleting session:', error)
    logger.error('Error deleting session', { error, agentId: req.params.agentId, sessionId: req.params.sessionId })
    return res.status(500).json({
      error: {
        message: 'Failed to delete session',
@@ -331,11 +337,16 @@ export const listAllSessions = async (req: Request, res: Response): Promise<Resp
    const offset = req.query.offset ? parseInt(req.query.offset as string) : 0
    const status = req.query.status as any

    logger.info(`Listing all sessions with limit=${limit}, offset=${offset}, status=${status}`)
    logger.debug('Listing all sessions', { limit, offset, status })

    const result = await sessionService.listSessions(undefined, { limit, offset })

    logger.info(`Retrieved ${result.sessions.length} sessions (total: ${result.total})`)
    logger.info('Sessions listed', {
      returned: result.sessions.length,
      total: result.total,
      limit,
      offset
    })
    return res.json({
      data: result.sessions,
      total: result.total,
@@ -343,7 +354,7 @@ export const listAllSessions = async (req: Request, res: Response): Promise<Resp
      offset
    } satisfies ListAgentSessionsResponse)
  } catch (error: any) {
    logger.error('Error listing all sessions:', error)
    logger.error('Error listing all sessions', { error })
    return res.status(500).json({
      error: {
        message: 'Failed to list sessions',
@@ -29,7 +29,10 @@ export const checkAgentExists = async (req: Request, res: Response, next: any):

    next()
  } catch (error) {
    logger.error('Error checking agent existence:', error as Error)
    logger.error('Error checking agent existence', {
      error: error as Error,
      agentId: req.params.agentId
    })
    res.status(500).json({
      error: {
        message: 'Failed to validate agent',
@@ -22,7 +22,7 @@ interface ErrorResponseBody {

const mapChatCompletionError = (error: unknown): { status: number; body: ErrorResponseBody } => {
  if (error instanceof ChatCompletionValidationError) {
    logger.warn('Chat completion validation error:', {
    logger.warn('Chat completion validation error', {
      errors: error.errors
    })

@@ -39,7 +39,7 @@ const mapChatCompletionError = (error: unknown): { status: number; body: ErrorRe
  }

  if (error instanceof ChatCompletionModelError) {
    logger.warn('Chat completion model error:', error.error)
    logger.warn('Chat completion model error', error.error)

    return {
      status: 400,
@@ -72,7 +72,7 @@ const mapChatCompletionError = (error: unknown): { status: number; body: ErrorRe
    errorCode = 'upstream_error'
  }

  logger.error('Chat completion error:', { error })
  logger.error('Chat completion error', { error })

  return {
    status: statusCode,
@@ -86,7 +86,7 @@ const mapChatCompletionError = (error: unknown): { status: number; body: ErrorRe
    }
  }

  logger.error('Chat completion unknown error:', { error })
  logger.error('Chat completion unknown error', { error })

  return {
    status: 500,
@@ -193,7 +193,7 @@ router.post('/completions', async (req: Request, res: Response) => {
    })
  }

  logger.info('Chat completion request:', {
  logger.debug('Chat completion request', {
    model: request.model,
    messageCount: request.messages?.length || 0,
    stream: request.stream,
@@ -217,7 +217,7 @@ router.post('/completions', async (req: Request, res: Response) => {
      }
      res.write('data: [DONE]\n\n')
    } catch (streamError: any) {
      logger.error('Stream error:', streamError)
      logger.error('Stream error', { error: streamError })
      res.write(
        `data: ${JSON.stringify({
          error: {
@@ -43,14 +43,14 @@ const router = express.Router()
 */
router.get('/', async (req: Request, res: Response) => {
  try {
    logger.info('Get all MCP servers request received')
    logger.debug('Listing MCP servers')
    const servers = await mcpApiService.getAllServers(req)
    return res.json({
      success: true,
      data: servers
    })
  } catch (error: any) {
    logger.error('Error fetching MCP servers:', error)
    logger.error('Error fetching MCP servers', { error })
    return res.status(503).json({
      success: false,
      error: {
@@ -103,10 +103,12 @@ router.get('/', async (req: Request, res: Response) => {
 */
router.get('/:server_id', async (req: Request, res: Response) => {
  try {
    logger.info('Get MCP server info request received')
    logger.debug('Get MCP server info request received', {
      serverId: req.params.server_id
    })
    const server = await mcpApiService.getServerInfo(req.params.server_id)
    if (!server) {
      logger.warn('MCP server not found')
      logger.warn('MCP server not found', { serverId: req.params.server_id })
      return res.status(404).json({
        success: false,
        error: {
@@ -121,7 +123,7 @@ router.get('/:server_id', async (req: Request, res: Response) => {
      data: server
    })
  } catch (error: any) {
    logger.error('Error fetching MCP server info:', error)
    logger.error('Error fetching MCP server info', { error, serverId: req.params.server_id })
    return res.status(503).json({
      success: false,
      error: {
@@ -137,7 +139,7 @@ router.get('/:server_id', async (req: Request, res: Response) => {
router.all('/:server_id/mcp', async (req: Request, res: Response) => {
  const server = await mcpApiService.getServerById(req.params.server_id)
  if (!server) {
    logger.warn('MCP server not found')
    logger.warn('MCP server not found', { serverId: req.params.server_id })
    return res.status(404).json({
      success: false,
      error: {
@@ -12,7 +12,6 @@ const providerRouter = express.Router({ mergeParams: true })

// Helper functions for shared logic
async function validateRequestBody(req: Request): Promise<{ valid: boolean; error?: any }> {
  logger.info('Validating request body', { body: req.body })
  const request: MessageCreateParams = req.body

  if (!request) {
@@ -43,14 +42,30 @@ async function handleStreamingResponse(
  res.setHeader('Connection', 'keep-alive')
  res.setHeader('X-Accel-Buffering', 'no')
  res.flushHeaders()
  const flushableResponse = res as Response & { flush?: () => void }
  const flushStream = () => {
    if (typeof flushableResponse.flush !== 'function') {
      return
    }
    try {
      flushableResponse.flush()
    } catch (flushError: unknown) {
      logger.warn('Failed to flush streaming response', {
        error: flushError
      })
    }
  }

  try {
    for await (const chunk of messagesService.processStreamingMessage(request, provider)) {
      res.write(`event: ${chunk.type}\n`)
      res.write(`data: ${JSON.stringify(chunk)}\n\n`)
      flushStream()
    }
    res.write('data: [DONE]\n\n')
    flushStream()
  } catch (streamError: any) {
    logger.error('Stream error:', streamError)
    logger.error('Stream error', { error: streamError })
    res.write(
      `data: ${JSON.stringify({
        type: 'error',
@@ -66,7 +81,7 @@ async function handleStreamingResponse(
}

function handleErrorResponse(res: Response, error: any, logger: any): Response {
  logger.error('Message processing error:', error)
  logger.error('Message processing error', { error })

  let statusCode = 500
  let errorType = 'api_error'
@@ -303,7 +318,10 @@ router.post('/', async (req: Request, res: Response) => {
  const modelValidation = await validateModelId(request.model)
  if (!modelValidation.valid) {
    const error = modelValidation.error!
    logger.warn(`Model validation failed for '${request.model}':`, error)
    logger.warn('Model validation failed', {
      model: request.model,
      error
    })
    return res.status(400).json({
      type: 'error',
      error: {
@@ -75,13 +75,13 @@ const router = express
   */
  .get('/', async (req: Request, res: Response) => {
    try {
      logger.info('Models list request received', { query: req.query })
      logger.debug('Models list request received', { query: req.query })

      // Validate query parameters using Zod schema
      const filterResult = ApiModelsFilterSchema.safeParse(req.query)

      if (!filterResult.success) {
        logger.warn('Invalid query parameters:', filterResult.error.issues)
        logger.warn('Invalid model query parameters', { issues: filterResult.error.issues })
        return res.status(400).json({
          error: {
            message: 'Invalid query parameters',
@@ -99,24 +99,20 @@ const router = express
      const response = await modelsService.getModels(filter)

      if (response.data.length === 0) {
        logger.warn(
          'No models available from providers. This may be because no OpenAI/Anthropic providers are configured or enabled.',
          { filter }
        )
        logger.warn('No models available from providers', { filter })
      }

      logger.info(`Returning ${response.data.length} models`, {
      logger.info('Models response ready', {
        filter,
        total: response.total
      })
      logger.debug(
        'Model IDs:',
        response.data.map((m) => m.id)
      )
      logger.debug('Model IDs returned', {
        modelIds: response.data.map((m) => m.id)
      })

      return res.json(response satisfies ApiModelsResponse)
    } catch (error: any) {
      logger.error('Error fetching models:', error)
      logger.error('Error fetching models', { error })
      return res.status(503).json({
        error: {
          message: 'Failed to retrieve models from available providers',
@ -7,6 +7,10 @@ import { config } from './config'
|
||||
|
||||
const logger = loggerService.withContext('ApiServer')
|
||||
|
||||
const GLOBAL_REQUEST_TIMEOUT_MS = 5 * 60_000
|
||||
const GLOBAL_HEADERS_TIMEOUT_MS = GLOBAL_REQUEST_TIMEOUT_MS + 5_000
|
||||
const GLOBAL_KEEPALIVE_TIMEOUT_MS = 60_000
|
||||
|
||||
export class ApiServer {
|
||||
private server: ReturnType<typeof createServer> | null = null
|
||||
|
||||
@ -17,21 +21,21 @@ export class ApiServer {
|
||||
}
|
||||
|
||||
// Load config
|
||||
const { port, host, apiKey } = await config.load()
|
||||
const { port, host } = await config.load()
|
||||
|
||||
// Initialize AgentService
|
||||
logger.info('Initializing AgentService...')
|
||||
logger.info('Initializing AgentService')
|
||||
await agentService.initialize()
|
||||
logger.info('AgentService initialized successfully')
|
||||
logger.info('AgentService initialized')
|
||||
|
||||
// Create server with Express app
|
||||
this.server = createServer(app)
|
||||
this.applyServerTimeouts(this.server)
|
||||
|
||||
// Start server
|
||||
return new Promise((resolve, reject) => {
|
||||
this.server!.listen(port, host, () => {
|
||||
logger.info(`API Server started at http://${host}:${port}`)
|
||||
logger.info(`API Key: ${apiKey}`)
|
||||
logger.info('API server started', { host, port })
|
||||
resolve()
|
||||
})
|
||||
|
||||
@ -39,12 +43,19 @@ export class ApiServer {
|
||||
})
|
||||
}
|
||||
|
||||
private applyServerTimeouts(server: ReturnType<typeof createServer>): void {
|
||||
server.requestTimeout = GLOBAL_REQUEST_TIMEOUT_MS
|
||||
server.headersTimeout = Math.max(GLOBAL_HEADERS_TIMEOUT_MS, server.requestTimeout + 1_000)
|
||||
server.keepAliveTimeout = GLOBAL_KEEPALIVE_TIMEOUT_MS
|
||||
server.setTimeout(0)
|
||||
}
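
Working through the constants declared above, the applied timeouts resolve to the following values (simple arithmetic, for reference):

// requestTimeout   = 5 * 60_000                          = 300_000 ms (5 minutes)
// headersTimeout   = Math.max(305_000, 300_000 + 1_000)  = 305_000 ms
// keepAliveTimeout = 60_000 ms
// setTimeout(0) disables the per-socket inactivity timeout, so long-lived
// streaming responses are not cut off while data is still flowing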
|
||||
|
||||
async stop(): Promise<void> {
|
||||
if (!this.server) return
|
||||
|
||||
return new Promise((resolve) => {
|
||||
this.server!.close(() => {
|
||||
logger.info('API Server stopped')
|
||||
logger.info('API server stopped')
|
||||
this.server = null
|
||||
resolve()
|
||||
})
|
||||
@ -62,7 +73,7 @@ export class ApiServer {
|
||||
const isListening = this.server?.listening || false
|
||||
const result = hasServer && isListening
|
||||
|
||||
logger.debug('isRunning check:', { hasServer, isListening, result })
|
||||
logger.debug('isRunning check', { hasServer, isListening, result })
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
@ -98,7 +98,7 @@ export class ChatCompletionService {
|
||||
|
||||
const { provider, modelId, client } = providerContext
|
||||
|
||||
logger.info('Model validation successful:', {
|
||||
logger.debug('Model validation successful', {
|
||||
provider: provider.id,
|
||||
providerType: provider.type,
|
||||
modelId,
|
||||
@ -160,7 +160,7 @@ export class ChatCompletionService {
|
||||
response: OpenAI.Chat.Completions.ChatCompletion
|
||||
}> {
|
||||
try {
|
||||
logger.info('Processing chat completion request:', {
|
||||
logger.debug('Processing chat completion request', {
|
||||
model: request.model,
|
||||
messageCount: request.messages.length,
|
||||
stream: request.stream
|
||||
@ -177,7 +177,7 @@ export class ChatCompletionService {
|
||||
|
||||
const { provider, modelId, client, providerRequest } = preparation
|
||||
|
||||
logger.debug('Sending request to provider:', {
|
||||
logger.debug('Sending request to provider', {
|
||||
provider: provider.id,
|
||||
model: modelId,
|
||||
apiHost: provider.apiHost
|
||||
@ -185,14 +185,20 @@ export class ChatCompletionService {
|
||||
|
||||
const response = (await client.chat.completions.create(providerRequest)) as OpenAI.Chat.Completions.ChatCompletion
|
||||
|
||||
logger.info('Successfully processed chat completion')
|
||||
logger.info('Chat completion processed', {
|
||||
modelId,
|
||||
provider: provider.id
|
||||
})
|
||||
return {
|
||||
provider,
|
||||
modelId,
|
||||
response
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error('Error processing chat completion:', error)
|
||||
logger.error('Error processing chat completion', {
|
||||
error,
|
||||
model: request.model
|
||||
})
|
||||
throw error
|
||||
}
|
||||
}
|
||||
@ -203,7 +209,7 @@ export class ChatCompletionService {
|
||||
stream: AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>
|
||||
}> {
|
||||
try {
|
||||
logger.info('Processing streaming chat completion request:', {
|
||||
logger.debug('Processing streaming chat completion request', {
|
||||
model: request.model,
|
||||
messageCount: request.messages.length
|
||||
})
|
||||
@ -219,7 +225,7 @@ export class ChatCompletionService {
|
||||
|
||||
const { provider, modelId, client, providerRequest } = preparation
|
||||
|
||||
logger.debug('Sending streaming request to provider:', {
|
||||
logger.debug('Sending streaming request to provider', {
|
||||
provider: provider.id,
|
||||
model: modelId,
|
||||
apiHost: provider.apiHost
|
||||
@ -230,14 +236,20 @@ export class ChatCompletionService {
|
||||
streamRequest
|
||||
)) as AsyncIterable<OpenAI.Chat.Completions.ChatCompletionChunk>
|
||||
|
||||
logger.info('Successfully started streaming chat completion')
|
||||
logger.info('Streaming chat completion started', {
|
||||
modelId,
|
||||
provider: provider.id
|
||||
})
|
||||
return {
|
||||
provider,
|
||||
modelId,
|
||||
stream
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error('Error processing streaming chat completion:', error)
|
||||
logger.error('Error processing streaming chat completion', {
|
||||
error,
|
||||
model: request.model
|
||||
})
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
@ -49,7 +49,7 @@ class MCPApiService extends EventEmitter {
|
||||
constructor() {
|
||||
super()
|
||||
this.initMcpServer()
|
||||
logger.silly('MCPApiService initialized')
|
||||
logger.debug('MCPApiService initialized')
|
||||
}
|
||||
|
||||
private initMcpServer() {
|
||||
@ -60,7 +60,7 @@ class MCPApiService extends EventEmitter {
|
||||
async getAllServers(req: Request): Promise<McpServersResp> {
|
||||
try {
|
||||
const servers = await getMCPServersFromRedux()
|
||||
logger.silly(`Returning ${servers.length} servers`)
|
||||
logger.debug('Returning servers from Redux', { count: servers.length })
|
||||
const resp: McpServersResp = {
|
||||
servers: {}
|
||||
}
|
||||
@ -77,7 +77,7 @@ class MCPApiService extends EventEmitter {
|
||||
}
|
||||
return resp
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get all servers:', error)
|
||||
logger.error('Failed to get all servers', { error })
|
||||
throw new Error('Failed to retrieve servers')
|
||||
}
|
||||
}
|
||||
@ -85,17 +85,17 @@ class MCPApiService extends EventEmitter {
|
||||
// get server by id
|
||||
async getServerById(id: string): Promise<MCPServer | null> {
|
||||
try {
|
||||
logger.silly(`getServerById called with id: ${id}`)
|
||||
logger.debug('getServerById called', { id })
|
||||
const servers = await getMCPServersFromRedux()
|
||||
const server = servers.find((s) => s.id === id)
|
||||
if (!server) {
|
||||
logger.warn(`Server with id ${id} not found`)
|
||||
logger.warn('Server not found', { id })
|
||||
return null
|
||||
}
|
||||
logger.silly(`Returning server with id ${id}`)
|
||||
logger.debug('Returning server', { id })
|
||||
return server
|
||||
} catch (error: any) {
|
||||
logger.error(`Failed to get server with id ${id}:`, error)
|
||||
logger.error('Failed to get server', { id, error })
|
||||
throw new Error('Failed to retrieve server')
|
||||
}
|
||||
}
|
||||
@ -104,7 +104,7 @@ class MCPApiService extends EventEmitter {
|
||||
try {
|
||||
const server = await this.getServerById(id)
|
||||
if (!server) {
|
||||
logger.warn(`Server with id ${id} not found`)
|
||||
logger.warn('Server not found while fetching info', { id })
|
||||
return null
|
||||
}
|
||||
|
||||
@ -118,14 +118,14 @@ class MCPApiService extends EventEmitter {
|
||||
tools: tools.tools
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error(`Failed to get server info with id ${id}:`, error)
|
||||
logger.error('Failed to get server info', { id, error })
|
||||
throw new Error('Failed to retrieve server info')
|
||||
}
|
||||
}
|
||||
|
||||
async handleRequest(req: Request, res: Response, server: MCPServer) {
|
||||
const sessionId = req.headers['mcp-session-id'] as string | undefined
|
||||
logger.silly(`Handling request for server with sessionId ${sessionId}`)
|
||||
logger.debug('Handling MCP request', { sessionId, serverId: server.id })
|
||||
let transport: StreamableHTTPServerTransport
|
||||
if (sessionId && transports[sessionId]) {
|
||||
transport = transports[sessionId]
|
||||
@ -138,7 +138,7 @@ class MCPApiService extends EventEmitter {
|
||||
})
|
||||
|
||||
transport.onclose = () => {
|
||||
logger.info(`Transport for sessionId ${sessionId} closed`)
|
||||
logger.info('Transport closed', { sessionId })
|
||||
if (transport.sessionId) {
|
||||
delete transports[transport.sessionId]
|
||||
}
|
||||
@ -173,12 +173,15 @@ class MCPApiService extends EventEmitter {
|
||||
}
|
||||
}
|
||||
|
||||
logger.info(`Request body`, { rawBody: req.body, messages: JSON.stringify(messages) })
|
||||
logger.debug('Dispatching MCP request', {
|
||||
sessionId: transport.sessionId ?? sessionId,
|
||||
messageCount: messages.length
|
||||
})
|
||||
await transport.handleRequest(req as IncomingMessage, res as ServerResponse, messages)
|
||||
}
|
||||
|
||||
private onMessage(message: JSONRPCMessage, extra?: MessageExtraInfo) {
|
||||
logger.info(`Received message: ${JSON.stringify(message)}`, extra)
|
||||
logger.debug('Received MCP message', { message, extra })
|
||||
// Handle message here
|
||||
}
|
||||
}
|
||||
|
||||
@ -58,7 +58,11 @@ export class ModelsService {
|
||||
logger.debug(`Applied offset: offset=${offset}, showing ${modelData.length} of ${total} models`)
|
||||
}
|
||||
|
||||
logger.info(`Successfully retrieved ${modelData.length} models from ${models.length} total models`)
|
||||
logger.info('Models retrieved', {
|
||||
returned: modelData.length,
|
||||
discovered: models.length,
|
||||
filter
|
||||
})
|
||||
|
||||
if (models.length > total) {
|
||||
logger.debug(`Filtered out ${models.length - total} models after deduplication and filtering`)
|
||||
@ -80,7 +84,7 @@ export class ModelsService {
|
||||
|
||||
return response
|
||||
} catch (error: any) {
|
||||
logger.error('Error getting models:', error)
|
||||
logger.error('Error getting models', { error, filter })
|
||||
return {
|
||||
object: 'list',
|
||||
data: []
|
||||
|
||||
@ -14,14 +14,16 @@ export async function getAvailableProviders(): Promise<Provider[]> {
|
||||
// Try to get from cache first (faster)
|
||||
const cachedSupportedProviders = CacheService.get<Provider[]>(PROVIDERS_CACHE_KEY)
|
||||
if (cachedSupportedProviders) {
|
||||
logger.debug(`Found ${cachedSupportedProviders.length} supported providers (from cache)`)
|
||||
logger.debug('Providers resolved from cache', {
|
||||
count: cachedSupportedProviders.length
|
||||
})
|
||||
return cachedSupportedProviders
|
||||
}
|
||||
|
||||
// If cache is not available, get fresh data from Redux
|
||||
const providers = await reduxService.select('state.llm.providers')
|
||||
if (!providers || !Array.isArray(providers)) {
|
||||
logger.warn('No providers found in Redux store, returning empty array')
|
||||
logger.warn('No providers found in Redux store')
|
||||
return []
|
||||
}
|
||||
|
||||
@ -33,11 +35,14 @@ export async function getAvailableProviders(): Promise<Provider[]> {
|
||||
// Cache the filtered results
|
||||
CacheService.set(PROVIDERS_CACHE_KEY, supportedProviders, PROVIDERS_CACHE_TTL)
|
||||
|
||||
logger.info(`Filtered to ${supportedProviders.length} supported providers from ${providers.length} total providers`)
|
||||
logger.info('Providers filtered', {
|
||||
supported: supportedProviders.length,
|
||||
total: providers.length
|
||||
})
|
||||
|
||||
return supportedProviders
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get providers from Redux store:', error)
|
||||
logger.error('Failed to get providers from Redux store', { error })
|
||||
return []
|
||||
}
|
||||
}
|
||||
@ -47,7 +52,7 @@ export async function listAllAvailableModels(): Promise<Model[]> {
|
||||
const providers = await getAvailableProviders()
|
||||
return providers.map((p: Provider) => p.models || []).flat()
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to list available models:', error)
|
||||
logger.error('Failed to list available models', { error })
|
||||
return []
|
||||
}
|
||||
}
|
||||
@ -55,15 +60,13 @@ export async function listAllAvailableModels(): Promise<Model[]> {
|
||||
export async function getProviderByModel(model: string): Promise<Provider | undefined> {
|
||||
try {
|
||||
if (!model || typeof model !== 'string') {
|
||||
logger.warn(`Invalid model parameter: ${model}`)
|
||||
logger.warn('Invalid model parameter', { model })
|
||||
return undefined
|
||||
}
|
||||
|
||||
// Validate model format first
|
||||
if (!model.includes(':')) {
|
||||
logger.warn(
|
||||
`Invalid model format, must contain ':' separator. Expected format "provider:model_id", got: ${model}`
|
||||
)
|
||||
logger.warn('Invalid model format missing separator', { model })
|
||||
return undefined
|
||||
}
|
||||
|
||||
@ -71,7 +74,7 @@ export async function getProviderByModel(model: string): Promise<Provider | unde
|
||||
const modelInfo = model.split(':')
|
||||
|
||||
if (modelInfo.length < 2 || modelInfo[0].length === 0 || modelInfo[1].length === 0) {
|
||||
logger.warn(`Invalid model format, expected "provider:model_id" with non-empty parts, got: ${model}`)
|
||||
logger.warn('Invalid model format with empty parts', { model })
|
||||
return undefined
|
||||
}
|
||||
|
||||
@ -79,16 +82,17 @@ export async function getProviderByModel(model: string): Promise<Provider | unde
|
||||
const provider = providers.find((p: Provider) => p.id === providerId)
|
||||
|
||||
if (!provider) {
|
||||
logger.warn(
|
||||
`Provider '${providerId}' not found or not enabled. Available providers: ${providers.map((p) => p.id).join(', ')}`
|
||||
)
|
||||
logger.warn('Provider not found for model', {
|
||||
providerId,
|
||||
available: providers.map((p) => p.id)
|
||||
})
|
||||
return undefined
|
||||
}
|
||||
|
||||
logger.debug(`Found provider '${providerId}' for model: ${model}`)
|
||||
logger.debug('Provider resolved for model', { providerId, model })
|
||||
return provider
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get provider by model:', error)
|
||||
logger.error('Failed to get provider by model', { error, model })
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
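
A brief usage sketch of the lookup above; both model ids are hypothetical and only illustrate the required `provider:model_id` shape:

// Hypothetical ids, shown only to illustrate the expected format.
const provider = await getProviderByModel('openai:gpt-4o-mini') // resolves the enabled 'openai' provider, if any
const invalid = await getProviderByModel('gpt-4o-mini') // undefined: missing the ':' separator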
@ -176,7 +180,7 @@ export async function validateModelId(
|
||||
modelId
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error('Error validating model ID:', error)
|
||||
logger.error('Error validating model ID', { error, model })
|
||||
return {
|
||||
valid: false,
|
||||
error: {
|
||||
@ -207,7 +211,7 @@ export function transformModelToOpenAI(model: Model, providers: Provider[]): Api
|
||||
export async function getProviderById(providerId: string): Promise<Provider | undefined> {
|
||||
try {
|
||||
if (!providerId || typeof providerId !== 'string') {
|
||||
logger.warn(`Invalid provider ID parameter: ${providerId}`)
|
||||
logger.warn('Invalid provider ID parameter', { providerId })
|
||||
return undefined
|
||||
}
|
||||
|
||||
@ -215,16 +219,17 @@ export async function getProviderById(providerId: string): Promise<Provider | un
|
||||
const provider = providers.find((p: Provider) => p.id === providerId)
|
||||
|
||||
if (!provider) {
|
||||
logger.warn(
|
||||
`Provider '${providerId}' not found or not enabled. Available providers: ${providers.map((p) => p.id).join(', ')}`
|
||||
)
|
||||
logger.warn('Provider not found by ID', {
|
||||
providerId,
|
||||
available: providers.map((p) => p.id)
|
||||
})
|
||||
return undefined
|
||||
}
|
||||
|
||||
logger.debug(`Found provider '${providerId}'`)
|
||||
logger.debug('Provider found by ID', { providerId })
|
||||
return provider
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get provider by ID:', error)
|
||||
logger.error('Failed to get provider by ID', { error, providerId })
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
@ -237,7 +242,7 @@ export function validateProvider(provider: Provider): boolean {
|
||||
|
||||
// Check required fields
|
||||
if (!provider.id || !provider.type || !provider.apiKey || !provider.apiHost) {
|
||||
logger.warn('Provider missing required fields:', {
|
||||
logger.warn('Provider missing required fields', {
|
||||
id: !!provider.id,
|
||||
type: !!provider.type,
|
||||
apiKey: !!provider.apiKey,
|
||||
@ -248,21 +253,22 @@ export function validateProvider(provider: Provider): boolean {
|
||||
|
||||
// Check if provider is enabled
|
||||
if (!provider.enabled) {
|
||||
logger.debug(`Provider is disabled: ${provider.id}`)
|
||||
logger.debug('Provider is disabled', { providerId: provider.id })
|
||||
return false
|
||||
}
|
||||
|
||||
// Support OpenAI and Anthropic type providers
|
||||
if (provider.type !== 'openai' && provider.type !== 'anthropic') {
|
||||
logger.debug(
|
||||
`Provider type '${provider.type}' not supported, only 'openai' and 'anthropic' types are currently supported: ${provider.id}`
|
||||
)
|
||||
logger.debug('Provider type not supported', {
|
||||
providerId: provider.id,
|
||||
providerType: provider.type
|
||||
})
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
} catch (error: any) {
|
||||
logger.error('Error validating provider:', error)
|
||||
logger.error('Error validating provider', { error, providerId: provider?.id })
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@ -47,12 +47,12 @@ async function getMcpServerConfigById(id: string): Promise<MCPServer | undefined
|
||||
*/
|
||||
export async function getMCPServersFromRedux(): Promise<MCPServer[]> {
|
||||
try {
|
||||
logger.silly('Getting servers from Redux store')
|
||||
logger.debug('Getting servers from Redux store')
|
||||
|
||||
// Try to get from cache first (faster)
|
||||
const cachedServers = CacheService.get<MCPServer[]>(MCP_SERVERS_CACHE_KEY)
|
||||
if (cachedServers) {
|
||||
logger.silly(`Found ${cachedServers.length} servers (from cache)`)
|
||||
logger.debug('MCP servers resolved from cache', { count: cachedServers.length })
|
||||
return cachedServers
|
||||
}
|
||||
|
||||
@ -63,10 +63,10 @@ export async function getMCPServersFromRedux(): Promise<MCPServer[]> {
|
||||
// Cache the results
|
||||
CacheService.set(MCP_SERVERS_CACHE_KEY, serverList, MCP_SERVERS_CACHE_TTL)
|
||||
|
||||
logger.silly(`Fetched ${serverList.length} servers from Redux store`)
|
||||
logger.debug('Fetched servers from Redux store', { count: serverList.length })
|
||||
return serverList
|
||||
} catch (error: any) {
|
||||
logger.error('Failed to get servers from Redux:', error)
|
||||
logger.error('Failed to get servers from Redux', { error })
|
||||
return []
|
||||
}
|
||||
}
|
||||
@ -91,6 +91,6 @@ export async function getMcpServerById(id: string): Promise<Server> {
|
||||
cachedServers[id] = newServer
|
||||
return newServer
|
||||
}
|
||||
logger.silly('getMcpServer ', { server: server })
|
||||
logger.debug('Returning cached MCP server', { id, hasHandlers: Boolean(server) })
|
||||
return server
|
||||
}
|
||||
|
||||
@ -17,6 +17,13 @@ import { windowService } from './WindowService'
|
||||
|
||||
const logger = loggerService.withContext('AppUpdater')
|
||||
|
||||
// Language markers constants for multi-language release notes
|
||||
const LANG_MARKERS = {
|
||||
EN_START: '<!--LANG:en-->',
|
||||
ZH_CN_START: '<!--LANG:zh-CN-->',
|
||||
END: '<!--LANG:END-->'
|
||||
} as const
|
||||
|
||||
export default class AppUpdater {
|
||||
autoUpdater: _AppUpdater = autoUpdater
|
||||
private releaseInfo: UpdateInfo | undefined
|
||||
@ -41,7 +48,8 @@ export default class AppUpdater {
|
||||
|
||||
autoUpdater.on('update-available', (releaseInfo: UpdateInfo) => {
|
||||
logger.info('update available', releaseInfo)
|
||||
windowService.getMainWindow()?.webContents.send(IpcChannel.UpdateAvailable, releaseInfo)
|
||||
const processedReleaseInfo = this.processReleaseInfo(releaseInfo)
|
||||
windowService.getMainWindow()?.webContents.send(IpcChannel.UpdateAvailable, processedReleaseInfo)
|
||||
})
|
||||
|
||||
// 检测到不需要更新时
|
||||
@ -56,9 +64,10 @@ export default class AppUpdater {
|
||||
|
||||
// 当需要更新的内容下载完成后
|
||||
autoUpdater.on('update-downloaded', (releaseInfo: UpdateInfo) => {
|
||||
windowService.getMainWindow()?.webContents.send(IpcChannel.UpdateDownloaded, releaseInfo)
|
||||
this.releaseInfo = releaseInfo
|
||||
logger.info('update downloaded', releaseInfo)
|
||||
const processedReleaseInfo = this.processReleaseInfo(releaseInfo)
|
||||
windowService.getMainWindow()?.webContents.send(IpcChannel.UpdateDownloaded, processedReleaseInfo)
|
||||
this.releaseInfo = processedReleaseInfo
|
||||
logger.info('update downloaded', processedReleaseInfo)
|
||||
})
|
||||
|
||||
if (isWin) {
|
||||
@ -271,16 +280,99 @@ export default class AppUpdater {
|
||||
})
|
||||
}
|
||||
|
||||
/**
* Check if release notes contain multi-language markers
*/
private hasMultiLanguageMarkers(releaseNotes: string): boolean {
return releaseNotes.includes(LANG_MARKERS.EN_START)
}

/**
* Parse multi-language release notes and return the appropriate language version
* @param releaseNotes - Release notes string with language markers
* @returns Parsed release notes for the user's language
*
* Expected format:
* <!--LANG:en-->English content<!--LANG:zh-CN-->Chinese content<!--LANG:END-->
*/
private parseMultiLangReleaseNotes(releaseNotes: string): string {
|
||||
try {
|
||||
const language = configManager.getLanguage()
|
||||
const isChineseUser = language === 'zh-CN' || language === 'zh-TW'
|
||||
|
||||
// Create regex patterns using constants
|
||||
const enPattern = new RegExp(
|
||||
`${LANG_MARKERS.EN_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}([\\s\\S]*?)${LANG_MARKERS.ZH_CN_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}`
|
||||
)
|
||||
const zhPattern = new RegExp(
|
||||
`${LANG_MARKERS.ZH_CN_START.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}([\\s\\S]*?)${LANG_MARKERS.END.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')}`
|
||||
)
|
||||
|
||||
// Extract language sections
|
||||
const enMatch = releaseNotes.match(enPattern)
|
||||
const zhMatch = releaseNotes.match(zhPattern)
|
||||
|
||||
// Return appropriate language version with proper fallback
|
||||
if (isChineseUser && zhMatch) {
|
||||
return zhMatch[1].trim()
|
||||
} else if (enMatch) {
|
||||
return enMatch[1].trim()
|
||||
} else {
|
||||
// Clean fallback: remove all language markers
|
||||
logger.warn('Failed to extract language-specific release notes, using cleaned fallback')
|
||||
return releaseNotes
|
||||
.replace(new RegExp(`${LANG_MARKERS.EN_START}|${LANG_MARKERS.ZH_CN_START}|${LANG_MARKERS.END}`, 'g'), '')
|
||||
.trim()
|
||||
}
|
||||
} catch (error) {
|
||||
logger.error('Failed to parse multi-language release notes', error as Error)
|
||||
// Return original notes as safe fallback
|
||||
return releaseNotes
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Process release info to handle multi-language release notes
|
||||
* @param releaseInfo - Original release info from updater
|
||||
* @returns Processed release info with localized release notes
|
||||
*/
|
||||
private processReleaseInfo(releaseInfo: UpdateInfo): UpdateInfo {
|
||||
const processedInfo = { ...releaseInfo }
|
||||
|
||||
// Handle multi-language release notes in string format
|
||||
if (releaseInfo.releaseNotes && typeof releaseInfo.releaseNotes === 'string') {
|
||||
// Check if it contains multi-language markers
|
||||
if (this.hasMultiLanguageMarkers(releaseInfo.releaseNotes)) {
|
||||
processedInfo.releaseNotes = this.parseMultiLangReleaseNotes(releaseInfo.releaseNotes)
|
||||
}
|
||||
}
|
||||
|
||||
return processedInfo
|
||||
}
|
||||
|
||||
/**
|
||||
* Format release notes for display
|
||||
* @param releaseNotes - Release notes in various formats
|
||||
* @returns Formatted string for display
|
||||
*/
|
||||
private formatReleaseNotes(releaseNotes: string | ReleaseNoteInfo[] | null | undefined): string {
|
||||
if (!releaseNotes) {
|
||||
return ''
|
||||
}
|
||||
|
||||
if (typeof releaseNotes === 'string') {
|
||||
// Check if it contains multi-language markers
|
||||
if (this.hasMultiLanguageMarkers(releaseNotes)) {
|
||||
return this.parseMultiLangReleaseNotes(releaseNotes)
|
||||
}
|
||||
return releaseNotes
|
||||
}
|
||||
|
||||
return releaseNotes.map((note) => note.note).join('\n')
|
||||
if (Array.isArray(releaseNotes)) {
|
||||
return releaseNotes.map((note) => note.note).join('\n')
|
||||
}
|
||||
|
||||
return ''
|
||||
}
|
||||
}
|
||||
interface GithubReleaseInfo {
|
||||
|
||||
@ -666,7 +666,7 @@ class CodeToolsService {
|
||||
const command = envPrefix ? `${envPrefix} && ${baseCommand}` : baseCommand
|
||||
|
||||
// Combine directory change with the main command to ensure they execute in the same shell session
|
||||
const fullCommand = `cd '${directory.replace(/'/g, "\\'")}' && clear && ${command}`
|
||||
const fullCommand = `cd "${directory.replace(/"/g, '\\"')}" && clear && ${command}`
|
||||
|
||||
const terminalConfig = await this.getTerminalConfig(options.terminal)
|
||||
logger.info(`Using terminal: ${terminalConfig.name} (${terminalConfig.id})`)
|
||||
|
||||
319
src/main/services/__tests__/AppUpdater.test.ts
Normal file
@ -0,0 +1,319 @@
|
||||
import { UpdateInfo } from 'builder-util-runtime'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
// Mock dependencies
|
||||
vi.mock('@logger', () => ({
|
||||
loggerService: {
|
||||
withContext: () => ({
|
||||
info: vi.fn(),
|
||||
error: vi.fn(),
|
||||
warn: vi.fn()
|
||||
})
|
||||
}
|
||||
}))
|
||||
|
||||
vi.mock('../ConfigManager', () => ({
|
||||
configManager: {
|
||||
getLanguage: vi.fn(),
|
||||
getAutoUpdate: vi.fn(() => false),
|
||||
getTestPlan: vi.fn(() => false),
|
||||
getTestChannel: vi.fn(),
|
||||
getClientId: vi.fn(() => 'test-client-id')
|
||||
}
|
||||
}))
|
||||
|
||||
vi.mock('../WindowService', () => ({
|
||||
windowService: {
|
||||
getMainWindow: vi.fn()
|
||||
}
|
||||
}))
|
||||
|
||||
vi.mock('@main/constant', () => ({
|
||||
isWin: false
|
||||
}))
|
||||
|
||||
vi.mock('@main/utils/ipService', () => ({
|
||||
getIpCountry: vi.fn(() => 'US')
|
||||
}))
|
||||
|
||||
vi.mock('@main/utils/locales', () => ({
|
||||
locales: {
|
||||
en: { translation: { update: {} } },
|
||||
'zh-CN': { translation: { update: {} } }
|
||||
}
|
||||
}))
|
||||
|
||||
vi.mock('@main/utils/systemInfo', () => ({
|
||||
generateUserAgent: vi.fn(() => 'test-user-agent')
|
||||
}))
|
||||
|
||||
vi.mock('electron', () => ({
|
||||
app: {
|
||||
isPackaged: true,
|
||||
getVersion: vi.fn(() => '1.0.0'),
|
||||
getPath: vi.fn(() => '/test/path')
|
||||
},
|
||||
dialog: {
|
||||
showMessageBox: vi.fn()
|
||||
},
|
||||
BrowserWindow: vi.fn(),
|
||||
net: {
|
||||
fetch: vi.fn()
|
||||
}
|
||||
}))
|
||||
|
||||
vi.mock('electron-updater', () => ({
|
||||
autoUpdater: {
|
||||
logger: null,
|
||||
forceDevUpdateConfig: false,
|
||||
autoDownload: false,
|
||||
autoInstallOnAppQuit: false,
|
||||
requestHeaders: {},
|
||||
on: vi.fn(),
|
||||
setFeedURL: vi.fn(),
|
||||
checkForUpdates: vi.fn(),
|
||||
downloadUpdate: vi.fn(),
|
||||
quitAndInstall: vi.fn(),
|
||||
channel: '',
|
||||
allowDowngrade: false,
|
||||
disableDifferentialDownload: false,
|
||||
currentVersion: '1.0.0'
|
||||
},
|
||||
Logger: vi.fn(),
|
||||
NsisUpdater: vi.fn(),
|
||||
AppUpdater: vi.fn()
|
||||
}))
|
||||
|
||||
// Import after mocks
|
||||
import AppUpdater from '../AppUpdater'
|
||||
import { configManager } from '../ConfigManager'
|
||||
|
||||
describe('AppUpdater', () => {
|
||||
let appUpdater: AppUpdater
|
||||
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
appUpdater = new AppUpdater()
|
||||
})
|
||||
|
||||
describe('parseMultiLangReleaseNotes', () => {
|
||||
const sampleReleaseNotes = `<!--LANG:en-->
|
||||
🚀 New Features:
|
||||
- Feature A
|
||||
- Feature B
|
||||
|
||||
🎨 UI Improvements:
|
||||
- Improvement A
|
||||
<!--LANG:zh-CN-->
|
||||
🚀 新功能:
|
||||
- 功能 A
|
||||
- 功能 B
|
||||
|
||||
🎨 界面改进:
|
||||
- 改进 A
|
||||
<!--LANG:END-->`
|
||||
|
||||
it('should return Chinese notes for zh-CN users', () => {
|
||||
vi.mocked(configManager.getLanguage).mockReturnValue('zh-CN')
|
||||
|
||||
const result = (appUpdater as any).parseMultiLangReleaseNotes(sampleReleaseNotes)
|
||||
|
||||
expect(result).toContain('新功能')
|
||||
expect(result).toContain('功能 A')
|
||||
expect(result).not.toContain('New Features')
|
||||
})
|
||||
|
||||
it('should return Chinese notes for zh-TW users', () => {
|
||||
vi.mocked(configManager.getLanguage).mockReturnValue('zh-TW')
|
||||
|
||||
const result = (appUpdater as any).parseMultiLangReleaseNotes(sampleReleaseNotes)
|
||||
|
||||
expect(result).toContain('新功能')
|
||||
expect(result).toContain('功能 A')
|
||||
expect(result).not.toContain('New Features')
|
||||
})
|
||||
|
||||
it('should return English notes for non-Chinese users', () => {
|
||||
vi.mocked(configManager.getLanguage).mockReturnValue('en-US')
|
||||
|
||||
const result = (appUpdater as any).parseMultiLangReleaseNotes(sampleReleaseNotes)
|
||||
|
||||
expect(result).toContain('New Features')
|
||||
expect(result).toContain('Feature A')
|
||||
expect(result).not.toContain('新功能')
|
||||
})
|
||||
|
||||
it('should return English notes for other language users', () => {
|
||||
vi.mocked(configManager.getLanguage).mockReturnValue('ru-RU')
|
||||
|
||||
const result = (appUpdater as any).parseMultiLangReleaseNotes(sampleReleaseNotes)
|
||||
|
||||
expect(result).toContain('New Features')
|
||||
expect(result).not.toContain('新功能')
|
||||
})
|
||||
|
||||
it('should handle missing language sections gracefully', () => {
|
||||
const malformedNotes = 'Simple release notes without markers'
|
||||
|
||||
const result = (appUpdater as any).parseMultiLangReleaseNotes(malformedNotes)
|
||||
|
||||
expect(result).toBe('Simple release notes without markers')
|
||||
})
|
||||
|
||||
it('should handle malformed markers', () => {
|
||||
const malformedNotes = `<!--LANG:en-->English only`
|
||||
vi.mocked(configManager.getLanguage).mockReturnValue('zh-CN')
|
||||
|
||||
const result = (appUpdater as any).parseMultiLangReleaseNotes(malformedNotes)
|
||||
|
||||
// Should clean up markers and return cleaned content
|
||||
expect(result).toContain('English only')
|
||||
expect(result).not.toContain('<!--LANG:')
|
||||
})
|
||||
|
||||
it('should handle empty release notes', () => {
|
||||
const result = (appUpdater as any).parseMultiLangReleaseNotes('')
|
||||
|
||||
expect(result).toBe('')
|
||||
})
|
||||
|
||||
it('should handle errors gracefully', () => {
|
||||
// Force an error by mocking configManager to throw
|
||||
vi.mocked(configManager.getLanguage).mockImplementation(() => {
|
||||
throw new Error('Test error')
|
||||
})
|
||||
|
||||
const result = (appUpdater as any).parseMultiLangReleaseNotes(sampleReleaseNotes)
|
||||
|
||||
// Should return original notes as fallback
|
||||
expect(result).toBe(sampleReleaseNotes)
|
||||
})
|
||||
})
|
||||
|
||||
describe('hasMultiLanguageMarkers', () => {
|
||||
it('should return true when markers are present', () => {
|
||||
const notes = '<!--LANG:en-->Test'
|
||||
|
||||
const result = (appUpdater as any).hasMultiLanguageMarkers(notes)
|
||||
|
||||
expect(result).toBe(true)
|
||||
})
|
||||
|
||||
it('should return false when no markers are present', () => {
|
||||
const notes = 'Simple text without markers'
|
||||
|
||||
const result = (appUpdater as any).hasMultiLanguageMarkers(notes)
|
||||
|
||||
expect(result).toBe(false)
|
||||
})
|
||||
})
|
||||
|
||||
describe('processReleaseInfo', () => {
|
||||
it('should process multi-language release notes in string format', () => {
|
||||
vi.mocked(configManager.getLanguage).mockReturnValue('zh-CN')
|
||||
|
||||
const releaseInfo = {
|
||||
version: '1.0.0',
|
||||
files: [],
|
||||
path: '',
|
||||
sha512: '',
|
||||
releaseDate: new Date().toISOString(),
|
||||
releaseNotes: `<!--LANG:en-->English notes<!--LANG:zh-CN-->中文说明<!--LANG:END-->`
|
||||
} as UpdateInfo
|
||||
|
||||
const result = (appUpdater as any).processReleaseInfo(releaseInfo)
|
||||
|
||||
expect(result.releaseNotes).toBe('中文说明')
|
||||
})
|
||||
|
||||
it('should not process release notes without markers', () => {
|
||||
const releaseInfo = {
|
||||
version: '1.0.0',
|
||||
files: [],
|
||||
path: '',
|
||||
sha512: '',
|
||||
releaseDate: new Date().toISOString(),
|
||||
releaseNotes: 'Simple release notes'
|
||||
} as UpdateInfo
|
||||
|
||||
const result = (appUpdater as any).processReleaseInfo(releaseInfo)
|
||||
|
||||
expect(result.releaseNotes).toBe('Simple release notes')
|
||||
})
|
||||
|
||||
it('should handle array format release notes', () => {
|
||||
const releaseInfo = {
|
||||
version: '1.0.0',
|
||||
files: [],
|
||||
path: '',
|
||||
sha512: '',
|
||||
releaseDate: new Date().toISOString(),
|
||||
releaseNotes: [
|
||||
{ version: '1.0.0', note: 'Note 1' },
|
||||
{ version: '1.0.1', note: 'Note 2' }
|
||||
]
|
||||
} as UpdateInfo
|
||||
|
||||
const result = (appUpdater as any).processReleaseInfo(releaseInfo)
|
||||
|
||||
expect(result.releaseNotes).toEqual(releaseInfo.releaseNotes)
|
||||
})
|
||||
|
||||
it('should handle null release notes', () => {
|
||||
const releaseInfo = {
|
||||
version: '1.0.0',
|
||||
files: [],
|
||||
path: '',
|
||||
sha512: '',
|
||||
releaseDate: new Date().toISOString(),
|
||||
releaseNotes: null
|
||||
} as UpdateInfo
|
||||
|
||||
const result = (appUpdater as any).processReleaseInfo(releaseInfo)
|
||||
|
||||
expect(result.releaseNotes).toBeNull()
|
||||
})
|
||||
})
|
||||
|
||||
describe('formatReleaseNotes', () => {
|
||||
it('should format string release notes with markers', () => {
|
||||
vi.mocked(configManager.getLanguage).mockReturnValue('en-US')
|
||||
const notes = `<!--LANG:en-->English<!--LANG:zh-CN-->中文<!--LANG:END-->`
|
||||
|
||||
const result = (appUpdater as any).formatReleaseNotes(notes)
|
||||
|
||||
expect(result).toBe('English')
|
||||
})
|
||||
|
||||
it('should format string release notes without markers', () => {
|
||||
const notes = 'Simple notes'
|
||||
|
||||
const result = (appUpdater as any).formatReleaseNotes(notes)
|
||||
|
||||
expect(result).toBe('Simple notes')
|
||||
})
|
||||
|
||||
it('should format array release notes', () => {
|
||||
const notes = [
|
||||
{ version: '1.0.0', note: 'Note 1' },
|
||||
{ version: '1.0.1', note: 'Note 2' }
|
||||
]
|
||||
|
||||
const result = (appUpdater as any).formatReleaseNotes(notes)
|
||||
|
||||
expect(result).toBe('Note 1\nNote 2')
|
||||
})
|
||||
|
||||
it('should handle null release notes', () => {
|
||||
const result = (appUpdater as any).formatReleaseNotes(null)
|
||||
|
||||
expect(result).toBe('')
|
||||
})
|
||||
|
||||
it('should handle undefined release notes', () => {
|
||||
const result = (appUpdater as any).formatReleaseNotes(undefined)
|
||||
|
||||
expect(result).toBe('')
|
||||
})
|
||||
})
|
||||
})
|
||||
@ -0,0 +1,290 @@
|
||||
import type { SDKMessage } from '@anthropic-ai/claude-code'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
|
||||
import { ClaudeStreamState, transformSDKMessageToStreamParts } from '../transform'
|
||||
|
||||
const baseStreamMetadata = {
|
||||
parent_tool_use_id: null,
|
||||
session_id: 'session-123'
|
||||
}
|
||||
|
||||
const uuid = (n: number) => `00000000-0000-0000-0000-${n.toString().padStart(12, '0')}`
|
||||
|
||||
describe('Claude → AiSDK transform', () => {
|
||||
it('handles tool call streaming lifecycle', () => {
|
||||
const state = new ClaudeStreamState()
|
||||
const parts: ReturnType<typeof transformSDKMessageToStreamParts>[number][] = []
|
||||
|
||||
const messages: SDKMessage[] = [
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(1),
|
||||
event: {
|
||||
type: 'message_start',
|
||||
message: {
|
||||
id: 'msg-start',
|
||||
type: 'message',
|
||||
role: 'assistant',
|
||||
model: 'claude-test',
|
||||
content: [],
|
||||
stop_reason: null,
|
||||
stop_sequence: null,
|
||||
usage: {}
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(2),
|
||||
event: {
|
||||
type: 'content_block_start',
|
||||
index: 0,
|
||||
content_block: {
|
||||
type: 'tool_use',
|
||||
id: 'tool-1',
|
||||
name: 'Bash',
|
||||
input: {}
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(3),
|
||||
event: {
|
||||
type: 'content_block_delta',
|
||||
index: 0,
|
||||
delta: {
|
||||
type: 'input_json_delta',
|
||||
partial_json: '{"command":"ls"}'
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'assistant',
|
||||
uuid: uuid(4),
|
||||
message: {
|
||||
id: 'msg-tool',
|
||||
type: 'message',
|
||||
role: 'assistant',
|
||||
model: 'claude-test',
|
||||
content: [
|
||||
{
|
||||
type: 'tool_use',
|
||||
id: 'tool-1',
|
||||
name: 'Bash',
|
||||
input: {
|
||||
command: 'ls'
|
||||
}
|
||||
}
|
||||
],
|
||||
stop_reason: 'tool_use',
|
||||
stop_sequence: null,
|
||||
usage: {
|
||||
input_tokens: 1,
|
||||
output_tokens: 0
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(5),
|
||||
event: {
|
||||
type: 'content_block_stop',
|
||||
index: 0
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(6),
|
||||
event: {
|
||||
type: 'message_delta',
|
||||
delta: {
|
||||
stop_reason: 'tool_use',
|
||||
stop_sequence: null
|
||||
},
|
||||
usage: {
|
||||
input_tokens: 1,
|
||||
output_tokens: 5
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(7),
|
||||
event: {
|
||||
type: 'message_stop'
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'user',
|
||||
uuid: uuid(8),
|
||||
message: {
|
||||
role: 'user',
|
||||
content: [
|
||||
{
|
||||
type: 'tool_result',
|
||||
tool_use_id: 'tool-1',
|
||||
content: 'ok',
|
||||
is_error: false
|
||||
}
|
||||
]
|
||||
}
|
||||
} as SDKMessage
|
||||
]
|
||||
|
||||
for (const message of messages) {
|
||||
const transformed = transformSDKMessageToStreamParts(message, state)
|
||||
for (const part of transformed) {
|
||||
parts.push(part)
|
||||
}
|
||||
}
|
||||
|
||||
const types = parts.map((part) => part.type)
|
||||
expect(types).toEqual([
|
||||
'start-step',
|
||||
'tool-input-start',
|
||||
'tool-input-delta',
|
||||
'tool-call',
|
||||
'tool-input-end',
|
||||
'finish-step',
|
||||
'tool-result'
|
||||
])
|
||||
|
||||
const finishStep = parts.find((part) => part.type === 'finish-step') as Extract<
|
||||
(typeof parts)[number],
|
||||
{ type: 'finish-step' }
|
||||
>
|
||||
expect(finishStep.finishReason).toBe('tool-calls')
|
||||
expect(finishStep.usage).toEqual({ inputTokens: 1, outputTokens: 5, totalTokens: 6 })
|
||||
|
||||
const toolResult = parts.find((part) => part.type === 'tool-result') as Extract<
|
||||
(typeof parts)[number],
|
||||
{ type: 'tool-result' }
|
||||
>
|
||||
expect(toolResult.toolCallId).toBe('tool-1')
|
||||
expect(toolResult.toolName).toBe('Bash')
|
||||
expect(toolResult.input).toEqual({ command: 'ls' })
|
||||
expect(toolResult.output).toBe('ok')
|
||||
})
|
||||
|
||||
it('handles streaming text completion', () => {
|
||||
const state = new ClaudeStreamState()
|
||||
const parts: ReturnType<typeof transformSDKMessageToStreamParts>[number][] = []
|
||||
|
||||
const messages: SDKMessage[] = [
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(9),
|
||||
event: {
|
||||
type: 'message_start',
|
||||
message: {
|
||||
id: 'msg-text',
|
||||
type: 'message',
|
||||
role: 'assistant',
|
||||
model: 'claude-text',
|
||||
content: [],
|
||||
stop_reason: null,
|
||||
stop_sequence: null,
|
||||
usage: {}
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(10),
|
||||
event: {
|
||||
type: 'content_block_start',
|
||||
index: 0,
|
||||
content_block: {
|
||||
type: 'text',
|
||||
text: ''
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(11),
|
||||
event: {
|
||||
type: 'content_block_delta',
|
||||
index: 0,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: 'Hello'
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(12),
|
||||
event: {
|
||||
type: 'content_block_delta',
|
||||
index: 0,
|
||||
delta: {
|
||||
type: 'text_delta',
|
||||
text: ' world'
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(13),
|
||||
event: {
|
||||
type: 'content_block_stop',
|
||||
index: 0
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(14),
|
||||
event: {
|
||||
type: 'message_delta',
|
||||
delta: {
|
||||
stop_reason: 'end_turn',
|
||||
stop_sequence: null
|
||||
},
|
||||
usage: {
|
||||
input_tokens: 2,
|
||||
output_tokens: 4
|
||||
}
|
||||
}
|
||||
} as unknown as SDKMessage,
|
||||
{
|
||||
...baseStreamMetadata,
|
||||
type: 'stream_event',
|
||||
uuid: uuid(15),
|
||||
event: {
|
||||
type: 'message_stop'
|
||||
}
|
||||
} as SDKMessage
|
||||
]
|
||||
|
||||
for (const message of messages) {
|
||||
const transformed = transformSDKMessageToStreamParts(message, state)
|
||||
parts.push(...transformed)
|
||||
}
|
||||
|
||||
const types = parts.map((part) => part.type)
|
||||
expect(types).toEqual(['start-step', 'text-start', 'text-delta', 'text-delta', 'text-end', 'finish-step'])
|
||||
|
||||
const finishStep = parts.find((part) => part.type === 'finish-step') as Extract<
|
||||
(typeof parts)[number],
|
||||
{ type: 'finish-step' }
|
||||
>
|
||||
expect(finishStep.finishReason).toBe('stop')
|
||||
expect(finishStep.usage).toEqual({ inputTokens: 2, outputTokens: 4, totalTokens: 6 })
|
||||
})
|
||||
})
|
||||
@ -0,0 +1,241 @@
|
||||
/**
* Lightweight state container shared by the Claude → AiSDK transformer. Anthropic does not send
* deterministic identifiers for intermediate content blocks, so we stitch one together by tracking
* block indices and associated AiSDK ids. This class also keeps:
* • incremental text / reasoning buffers so we can emit only deltas while retaining the full
* aggregate for later tool-call emission;
* • a reverse lookup for tool calls so `tool_result` snapshots can recover their metadata;
* • pending usage + finish reason from `message_delta` events until the corresponding
* `message_stop` arrives.
* Every Claude turn gets its own instance. `resetStep` should be invoked once the finish event has
* been emitted to avoid leaking state into the next turn.
*/
import type { FinishReason, LanguageModelUsage, ProviderMetadata } from 'ai'
|
||||
|
||||
/**
|
||||
* Shared fields for every block that Claude can stream (text, reasoning, tool).
|
||||
*/
|
||||
type BaseBlockState = {
|
||||
id: string
|
||||
index: number
|
||||
}
|
||||
|
||||
type TextBlockState = BaseBlockState & {
|
||||
kind: 'text'
|
||||
text: string
|
||||
}
|
||||
|
||||
type ReasoningBlockState = BaseBlockState & {
|
||||
kind: 'reasoning'
|
||||
text: string
|
||||
redacted: boolean
|
||||
}
|
||||
|
||||
type ToolBlockState = BaseBlockState & {
|
||||
kind: 'tool'
|
||||
toolCallId: string
|
||||
toolName: string
|
||||
inputBuffer: string
|
||||
providerMetadata?: ProviderMetadata
|
||||
resolvedInput?: unknown
|
||||
}
|
||||
|
||||
export type BlockState = TextBlockState | ReasoningBlockState | ToolBlockState
|
||||
|
||||
type PendingUsageState = {
|
||||
usage?: LanguageModelUsage
|
||||
finishReason?: FinishReason
|
||||
}
|
||||
|
||||
type PendingToolCall = {
|
||||
toolCallId: string
|
||||
toolName: string
|
||||
input: unknown
|
||||
providerMetadata?: ProviderMetadata
|
||||
}
|
||||
|
||||
/**
* Tracks the lifecycle of Claude streaming blocks (text, thinking, tool calls)
* across individual websocket events. The transformer relies on this class to
* stitch together deltas, manage pending tool inputs/results, and propagate
* usage/finish metadata once Anthropic closes a message.
*/
export class ClaudeStreamState {
|
||||
private blocksByIndex = new Map<number, BlockState>()
|
||||
private toolIndexById = new Map<string, number>()
|
||||
private pendingUsage: PendingUsageState = {}
|
||||
private pendingToolCalls = new Map<string, PendingToolCall>()
|
||||
private stepActive = false
|
||||
|
||||
/** Marks the beginning of a new AiSDK step. */
|
||||
beginStep(): void {
|
||||
this.stepActive = true
|
||||
}
|
||||
|
||||
hasActiveStep(): boolean {
|
||||
return this.stepActive
|
||||
}
|
||||
|
||||
/** Creates a text block placeholder so future deltas can accumulate into it. */
|
||||
openTextBlock(index: number, id: string): TextBlockState {
|
||||
const block: TextBlockState = {
|
||||
kind: 'text',
|
||||
id,
|
||||
index,
|
||||
text: ''
|
||||
}
|
||||
this.blocksByIndex.set(index, block)
|
||||
return block
|
||||
}
|
||||
|
||||
/** Starts tracking an Anthropic "thinking" block, optionally flagged as redacted. */
|
||||
openReasoningBlock(index: number, id: string, redacted: boolean): ReasoningBlockState {
|
||||
const block: ReasoningBlockState = {
|
||||
kind: 'reasoning',
|
||||
id,
|
||||
index,
|
||||
redacted,
|
||||
text: ''
|
||||
}
|
||||
this.blocksByIndex.set(index, block)
|
||||
return block
|
||||
}
|
||||
|
||||
/** Caches tool metadata so subsequent input deltas and results can find it. */
|
||||
openToolBlock(
|
||||
index: number,
|
||||
params: { toolCallId: string; toolName: string; providerMetadata?: ProviderMetadata }
|
||||
): ToolBlockState {
|
||||
const block: ToolBlockState = {
|
||||
kind: 'tool',
|
||||
id: params.toolCallId,
|
||||
index,
|
||||
toolCallId: params.toolCallId,
|
||||
toolName: params.toolName,
|
||||
inputBuffer: '',
|
||||
providerMetadata: params.providerMetadata
|
||||
}
|
||||
this.blocksByIndex.set(index, block)
|
||||
this.toolIndexById.set(params.toolCallId, index)
|
||||
return block
|
||||
}
|
||||
|
||||
getBlock(index: number): BlockState | undefined {
|
||||
return this.blocksByIndex.get(index)
|
||||
}
|
||||
|
||||
getToolBlockById(toolCallId: string): ToolBlockState | undefined {
|
||||
const index = this.toolIndexById.get(toolCallId)
|
||||
if (index === undefined) return undefined
|
||||
const block = this.blocksByIndex.get(index)
|
||||
if (!block || block.kind !== 'tool') return undefined
|
||||
return block
|
||||
}
|
||||
|
||||
/** Appends streamed text to a text block, returning the updated state when present. */
|
||||
appendTextDelta(index: number, text: string): TextBlockState | undefined {
|
||||
const block = this.blocksByIndex.get(index)
|
||||
if (!block || block.kind !== 'text') return undefined
|
||||
block.text += text
|
||||
return block
|
||||
}
|
||||
|
||||
/** Appends streamed "thinking" content to the tracked reasoning block. */
|
||||
appendReasoningDelta(index: number, text: string): ReasoningBlockState | undefined {
|
||||
const block = this.blocksByIndex.get(index)
|
||||
if (!block || block.kind !== 'reasoning') return undefined
|
||||
block.text += text
|
||||
return block
|
||||
}
|
||||
|
||||
/** Concatenates incremental JSON payloads for tool input blocks. */
|
||||
appendToolInputDelta(index: number, jsonDelta: string): ToolBlockState | undefined {
|
||||
const block = this.blocksByIndex.get(index)
|
||||
if (!block || block.kind !== 'tool') return undefined
|
||||
block.inputBuffer += jsonDelta
|
||||
return block
|
||||
}
|
||||
|
||||
/** Records a tool call to be consumed once its result arrives from the user. */
|
||||
registerToolCall(
|
||||
toolCallId: string,
|
||||
payload: { toolName: string; input: unknown; providerMetadata?: ProviderMetadata }
|
||||
): void {
|
||||
this.pendingToolCalls.set(toolCallId, {
|
||||
toolCallId,
|
||||
toolName: payload.toolName,
|
||||
input: payload.input,
|
||||
providerMetadata: payload.providerMetadata
|
||||
})
|
||||
}
|
||||
|
||||
/** Retrieves and clears the buffered tool call metadata for the given id. */
|
||||
consumePendingToolCall(toolCallId: string): PendingToolCall | undefined {
|
||||
const entry = this.pendingToolCalls.get(toolCallId)
|
||||
if (entry) {
|
||||
this.pendingToolCalls.delete(toolCallId)
|
||||
}
|
||||
return entry
|
||||
}
|
||||
|
||||
/**
|
||||
* Persists the final input payload for a tool block once the provider signals
|
||||
* completion so that downstream tool results can reference the original call.
|
||||
*/
|
||||
completeToolBlock(toolCallId: string, input: unknown, providerMetadata?: ProviderMetadata): void {
|
||||
this.registerToolCall(toolCallId, {
|
||||
toolName: this.getToolBlockById(toolCallId)?.toolName ?? 'unknown',
|
||||
input,
|
||||
providerMetadata
|
||||
})
|
||||
const block = this.getToolBlockById(toolCallId)
|
||||
if (block) {
|
||||
block.resolvedInput = input
|
||||
}
|
||||
}
|
||||
|
||||
/** Removes a block from the active index map when Claude signals it is done. */
|
||||
closeBlock(index: number): BlockState | undefined {
|
||||
const block = this.blocksByIndex.get(index)
|
||||
if (!block) return undefined
|
||||
this.blocksByIndex.delete(index)
|
||||
if (block.kind === 'tool') {
|
||||
this.toolIndexById.delete(block.toolCallId)
|
||||
}
|
||||
return block
|
||||
}
|
||||
|
||||
/** Stores interim usage metrics so they can be emitted with the `finish-step`. */
|
||||
setPendingUsage(usage?: LanguageModelUsage, finishReason?: FinishReason): void {
|
||||
if (usage) {
|
||||
this.pendingUsage.usage = usage
|
||||
}
|
||||
if (finishReason) {
|
||||
this.pendingUsage.finishReason = finishReason
|
||||
}
|
||||
}
|
||||
|
||||
getPendingUsage(): PendingUsageState {
|
||||
return { ...this.pendingUsage }
|
||||
}
|
||||
|
||||
/** Clears any accumulated usage values for the next streamed message. */
|
||||
resetPendingUsage(): void {
|
||||
this.pendingUsage = {}
|
||||
}
|
||||
|
||||
/** Drops cached block metadata for the currently active message. */
|
||||
resetBlocks(): void {
|
||||
this.blocksByIndex.clear()
|
||||
this.toolIndexById.clear()
|
||||
}
|
||||
|
||||
/** Resets the entire step lifecycle after emitting a terminal frame. */
|
||||
resetStep(): void {
|
||||
this.resetBlocks()
|
||||
this.resetPendingUsage()
|
||||
this.stepActive = false
|
||||
}
|
||||
}
|
||||
|
||||
export type { PendingToolCall }
|
||||
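
A minimal usage sketch of this state container, with illustrative values, mirroring how transform.ts is expected to drive it across one tool-call turn:

import { ClaudeStreamState } from './claude-stream-state'

const state = new ClaudeStreamState()
state.beginStep() // message_start
state.openToolBlock(0, { toolCallId: 'tool-1', toolName: 'Bash' }) // content_block_start
state.appendToolInputDelta(0, '{"command":"ls"}') // content_block_delta (input_json_delta)
state.completeToolBlock('tool-1', { command: 'ls' }) // assistant snapshot finalises the call
state.closeBlock(0) // content_block_stop
state.setPendingUsage({ inputTokens: 1, outputTokens: 5, totalTokens: 6 }, 'tool-calls') // message_delta
state.getPendingUsage() // consumed by the finish-step emitted on message_stop
state.consumePendingToolCall('tool-1') // looked up when the tool_result arrives
state.resetStep() // clear everything before the next Claude turn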
@ -11,7 +11,7 @@ import { app } from 'electron'
|
||||
|
||||
import { GetAgentSessionResponse } from '../..'
|
||||
import { AgentServiceInterface, AgentStream, AgentStreamEvent } from '../../interfaces/AgentStreamInterface'
|
||||
import { transformSDKMessageToStreamParts } from './transform'
|
||||
import { ClaudeStreamState, transformSDKMessageToStreamParts } from './transform'
|
||||
|
||||
const require_ = createRequire(import.meta.url)
|
||||
const logger = loggerService.withContext('ClaudeCodeService')
|
||||
@ -92,6 +92,7 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
errorChunks.push(chunk)
|
||||
},
|
||||
appendSystemPrompt: session.instructions,
|
||||
includePartialMessages: true,
|
||||
permissionMode: session.configuration?.permission_mode,
|
||||
maxTurns: session.configuration?.max_turns,
|
||||
allowedTools: session.allowed_tools
|
||||
@ -164,6 +165,7 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
let hasCompleted = false
|
||||
const startTime = Date.now()
|
||||
|
||||
const streamState = new ClaudeStreamState()
|
||||
try {
|
||||
// Process streaming responses using SDK query
|
||||
for await (const message of query({
|
||||
@ -173,15 +175,21 @@ class ClaudeCodeService implements AgentServiceInterface {
|
||||
if (hasCompleted) break
|
||||
|
||||
jsonOutput.push(message)
|
||||
logger.silly('claude response', { message })
|
||||
|
||||
if (message.type === 'assistant' || message.type === 'user') {
|
||||
logger.silly('message content', {
|
||||
message: JSON.stringify({ role: message.message.role, content: message.message.content })
|
||||
logger.silly('claude response', {
|
||||
message,
|
||||
content: JSON.stringify(message.message.content)
|
||||
})
|
||||
} else if (message.type === 'stream_event') {
|
||||
logger.silly('Claude stream event', {
|
||||
message,
|
||||
event: JSON.stringify(message.event)
|
||||
})
|
||||
}
|
||||
|
||||
// Transform SDKMessage to UIMessageChunks
|
||||
const chunks = transformSDKMessageToStreamParts(message)
|
||||
const chunks = transformSDKMessageToStreamParts(message, streamState)
|
||||
for (const chunk of chunks) {
|
||||
stream.emit('data', {
|
||||
type: 'chunk',
|
||||
|
||||
@ -1,66 +1,82 @@
|
||||
// This file is used to transform claude code json response to aisdk streaming format
|
||||
/**
* Translates Anthropic Claude Code streaming messages into the generic AiSDK stream
* parts that the agent runtime understands. The transformer coordinates batched
* text/tool payloads, keeps per-message state using {@link ClaudeStreamState},
* and normalises usage metadata and finish reasons so downstream consumers do
* not need to reason about Anthropic-specific payload shapes.
*
* Stream lifecycle cheatsheet (per Claude turn):
* 1. `stream_event.message_start` → emit `start-step` and mark the state as active.
* 2. `content_block_start` (by index) → open a stateful block; emits one of
* `text-start` | `reasoning-start` | `tool-input-start`.
* 3. `content_block_delta` → append incremental text / reasoning / tool JSON,
* emitting only the delta to minimise UI churn.
* 4. `content_block_stop` → emit the matching `*-end` event and release the block.
* 5. `message_delta` → capture usage + stop reason but defer emission.
* 6. `message_stop` → emit `finish-step` with cached usage & reason, then reset.
* 7. Assistant snapshots with `tool_use` finalise the tool block (`tool-call`).
* 8. User snapshots with `tool_result` emit `tool-result`/`tool-error` using the cached payload.
* 9. Assistant snapshots with plain text (when no stream events were provided) fall back to
* emitting `text-*` parts and a synthetic `finish-step`.
*/

import type { LanguageModelV2Usage } from '@ai-sdk/provider'
|
||||
import { SDKMessage } from '@anthropic-ai/claude-code'
|
||||
import type { BetaStopReason } from '@anthropic-ai/sdk/resources/beta/messages/messages.mjs'
|
||||
import { loggerService } from '@logger'
|
||||
import type { ClaudeCodeRawValue } from '@shared/agents/claudecode/types'
|
||||
import type { ProviderMetadata, TextStreamPart } from 'ai'
|
||||
import type { FinishReason, LanguageModelUsage, ProviderMetadata, TextStreamPart } from 'ai'
|
||||
import { v4 as uuidv4 } from 'uuid'
|
||||
|
||||
import { ClaudeStreamState } from './claude-stream-state'
|
||||
import { mapClaudeCodeFinishReason } from './map-claude-code-finish-reason'
|
||||
|
||||
const logger = loggerService.withContext('ClaudeCodeTransform')
|
||||
|
||||
type AgentStreamPart = TextStreamPart<Record<string, any>>
|
||||
|
||||
type contentBlock =
|
||||
| {
|
||||
type: 'text'
|
||||
}
|
||||
| {
|
||||
type: 'tool-call'
|
||||
toolCallId: string
|
||||
toolName: string
|
||||
input: unknown
|
||||
}
|
||||
|
||||
const contentBlockState = new Map<string, contentBlock>()
|
||||
|
||||
type toolCallBlock = Extract<contentBlock, { type: 'tool-call' }>
|
||||
|
||||
// Helper function to generate unique IDs for text blocks
|
||||
const generateMessageId = (): string => `msg_${uuidv4().replace(/-/g, '')}`
|
||||
|
||||
// Main transform function
|
||||
export function transformSDKMessageToStreamParts(sdkMessage: SDKMessage): AgentStreamPart[] {
|
||||
const chunks: AgentStreamPart[] = []
|
||||
// logger.silly('Transforming SDKMessage to stream parts', sdkMessage)
|
||||
switch (sdkMessage.type) {
|
||||
case 'assistant':
|
||||
case 'user':
|
||||
chunks.push(...handleUserOrAssistantMessage(sdkMessage))
|
||||
break
|
||||
|
||||
case 'stream_event':
|
||||
chunks.push(...handleStreamEvent(sdkMessage))
|
||||
break
|
||||
|
||||
case 'system':
|
||||
chunks.push(...handleSystemMessage(sdkMessage))
|
||||
break
|
||||
|
||||
case 'result':
|
||||
chunks.push(...handleResultMessage(sdkMessage))
|
||||
break
|
||||
|
||||
default:
|
||||
logger.warn('Unknown SDKMessage type:', { type: (sdkMessage as any).type })
|
||||
break
|
||||
}
|
||||
|
||||
return chunks
|
||||
type ToolUseContent = {
  type: 'tool_use'
  id: string
  name: string
  input: unknown
}

type ToolResultContent = {
  type: 'tool_result'
  tool_use_id: string
  content: unknown
  is_error?: boolean
}

/**
 * Maps Anthropic stop reasons to the AiSDK equivalents so higher level
 * consumers can treat completion states uniformly across providers.
 */
const finishReasonMapping: Record<BetaStopReason, FinishReason> = {
  end_turn: 'stop',
  max_tokens: 'length',
  stop_sequence: 'stop',
  tool_use: 'tool-calls',
  pause_turn: 'unknown',
  refusal: 'content-filter'
}

const emptyUsage: LanguageModelUsage = {
  inputTokens: 0,
  outputTokens: 0,
  totalTokens: 0
}

/**
 * Generates deterministic-ish message identifiers that are compatible with the
 * AiSDK text stream contract. Anthropic deltas sometimes omit ids, so we create
 * our own to ensure the downstream renderer can stitch chunks together.
 */
const generateMessageId = (): string => `msg_${uuidv4().replace(/-/g, '')}`

/**
 * Extracts provider metadata from the raw Claude message so we can surface it
 * on every emitted stream part for observability and debugging purposes.
 */
const sdkMessageToProviderMetadata = (message: SDKMessage): ProviderMetadata => {
  return {
    anthropic: {
@@ -71,250 +87,523 @@ const sdkMessageToProviderMetadata = (message: SDKMessage): ProviderMetadata =>
  }
}
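Since the table is typed as `Record<BetaStopReason, FinishReason>`, the compiler already forces every known Anthropic stop reason to be mapped. If the table were exported for testing (it is module-private here, so the following is purely illustrative and the import path is an assumption), a small vitest check could pin the semantics of the less obvious entries:

import { describe, expect, it } from 'vitest'

// Assumption: finishReasonMapping re-exported from the transform module for tests.
import { finishReasonMapping } from '../transform-claude-code-stream'

describe('finishReasonMapping', () => {
  it('maps tool_use and refusal onto the AiSDK vocabulary', () => {
    expect(finishReasonMapping.tool_use).toBe('tool-calls')
    expect(finishReasonMapping.refusal).toBe('content-filter')
    expect(finishReasonMapping.pause_turn).toBe('unknown')
  })
})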
function generateTextChunks(id: string, text: string, message: SDKMessage): AgentStreamPart[] {
|
||||
const providerMetadata = sdkMessageToProviderMetadata(message)
|
||||
return [
|
||||
{
|
||||
type: 'text-start',
|
||||
id
|
||||
},
|
||||
{
|
||||
type: 'text-delta',
|
||||
id,
|
||||
text
|
||||
},
|
||||
{
|
||||
type: 'text-end',
|
||||
id,
|
||||
providerMetadata: {
|
||||
...providerMetadata
|
||||
}
|
||||
}
|
||||
]
|
||||
/**
 * Central entrypoint that receives Claude Code websocket events and converts
 * them into AiSDK `TextStreamPart`s. The state machine tracks outstanding
 * blocks across calls so that incremental deltas can be correlated correctly.
 */
export function transformSDKMessageToStreamParts(sdkMessage: SDKMessage, state: ClaudeStreamState): AgentStreamPart[] {
  switch (sdkMessage.type) {
    case 'assistant':
      return handleAssistantMessage(sdkMessage, state)
    case 'user':
      return handleUserMessage(sdkMessage, state)
    case 'stream_event':
      return handleStreamEvent(sdkMessage, state)
    case 'system':
      return handleSystemMessage(sdkMessage)
    case 'result':
      return handleResultMessage(sdkMessage)
    default:
      logger.warn('Unknown SDKMessage type', { type: (sdkMessage as any).type })
      return []
  }
}
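The dispatcher is stateless apart from the `ClaudeStreamState` the caller passes in, which is what lets the agent runtime reuse it across assistant snapshots and fine-grained stream events alike. A minimal driver sketch, assuming the relative module path, a parameterless `ClaudeStreamState` constructor, and a caller-supplied `emit` callback (none of which are shown in this diff):

import type { SDKMessage } from '@anthropic-ai/claude-code'

import { ClaudeStreamState, transformSDKMessageToStreamParts } from './transform-claude-code-stream'

async function pipeClaudeTurn(messages: AsyncIterable<SDKMessage>, emit: (part: unknown) => void): Promise<void> {
  // One state instance per Claude turn so block indices and pending tool calls stay correlated.
  const state = new ClaudeStreamState()
  for await (const message of messages) {
    // Each SDKMessage expands into zero or more AiSDK stream parts.
    for (const part of transformSDKMessageToStreamParts(message, state)) {
      emit(part)
    }
  }
}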
function handleUserOrAssistantMessage(message: Extract<SDKMessage, { type: 'assistant' | 'user' }>): AgentStreamPart[] {
|
||||
/**
|
||||
* Handles aggregated assistant messages that arrive outside of the streaming
|
||||
* protocol (e.g. after a tool call finishes). We emit the appropriate
|
||||
* text/tool events and close the active step once the payload is fully
|
||||
* processed.
|
||||
*/
|
||||
function handleAssistantMessage(
|
||||
message: Extract<SDKMessage, { type: 'assistant' }>,
|
||||
state: ClaudeStreamState
|
||||
): AgentStreamPart[] {
|
||||
const chunks: AgentStreamPart[] = []
|
||||
const messageId = message.uuid?.toString() || generateMessageId()
|
||||
const providerMetadata = sdkMessageToProviderMetadata(message)
|
||||
const content = message.message.content
|
||||
const isStreamingActive = state.hasActiveStep()
|
||||
|
||||
// handle normal text content
|
||||
if (typeof message.message.content === 'string') {
|
||||
const textContent = message.message.content
|
||||
if (textContent) {
|
||||
chunks.push(...generateTextChunks(messageId, textContent, message))
|
||||
if (typeof content === 'string') {
|
||||
if (!content) {
|
||||
return chunks
|
||||
}
|
||||
} else if (Array.isArray(message.message.content)) {
|
||||
for (const block of message.message.content) {
|
||||
switch (block.type) {
|
||||
case 'text':
|
||||
chunks.push(...generateTextChunks(messageId, block.text, message))
|
||||
break
|
||||
case 'tool_use':
|
||||
chunks.push({
|
||||
type: 'tool-call',
|
||||
toolCallId: block.id,
|
||||
toolName: block.name,
|
||||
input: block.input,
|
||||
providerExecuted: true,
|
||||
providerMetadata: sdkMessageToProviderMetadata(message)
|
||||
})
|
||||
contentBlockState.set(block.id, {
|
||||
type: 'tool-call',
|
||||
toolCallId: block.id,
|
||||
toolName: block.name,
|
||||
input: block.input
|
||||
})
|
||||
break
|
||||
case 'tool_result': {
|
||||
logger.silly('Handling tool result:', { block, content: contentBlockState })
|
||||
const hasToolCall = contentBlockState.has(block.tool_use_id)
|
||||
const toolCall = contentBlockState.get(block.tool_use_id) as toolCallBlock
|
||||
chunks.push({
|
||||
type: 'tool-result',
|
||||
toolCallId: block.tool_use_id,
|
||||
toolName: hasToolCall ? toolCall.toolName : 'Unknown',
|
||||
input: hasToolCall ? toolCall.input : '',
|
||||
output: block.content
|
||||
})
|
||||
break
|
||||
|
||||
if (!isStreamingActive) {
|
||||
state.beginStep()
|
||||
chunks.push({
|
||||
type: 'start-step',
|
||||
request: { body: '' },
|
||||
warnings: []
|
||||
})
|
||||
}
|
||||
|
||||
const textId = message.uuid?.toString() || generateMessageId()
|
||||
chunks.push({
|
||||
type: 'text-start',
|
||||
id: textId,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-delta',
|
||||
id: textId,
|
||||
text: content,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-end',
|
||||
id: textId,
|
||||
providerMetadata
|
||||
})
|
||||
return finalizeNonStreamingStep(message, state, chunks)
|
||||
}
|
||||
|
||||
if (!Array.isArray(content)) {
|
||||
return chunks
|
||||
}
|
||||
|
||||
const textBlocks: string[] = []
|
||||
|
||||
for (const block of content) {
|
||||
switch (block.type) {
|
||||
case 'text':
|
||||
if (!isStreamingActive) {
|
||||
textBlocks.push(block.text)
|
||||
}
|
||||
default:
|
||||
logger.warn('Unknown content block type in user/assistant message:', {
|
||||
type: block.type
|
||||
})
|
||||
chunks.push({
|
||||
type: 'raw',
|
||||
rawValue: block
|
||||
})
|
||||
break
|
||||
break
|
||||
case 'tool_use':
|
||||
handleAssistantToolUse(block as ToolUseContent, providerMetadata, state, chunks)
|
||||
break
|
||||
default:
|
||||
logger.warn('Unhandled assistant content block', { type: (block as any).type })
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if (!isStreamingActive && textBlocks.length > 0) {
|
||||
const id = message.uuid?.toString() || generateMessageId()
|
||||
state.beginStep()
|
||||
chunks.push({
|
||||
type: 'start-step',
|
||||
request: { body: '' },
|
||||
warnings: []
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-start',
|
||||
id,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-delta',
|
||||
id,
|
||||
text: textBlocks.join(''),
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-end',
|
||||
id,
|
||||
providerMetadata
|
||||
})
|
||||
return finalizeNonStreamingStep(message, state, chunks)
|
||||
}
|
||||
|
||||
return chunks
|
||||
}
|
||||
|
||||
/**
|
||||
* Registers tool invocations with the stream state so that later tool results
|
||||
* can be matched with the originating call.
|
||||
*/
|
||||
function handleAssistantToolUse(
|
||||
block: ToolUseContent,
|
||||
providerMetadata: ProviderMetadata,
|
||||
state: ClaudeStreamState,
|
||||
chunks: AgentStreamPart[]
|
||||
): void {
|
||||
chunks.push({
|
||||
type: 'tool-call',
|
||||
toolCallId: block.id,
|
||||
toolName: block.name,
|
||||
input: block.input,
|
||||
providerExecuted: true,
|
||||
providerMetadata
|
||||
})
|
||||
state.completeToolBlock(block.id, block.input, providerMetadata)
|
||||
}
|
||||
|
||||
/**
|
||||
* Emits the terminating `finish-step` frame for non-streamed responses and
|
||||
* clears the currently active step in the state tracker.
|
||||
*/
|
||||
function finalizeNonStreamingStep(
|
||||
message: Extract<SDKMessage, { type: 'assistant' }>,
|
||||
state: ClaudeStreamState,
|
||||
chunks: AgentStreamPart[]
|
||||
): AgentStreamPart[] {
|
||||
const usage = calculateUsageFromMessage(message)
|
||||
const finishReason = inferFinishReason(message.message.stop_reason)
|
||||
chunks.push({
|
||||
type: 'finish-step',
|
||||
response: {
|
||||
id: message.uuid,
|
||||
timestamp: new Date(),
|
||||
modelId: message.message.model ?? ''
|
||||
},
|
||||
usage: usage ?? emptyUsage,
|
||||
finishReason,
|
||||
providerMetadata: sdkMessageToProviderMetadata(message)
|
||||
})
|
||||
state.resetStep()
|
||||
return chunks
|
||||
}
|
||||
|
||||
/**
|
||||
* Converts user-originated websocket frames (text, tool results, etc.) into
|
||||
* the AiSDK format. Tool results are matched back to pending tool calls via the
|
||||
* shared `ClaudeStreamState` instance.
|
||||
*/
|
||||
function handleUserMessage(
|
||||
message: Extract<SDKMessage, { type: 'user' }>,
|
||||
state: ClaudeStreamState
|
||||
): AgentStreamPart[] {
|
||||
const chunks: AgentStreamPart[] = []
|
||||
const providerMetadata = sdkMessageToProviderMetadata(message)
|
||||
const content = message.message.content
|
||||
|
||||
if (typeof content === 'string') {
|
||||
if (!content) {
|
||||
return chunks
|
||||
}
|
||||
|
||||
const id = message.uuid?.toString() || generateMessageId()
|
||||
chunks.push({
|
||||
type: 'text-start',
|
||||
id,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-delta',
|
||||
id,
|
||||
text: content,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-end',
|
||||
id,
|
||||
providerMetadata
|
||||
})
|
||||
return chunks
|
||||
}
|
||||
|
||||
if (!Array.isArray(content)) {
|
||||
return chunks
|
||||
}
|
||||
|
||||
for (const block of content) {
|
||||
if (block.type === 'tool_result') {
|
||||
const toolResult = block as ToolResultContent
|
||||
const pendingCall = state.consumePendingToolCall(toolResult.tool_use_id)
|
||||
if (toolResult.is_error) {
|
||||
chunks.push({
|
||||
type: 'tool-error',
|
||||
toolCallId: toolResult.tool_use_id,
|
||||
toolName: pendingCall?.toolName ?? 'unknown',
|
||||
input: pendingCall?.input,
|
||||
error: toolResult.content,
|
||||
providerExecuted: true
|
||||
} as AgentStreamPart)
|
||||
} else {
|
||||
chunks.push({
|
||||
type: 'tool-result',
|
||||
toolCallId: toolResult.tool_use_id,
|
||||
toolName: pendingCall?.toolName ?? 'unknown',
|
||||
input: pendingCall?.input,
|
||||
output: toolResult.content,
|
||||
providerExecuted: true
|
||||
})
|
||||
}
|
||||
} else if (block.type === 'text') {
|
||||
const id = message.uuid?.toString() || generateMessageId()
|
||||
chunks.push({
|
||||
type: 'text-start',
|
||||
id,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-delta',
|
||||
id,
|
||||
text: (block as { text: string }).text,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'text-end',
|
||||
id,
|
||||
providerMetadata
|
||||
})
|
||||
} else {
|
||||
logger.warn('Unhandled user content block', { type: (block as any).type })
|
||||
}
|
||||
}
|
||||
|
||||
return chunks
|
||||
}
|
||||
|
||||
// Handle stream events (real-time streaming)
|
||||
function handleStreamEvent(message: Extract<SDKMessage, { type: 'stream_event' }>): AgentStreamPart[] {
|
||||
/**
|
||||
* Handles the fine-grained real-time streaming protocol where Anthropic emits
|
||||
* discrete events for message lifecycle, content blocks, and usage deltas.
|
||||
*/
|
||||
function handleStreamEvent(
|
||||
message: Extract<SDKMessage, { type: 'stream_event' }>,
|
||||
state: ClaudeStreamState
|
||||
): AgentStreamPart[] {
|
||||
const chunks: AgentStreamPart[] = []
|
||||
const event = message.event
|
||||
const blockKey = `${message.uuid ?? message.session_id ?? 'session'}:${event.type}`
|
||||
logger.silly('Handling stream event:', { event })
|
||||
const providerMetadata = sdkMessageToProviderMetadata(message)
|
||||
const { event } = message
|
||||
|
||||
switch (event.type) {
|
||||
case 'message_start':
|
||||
// No specific UI chunk needed for message start in this protocol
|
||||
state.beginStep()
|
||||
chunks.push({
|
||||
type: 'start-step',
|
||||
request: { body: '' },
|
||||
warnings: []
|
||||
})
|
||||
break
|
||||
|
||||
case 'content_block_start':
|
||||
switch (event.content_block.type) {
|
||||
case 'text': {
|
||||
contentBlockState.set(blockKey, { type: 'text' })
|
||||
chunks.push({
|
||||
type: 'text-start',
|
||||
id: String(event.index),
|
||||
providerMetadata: {
|
||||
...sdkMessageToProviderMetadata(message),
|
||||
anthropic: {
|
||||
uuid: message.uuid,
|
||||
session_id: message.session_id,
|
||||
content_block_index: event.index
|
||||
}
|
||||
}
|
||||
})
|
||||
break
|
||||
}
|
||||
case 'tool_use': {
|
||||
contentBlockState.set(event.content_block.id, {
|
||||
type: 'tool-call',
|
||||
toolCallId: event.content_block.id,
|
||||
toolName: event.content_block.name,
|
||||
input: ''
|
||||
})
|
||||
chunks.push({
|
||||
type: 'tool-call',
|
||||
toolCallId: event.content_block.id,
|
||||
toolName: event.content_block.name,
|
||||
input: event.content_block.input,
|
||||
providerExecuted: true,
|
||||
providerMetadata: sdkMessageToProviderMetadata(message)
|
||||
})
|
||||
break
|
||||
}
|
||||
}
|
||||
break
|
||||
case 'content_block_delta':
|
||||
switch (event.delta.type) {
|
||||
case 'text_delta': {
|
||||
chunks.push({
|
||||
type: 'text-delta',
|
||||
id: String(event.index),
|
||||
text: event.delta.text,
|
||||
providerMetadata: {
|
||||
...sdkMessageToProviderMetadata(message),
|
||||
anthropic: {
|
||||
uuid: message.uuid,
|
||||
session_id: message.session_id,
|
||||
content_block_index: event.index
|
||||
}
|
||||
}
|
||||
})
|
||||
break
|
||||
}
|
||||
// case 'thinking_delta': {
|
||||
// chunks.push({
|
||||
// type: 'reasoning-delta',
|
||||
// id: String(event.index),
|
||||
// text: event.delta.thinking,
|
||||
// });
|
||||
// break
|
||||
// }
|
||||
// case 'signature_delta': {
|
||||
// if (blockType === 'thinking') {
|
||||
// chunks.push({
|
||||
// type: 'reasoning-delta',
|
||||
// id: String(event.index),
|
||||
// text: '',
|
||||
// providerMetadata: {
|
||||
// ...sdkMessageToProviderMetadata(message),
|
||||
// anthropic: {
|
||||
// uuid: message.uuid,
|
||||
// session_id: message.session_id,
|
||||
// content_block_index: event.index,
|
||||
// signature: event.delta.signature
|
||||
// }
|
||||
// }
|
||||
// })
|
||||
// }
|
||||
// break
|
||||
// }
|
||||
case 'input_json_delta': {
|
||||
const contentBlock = contentBlockState.get(blockKey)
|
||||
if (contentBlock && contentBlock.type === 'tool-call') {
|
||||
contentBlockState.set(blockKey, {
|
||||
...contentBlock,
|
||||
input: `${contentBlock.input ?? ''}${event.delta.partial_json ?? ''}`
|
||||
})
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
handleContentBlockStart(event.index, event.content_block, providerMetadata, state, chunks)
|
||||
break
|
||||
|
||||
case 'content_block_stop':
|
||||
{
|
||||
const contentBlock = contentBlockState.get(blockKey)
|
||||
if (contentBlock?.type === 'text') {
|
||||
case 'content_block_delta':
|
||||
handleContentBlockDelta(event.index, event.delta, providerMetadata, state, chunks)
|
||||
break
|
||||
|
||||
case 'content_block_stop': {
|
||||
const block = state.closeBlock(event.index)
|
||||
if (!block) {
|
||||
logger.warn('Received content_block_stop for unknown index', { index: event.index })
|
||||
break
|
||||
}
|
||||
|
||||
switch (block.kind) {
|
||||
case 'text':
|
||||
chunks.push({
|
||||
type: 'text-end',
|
||||
id: String(event.index)
|
||||
id: block.id,
|
||||
providerMetadata
|
||||
})
|
||||
}
|
||||
contentBlockState.delete(blockKey)
|
||||
break
|
||||
case 'reasoning':
|
||||
chunks.push({
|
||||
type: 'reasoning-end',
|
||||
id: block.id,
|
||||
providerMetadata
|
||||
})
|
||||
break
|
||||
case 'tool':
|
||||
chunks.push({
|
||||
type: 'tool-input-end',
|
||||
id: block.toolCallId,
|
||||
providerMetadata
|
||||
})
|
||||
break
|
||||
default:
|
||||
break
|
||||
}
|
||||
break
|
||||
case 'message_delta':
|
||||
// Handle usage updates or other message-level deltas
|
||||
}
|
||||
|
||||
case 'message_delta': {
|
||||
const finishReason = event.delta.stop_reason
|
||||
? mapStopReason(event.delta.stop_reason as BetaStopReason)
|
||||
: undefined
|
||||
const usage = convertUsage(event.usage)
|
||||
state.setPendingUsage(usage, finishReason)
|
||||
break
|
||||
case 'message_stop':
|
||||
// This could signal the end of the message
|
||||
}
|
||||
|
||||
case 'message_stop': {
|
||||
const pending = state.getPendingUsage()
|
||||
chunks.push({
|
||||
type: 'finish-step',
|
||||
response: {
|
||||
id: message.uuid,
|
||||
timestamp: new Date(),
|
||||
modelId: ''
|
||||
},
|
||||
usage: pending.usage ?? emptyUsage,
|
||||
finishReason: pending.finishReason ?? 'stop',
|
||||
providerMetadata
|
||||
})
|
||||
state.resetStep()
|
||||
break
|
||||
}
|
||||
|
||||
default:
|
||||
logger.warn('Unknown stream event type:', { type: (event as any).type })
|
||||
logger.warn('Unknown stream event type', { type: (event as any).type })
|
||||
break
|
||||
}
|
||||
|
||||
return chunks
|
||||
}
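Taken together, the branches above mean a plain streamed text turn produces the following part sequence (restating the lifecycle cheatsheet from the top of the file; illustrative, not exhaustive):

const partsForSimpleTextTurn = [
  'start-step', // message_start
  'text-start', // content_block_start (text)
  'text-delta', // content_block_delta (text_delta), repeated once per delta
  'text-end', // content_block_stop
  'finish-step' // message_stop, using usage/stop reason cached from message_delta
]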
|
||||
|
||||
// Handle system messages
|
||||
/**
|
||||
* Opens the appropriate block type when Claude starts streaming a new content
|
||||
* section so later deltas know which logical entity to append to.
|
||||
*/
|
||||
function handleContentBlockStart(
|
||||
index: number,
|
||||
contentBlock: any,
|
||||
providerMetadata: ProviderMetadata,
|
||||
state: ClaudeStreamState,
|
||||
chunks: AgentStreamPart[]
|
||||
): void {
|
||||
switch (contentBlock.type) {
|
||||
case 'text': {
|
||||
const block = state.openTextBlock(index, generateMessageId())
|
||||
chunks.push({
|
||||
type: 'text-start',
|
||||
id: block.id,
|
||||
providerMetadata
|
||||
})
|
||||
break
|
||||
}
|
||||
case 'thinking':
|
||||
case 'redacted_thinking': {
|
||||
const block = state.openReasoningBlock(index, generateMessageId(), contentBlock.type === 'redacted_thinking')
|
||||
chunks.push({
|
||||
type: 'reasoning-start',
|
||||
id: block.id,
|
||||
providerMetadata
|
||||
})
|
||||
break
|
||||
}
|
||||
case 'tool_use': {
|
||||
const block = state.openToolBlock(index, {
|
||||
toolCallId: contentBlock.id,
|
||||
toolName: contentBlock.name,
|
||||
providerMetadata
|
||||
})
|
||||
chunks.push({
|
||||
type: 'tool-input-start',
|
||||
id: block.toolCallId,
|
||||
toolName: block.toolName,
|
||||
providerMetadata
|
||||
})
|
||||
break
|
||||
}
|
||||
default:
|
||||
logger.warn('Unhandled content_block_start type', { type: contentBlock.type })
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Applies incremental deltas to the active block (text, thinking, tool input)
|
||||
* and emits the translated AiSDK chunk immediately.
|
||||
*/
|
||||
function handleContentBlockDelta(
|
||||
index: number,
|
||||
delta: any,
|
||||
providerMetadata: ProviderMetadata,
|
||||
state: ClaudeStreamState,
|
||||
chunks: AgentStreamPart[]
|
||||
): void {
|
||||
switch (delta.type) {
|
||||
case 'text_delta': {
|
||||
const block = state.appendTextDelta(index, delta.text)
|
||||
if (!block) {
|
||||
logger.warn('Received text_delta for unknown block', { index })
|
||||
return
|
||||
}
|
||||
chunks.push({
|
||||
type: 'text-delta',
|
||||
id: block.id,
|
||||
text: block.text,
|
||||
providerMetadata
|
||||
})
|
||||
break
|
||||
}
|
||||
case 'thinking_delta': {
|
||||
const block = state.appendReasoningDelta(index, delta.thinking)
|
||||
if (!block) {
|
||||
logger.warn('Received thinking_delta for unknown block', { index })
|
||||
return
|
||||
}
|
||||
chunks.push({
|
||||
type: 'reasoning-delta',
|
||||
id: block.id,
|
||||
text: delta.thinking,
|
||||
providerMetadata
|
||||
})
|
||||
break
|
||||
}
|
||||
case 'signature_delta': {
|
||||
const block = state.getBlock(index)
|
||||
if (block && block.kind === 'reasoning') {
|
||||
chunks.push({
|
||||
type: 'reasoning-delta',
|
||||
id: block.id,
|
||||
text: '',
|
||||
providerMetadata
|
||||
})
|
||||
}
|
||||
break
|
||||
}
|
||||
case 'input_json_delta': {
|
||||
const block = state.appendToolInputDelta(index, delta.partial_json)
|
||||
if (!block) {
|
||||
logger.warn('Received input_json_delta for unknown block', { index })
|
||||
return
|
||||
}
|
||||
chunks.push({
|
||||
type: 'tool-input-delta',
|
||||
id: block.toolCallId,
|
||||
delta: block.inputBuffer,
|
||||
providerMetadata
|
||||
})
|
||||
break
|
||||
}
|
||||
default:
|
||||
logger.warn('Unhandled content_block_delta type', { type: delta.type })
|
||||
break
|
||||
}
|
||||
}
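Everything above delegates block bookkeeping to `ClaudeStreamState` (implemented in `./claude-stream-state`, which this diff does not show). Read purely from the call sites in this file, its surface looks roughly like the sketch below; the method names match the usage here, but the signatures and return shapes are assumptions:

import type { FinishReason, LanguageModelUsage, ProviderMetadata } from 'ai'

interface ClaudeStreamStateSketch {
  // Step lifecycle
  hasActiveStep(): boolean
  beginStep(): void
  resetStep(): void
  setPendingUsage(usage: LanguageModelUsage | undefined, finishReason?: FinishReason): void
  getPendingUsage(): { usage?: LanguageModelUsage; finishReason?: FinishReason }

  // Content blocks, keyed by the Anthropic block index
  openTextBlock(index: number, id: string): { kind: 'text'; id: string }
  openReasoningBlock(index: number, id: string, redacted: boolean): { kind: 'reasoning'; id: string }
  openToolBlock(
    index: number,
    init: { toolCallId: string; toolName: string; providerMetadata: ProviderMetadata }
  ): { kind: 'tool'; toolCallId: string; toolName: string; inputBuffer: string }
  appendTextDelta(index: number, text: string): { id: string; text: string } | undefined
  appendReasoningDelta(index: number, thinking: string): { id: string } | undefined
  appendToolInputDelta(index: number, partialJson: string): { toolCallId: string; inputBuffer: string } | undefined
  getBlock(index: number): { kind: 'text' | 'reasoning' | 'tool'; id?: string } | undefined
  closeBlock(index: number): { kind: 'text' | 'reasoning'; id: string } | { kind: 'tool'; toolCallId: string } | undefined

  // Tool-call bookkeeping shared between assistant and user snapshots
  completeToolBlock(toolCallId: string, input: unknown, providerMetadata: ProviderMetadata): void
  consumePendingToolCall(toolCallId: string): { toolName: string; input: unknown } | undefined
}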
|
||||
|
||||
/**
|
||||
* System messages currently only deliver the session bootstrap payload. We
|
||||
* forward it as both a `start` marker and a raw snapshot for diagnostics.
|
||||
*/
|
||||
function handleSystemMessage(message: Extract<SDKMessage, { type: 'system' }>): AgentStreamPart[] {
|
||||
const chunks: AgentStreamPart[] = []
|
||||
switch (message.subtype) {
|
||||
case 'init': {
|
||||
chunks.push({
|
||||
type: 'start'
|
||||
})
|
||||
const rawValue: ClaudeCodeRawValue = {
|
||||
if (message.subtype === 'init') {
|
||||
chunks.push({
|
||||
type: 'start'
|
||||
})
|
||||
chunks.push({
|
||||
type: 'raw',
|
||||
rawValue: {
|
||||
type: 'init',
|
||||
session_id: message.session_id,
|
||||
slash_commands: message.slash_commands,
|
||||
tools: message.tools,
|
||||
raw: message
|
||||
}
|
||||
chunks.push({
|
||||
type: 'raw',
|
||||
rawValue
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
return chunks
|
||||
}
|
||||
|
||||
// Handle result messages (completion with usage stats)
|
||||
/**
|
||||
* Terminal result messages arrive once the Claude Code session concludes.
|
||||
* Successful runs yield a `finish` frame with aggregated usage metrics, while
|
||||
* failures are surfaced as `error` frames.
|
||||
*/
|
||||
function handleResultMessage(message: Extract<SDKMessage, { type: 'result' }>): AgentStreamPart[] {
|
||||
const chunks: AgentStreamPart[] = []
|
||||
|
||||
let usage: LanguageModelV2Usage | undefined
|
||||
let usage: LanguageModelUsage | undefined
|
||||
if ('usage' in message) {
|
||||
usage = {
|
||||
inputTokens: message.usage.input_tokens ?? 0,
|
||||
@ -322,10 +611,11 @@ function handleResultMessage(message: Extract<SDKMessage, { type: 'result' }>):
|
||||
totalTokens: (message.usage.input_tokens ?? 0) + (message.usage.output_tokens ?? 0)
|
||||
}
|
||||
}
|
||||
|
||||
if (message.subtype === 'success') {
|
||||
chunks.push({
|
||||
type: 'finish',
|
||||
totalUsage: usage,
|
||||
totalUsage: usage ?? emptyUsage,
|
||||
finishReason: mapClaudeCodeFinishReason(message.subtype),
|
||||
providerMetadata: {
|
||||
...sdkMessageToProviderMetadata(message),
|
||||
@ -345,3 +635,60 @@ function handleResultMessage(message: Extract<SDKMessage, { type: 'result' }>):
|
||||
}
|
||||
return chunks
|
||||
}
|
||||
|
||||
/**
 * Normalises usage payloads so the caller always receives numeric values even
 * when the provider omits certain fields.
 */
function convertUsage(
  usage?: {
    input_tokens?: number | null
    output_tokens?: number | null
  } | null
): LanguageModelUsage | undefined {
  if (!usage) {
    return undefined
  }
  const inputTokens = usage.input_tokens ?? 0
  const outputTokens = usage.output_tokens ?? 0
  return {
    inputTokens,
    outputTokens,
    totalTokens: inputTokens + outputTokens
  }
}

/**
 * Anthropic-only wrapper around {@link finishReasonMapping} that defaults to
 * `unknown` to avoid surprising downstream consumers when new stop reasons are
 * introduced.
 */
function mapStopReason(reason: BetaStopReason): FinishReason {
  return finishReasonMapping[reason] ?? 'unknown'
}

/**
 * Extracts token accounting details from an assistant message, if available.
 */
function calculateUsageFromMessage(
  message: Extract<SDKMessage, { type: 'assistant' }>
): LanguageModelUsage | undefined {
  const usage = message.message.usage
  if (!usage) return undefined
  return {
    inputTokens: usage.input_tokens ?? 0,
    outputTokens: usage.output_tokens ?? 0,
    totalTokens: (usage.input_tokens ?? 0) + (usage.output_tokens ?? 0)
  }
}

/**
 * Converts Anthropic stop reasons into AiSDK finish reasons, falling back to a
 * generic `stop` if the provider omits the detail entirely.
 */
function inferFinishReason(stopReason: BetaStopReason | null | undefined): FinishReason {
  if (!stopReason) return 'stop'
  return mapStopReason(stopReason)
}

export { ClaudeStreamState }
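A couple of concrete values, to make the fallbacks above explicit (the inputs are made up):

// message_delta frames often carry only output tokens; missing fields collapse to 0.
convertUsage({ output_tokens: 128 }) // => { inputTokens: 0, outputTokens: 128, totalTokens: 128 }
convertUsage(null) // => undefined, so callers fall back to emptyUsage

mapStopReason('pause_turn') // => 'unknown'
inferFinishReason(undefined) // => 'stop' (provider omitted the stop reason entirely)
inferFinishReason('tool_use') // => 'tool-calls'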
@@ -185,8 +185,7 @@ export class AiSdkToChunkAdapter {
|
||||
case 'reasoning-end':
|
||||
this.onChunk({
|
||||
type: ChunkType.THINKING_COMPLETE,
|
||||
text: (chunk.providerMetadata?.metadata?.thinking_content as string) || '',
|
||||
thinking_millsec: (chunk.providerMetadata?.metadata?.thinking_millsec as number) || 0
|
||||
text: (chunk.providerMetadata?.metadata?.thinking_content as string) || final.reasoningContent
|
||||
})
|
||||
final.reasoningContent = ''
|
||||
break
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
import { loggerService } from '@logger'
|
||||
import { isNewApiProvider } from '@renderer/config/providers'
|
||||
import { Provider } from '@renderer/types'
|
||||
|
||||
import { AihubmixAPIClient } from './aihubmix/AihubmixAPIClient'
|
||||
@ -45,7 +46,7 @@ export class ApiClientFactory {
|
||||
return instance
|
||||
}
|
||||
|
||||
if (provider.id === 'new-api') {
|
||||
if (isNewApiProvider(provider)) {
|
||||
logger.debug(`Creating NewAPIClient for provider: ${provider.id}`)
|
||||
instance = new NewAPIClient(provider) as BaseApiClient
|
||||
return instance
|
||||
|
||||
@ -67,7 +67,9 @@ vi.mock('@renderer/config/models', () => ({
|
||||
silicon: [],
|
||||
defaultModel: []
|
||||
},
|
||||
isOpenAIModel: vi.fn(() => false)
|
||||
isOpenAIModel: vi.fn(() => false),
|
||||
glm45FlashModel: {},
|
||||
qwen38bModel: {}
|
||||
}))
|
||||
|
||||
describe('ApiClientFactory', () => {
|
||||
|
||||
@ -35,18 +35,8 @@ vi.mock('@renderer/config/models', () => ({
|
||||
findTokenLimit: vi.fn().mockReturnValue(4096),
|
||||
isFunctionCallingModel: vi.fn().mockReturnValue(false),
|
||||
DEFAULT_MAX_TOKENS: 4096,
|
||||
qwen38bModel: {
|
||||
id: 'Qwen/Qwen3-8B',
|
||||
name: 'Qwen3-8B',
|
||||
provider: 'cherryai',
|
||||
group: 'Qwen'
|
||||
},
|
||||
glm45FlashModel: {
|
||||
id: 'glm-4.5-flash',
|
||||
name: 'GLM-4.5-Flash',
|
||||
provider: 'cherryai',
|
||||
group: 'GLM-4.5'
|
||||
}
|
||||
qwen38bModel: {},
|
||||
glm45FlashModel: {}
|
||||
}))
|
||||
|
||||
vi.mock('@renderer/services/AssistantService', () => ({
|
||||
|
||||
@@ -143,12 +143,14 @@ export function buildAiSdkMiddlewares(config: AiSdkMiddlewareConfig): LanguageMo
|
||||
const tagName = {
|
||||
reasoning: 'reasoning',
|
||||
think: 'think',
|
||||
thought: 'thought'
|
||||
thought: 'thought',
|
||||
seedThink: 'seed:think'
|
||||
}
|
||||
|
||||
function getReasoningTagName(modelId: string | undefined): string {
|
||||
if (modelId?.includes('gpt-oss')) return tagName.reasoning
|
||||
if (modelId?.includes('gemini')) return tagName.thought
|
||||
if (modelId?.includes('seed-oss-36b')) return tagName.seedThink
|
||||
return tagName.think
|
||||
}
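The hunk does not show how the tag name is consumed, but the obvious consumer is a reasoning-extraction middleware keyed on that tag. A sketch of that wiring, assuming the AI SDK's `extractReasoningMiddleware` is what `buildAiSdkMiddlewares` ultimately pushes:

import { extractReasoningMiddleware } from 'ai'

// Illustrative only: pick the provider-specific reasoning tag, then let the AI SDK
// lift <seed:think>…</seed:think> (or <think>, <thought>, <reasoning>) spans out of the text stream.
function reasoningMiddlewareFor(modelId: string | undefined) {
  return extractReasoningMiddleware({ tagName: getReasoningTagName(modelId) })
}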
|
||||
|
||||
|
||||
@@ -7,18 +7,14 @@ export default definePlugin({
|
||||
transformStream: () => () => {
|
||||
// === Time tracking state ===
|
||||
let thinkingStartTime = 0
|
||||
let hasStartedThinking = false
|
||||
let accumulatedThinkingContent = ''
|
||||
let reasoningBlockId = ''
|
||||
|
||||
return new TransformStream<TextStreamPart<ToolSet>, TextStreamPart<ToolSet>>({
|
||||
transform(chunk: TextStreamPart<ToolSet>, controller: TransformStreamDefaultController<TextStreamPart<ToolSet>>) {
|
||||
// === Handle reasoning chunk types ===
|
||||
if (chunk.type === 'reasoning-start') {
|
||||
controller.enqueue(chunk)
|
||||
hasStartedThinking = true
|
||||
thinkingStartTime = performance.now()
|
||||
reasoningBlockId = chunk.id
|
||||
} else if (chunk.type === 'reasoning-delta') {
|
||||
accumulatedThinkingContent += chunk.text
|
||||
controller.enqueue({
|
||||
@ -32,21 +28,6 @@ export default definePlugin({
|
||||
}
|
||||
}
|
||||
})
|
||||
} else if (chunk.type === 'reasoning-end' && hasStartedThinking) {
|
||||
controller.enqueue({
|
||||
type: 'reasoning-end',
|
||||
id: reasoningBlockId,
|
||||
providerMetadata: {
|
||||
metadata: {
|
||||
thinking_millsec: performance.now() - thinkingStartTime,
|
||||
thinking_content: accumulatedThinkingContent
|
||||
}
|
||||
}
|
||||
})
|
||||
accumulatedThinkingContent = ''
|
||||
hasStartedThinking = false
|
||||
thinkingStartTime = 0
|
||||
reasoningBlockId = ''
|
||||
} else {
|
||||
controller.enqueue(chunk)
|
||||
}
|
||||
|
||||
@ -6,6 +6,7 @@ import {
|
||||
type ProviderSettingsMap
|
||||
} from '@cherrystudio/ai-core/provider'
|
||||
import { isOpenAIChatCompletionOnlyModel } from '@renderer/config/models'
|
||||
import { isNewApiProvider } from '@renderer/config/providers'
|
||||
import {
|
||||
getAwsBedrockAccessKeyId,
|
||||
getAwsBedrockRegion,
|
||||
@ -65,7 +66,7 @@ function handleSpecialProviders(model: Model, provider: Provider): Provider {
|
||||
if (provider.id === 'aihubmix') {
|
||||
return aihubmixProviderCreator(model, provider)
|
||||
}
|
||||
if (provider.id === 'new-api') {
|
||||
if (isNewApiProvider(provider)) {
|
||||
return newApiResolverCreator(model, provider)
|
||||
}
|
||||
if (provider.id === 'vertexai') {
|
||||
|
||||
@ -52,7 +52,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
return {}
|
||||
}
|
||||
// Don't disable reasoning for models that require it
|
||||
if (isGrokReasoningModel(model) || isOpenAIReasoningModel(model)) {
|
||||
if (isGrokReasoningModel(model) || isOpenAIReasoningModel(model) || model.id.includes('seed-oss')) {
|
||||
return {}
|
||||
}
|
||||
return { reasoning: { enabled: false, exclude: true } }
|
||||
@ -112,6 +112,8 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
|
||||
return {
|
||||
enable_thinking: true
|
||||
}
|
||||
case SystemProviderIds.hunyuan:
|
||||
case SystemProviderIds['tencent-cloud-ti']:
|
||||
case SystemProviderIds.doubao:
|
||||
return {
|
||||
thinking: {
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import { DEFAULT_MIN_APPS } from '@renderer/config/minapps'
|
||||
import { MinAppType } from '@renderer/types'
|
||||
import { FC } from 'react'
|
||||
import styled from 'styled-components'
|
||||
|
||||
interface Props {
|
||||
app: MinAppType
|
||||
@ -11,31 +10,52 @@ interface Props {
|
||||
}
|
||||
|
||||
const MinAppIcon: FC<Props> = ({ app, size = 48, style, sidebar = false }) => {
|
||||
// First try to find in DEFAULT_MIN_APPS for predefined styling
|
||||
const _app = DEFAULT_MIN_APPS.find((item) => item.id === app.id)
|
||||
|
||||
if (!_app) {
|
||||
return null
|
||||
// If found in DEFAULT_MIN_APPS, use predefined styling
|
||||
if (_app) {
|
||||
return (
|
||||
<img
|
||||
src={_app.logo}
|
||||
className="select-none rounded-2xl"
|
||||
style={{
|
||||
border: _app.bodered ? '0.5px solid var(--color-border)' : 'none',
|
||||
width: `${size}px`,
|
||||
height: `${size}px`,
|
||||
backgroundColor: _app.background,
|
||||
userSelect: 'none',
|
||||
...(sidebar ? {} : app.style),
|
||||
...style
|
||||
}}
|
||||
draggable={false}
|
||||
alt={app.name || 'MinApp Icon'}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
return (
|
||||
<Container
|
||||
src={_app.logo}
|
||||
style={{
|
||||
border: _app.bodered ? '0.5px solid var(--color-border)' : 'none',
|
||||
width: `${size}px`,
|
||||
height: `${size}px`,
|
||||
backgroundColor: _app.background,
|
||||
...(sidebar ? {} : app.style),
|
||||
...style
|
||||
}}
|
||||
/>
|
||||
)
|
||||
// If not found in DEFAULT_MIN_APPS but app has logo, use it (for temporary apps)
|
||||
if (app.logo) {
|
||||
return (
|
||||
<img
|
||||
src={app.logo}
|
||||
className="select-none rounded-2xl"
|
||||
style={{
|
||||
border: 'none',
|
||||
width: `${size}px`,
|
||||
height: `${size}px`,
|
||||
backgroundColor: 'transparent',
|
||||
userSelect: 'none',
|
||||
...(sidebar ? {} : app.style),
|
||||
...style
|
||||
}}
|
||||
draggable={false}
|
||||
alt={app.name || 'MinApp Icon'}
|
||||
/>
|
||||
)
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
const Container = styled.img`
|
||||
border-radius: 16px;
|
||||
user-select: none;
|
||||
-webkit-user-drag: none;
|
||||
`
|
||||
|
||||
export default MinAppIcon
|
||||
|
||||
@ -1,15 +1,11 @@
|
||||
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
|
||||
|
||||
exports[`MinAppIcon > should render correctly with various props 1`] = `
|
||||
.c0 {
|
||||
border-radius: 16px;
|
||||
user-select: none;
|
||||
-webkit-user-drag: none;
|
||||
}
|
||||
|
||||
<img
|
||||
class="c0"
|
||||
alt="Test App"
|
||||
class="select-none rounded-2xl"
|
||||
draggable="false"
|
||||
src="/test-logo-1.png"
|
||||
style="border: 0.5px solid var(--color-border); width: 64px; height: 64px; background-color: rgb(240, 240, 240); opacity: 0.8; transform: scale(1.1); margin-top: 10px;"
|
||||
style="border: 0.5px solid var(--color-border); width: 64px; height: 64px; background-color: rgb(240, 240, 240); user-select: none; opacity: 0.8; transform: scale(1.1); margin-top: 10px;"
|
||||
/>
|
||||
`;
|
||||
|
||||
@ -180,7 +180,7 @@ const PopupContainer: React.FC<Props> = ({ model, filter: baseFilter, showTagFil
|
||||
key: `provider-${p.id}`,
|
||||
type: 'group',
|
||||
name: getFancyProviderName(p),
|
||||
actions: (
|
||||
actions: p.id !== 'cherryai' && (
|
||||
<Tooltip title={t('navigate.provider_settings')} mouseEnterDelay={0.5} mouseLeaveDelay={0}>
|
||||
<Settings2
|
||||
size={12}
|
||||
|
||||
@ -18,7 +18,8 @@ export function renderSvgInShadowHost(svgContent: string, hostElement: HTMLEleme
|
||||
// Sanitize the SVG content
|
||||
const sanitizedContent = DOMPurify.sanitize(svgContent, {
|
||||
ADD_TAGS: ['animate', 'foreignObject', 'use'],
|
||||
ADD_ATTR: ['from', 'to']
|
||||
ADD_ATTR: ['from', 'to'],
|
||||
HTML_INTEGRATION_POINTS: { foreignobject: true }
|
||||
})
|
||||
|
||||
const shadowRoot = hostElement.shadowRoot || hostElement.attachShadow({ mode: 'open' })
|
||||
@ -36,6 +37,7 @@ export function renderSvgInShadowHost(svgContent: string, hostElement: HTMLEleme
|
||||
border-radius: var(--shadow-host-border-radius);
|
||||
padding: 1em;
|
||||
overflow: hidden; /* Prevent scrollbars, as scaling is now handled */
|
||||
white-space: normal;
|
||||
display: block;
|
||||
position: relative;
|
||||
width: 100%;
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
import { PlusOutlined } from '@ant-design/icons'
|
||||
import { loggerService } from '@logger'
|
||||
import { Sortable, useDndReorder } from '@renderer/components/dnd'
|
||||
import HorizontalScrollContainer from '@renderer/components/HorizontalScrollContainer'
|
||||
import { isMac } from '@renderer/config/constant'
|
||||
@ -12,9 +13,10 @@ import tabsService from '@renderer/services/TabsService'
|
||||
import { useAppDispatch, useAppSelector } from '@renderer/store'
|
||||
import type { Tab } from '@renderer/store/tabs'
|
||||
import { addTab, removeTab, setActiveTab, setTabs } from '@renderer/store/tabs'
|
||||
import { ThemeMode } from '@renderer/types'
|
||||
import { MinAppType, ThemeMode } from '@renderer/types'
|
||||
import { classNames } from '@renderer/utils'
|
||||
import { Tooltip } from 'antd'
|
||||
import { LRUCache } from 'lru-cache'
|
||||
import {
|
||||
FileSearch,
|
||||
Folder,
|
||||
@ -45,14 +47,40 @@ interface TabsContainerProps {
|
||||
children: React.ReactNode
|
||||
}
|
||||
|
||||
const getTabIcon = (tabId: string, minapps: any[]): React.ReactNode | undefined => {
|
||||
const logger = loggerService.withContext('TabContainer')
|
||||
|
||||
const getTabIcon = (
|
||||
tabId: string,
|
||||
minapps: MinAppType[],
|
||||
minAppsCache?: LRUCache<string, MinAppType>
|
||||
): React.ReactNode | undefined => {
|
||||
// Check if it's a minapp tab (format: apps:appId)
|
||||
if (tabId.startsWith('apps:')) {
|
||||
const appId = tabId.replace('apps:', '')
|
||||
const app = [...DEFAULT_MIN_APPS, ...minapps].find((app) => app.id === appId)
|
||||
let app = [...DEFAULT_MIN_APPS, ...minapps].find((app) => app.id === appId)
|
||||
|
||||
// If not found in permanent apps, search in temporary apps cache
|
||||
// The cache stores apps opened via openSmartMinapp() for top navbar mode
|
||||
// These are temporary MinApps that were opened but not yet saved to the user's config
|
||||
// The cache is LRU (Least Recently Used) with max size from settings
|
||||
// Cache validity: Apps in cache are currently active/recently used, not outdated
|
||||
if (!app && minAppsCache) {
|
||||
app = minAppsCache.get(appId)
|
||||
|
||||
// Defensive programming: If app not found in cache but tab exists,
|
||||
// the cache entry may have been evicted due to LRU policy
|
||||
// Log warning for debugging potential sync issues
|
||||
if (!app) {
|
||||
logger.warn(`MinApp ${appId} not found in cache, using fallback icon`)
|
||||
}
|
||||
}
|
||||
|
||||
if (app) {
|
||||
return <MinAppIcon size={14} app={app} />
|
||||
}
|
||||
|
||||
// Fallback: If no app found (cache evicted), show default icon
|
||||
return <LayoutGrid size={14} />
|
||||
}
|
||||
|
||||
switch (tabId) {
|
||||
@ -94,7 +122,7 @@ const TabsContainer: React.FC<TabsContainerProps> = ({ children }) => {
|
||||
const activeTabId = useAppSelector((state) => state.tabs.activeTabId)
|
||||
const isFullscreen = useFullscreen()
|
||||
const { settedTheme, toggleTheme } = useTheme()
|
||||
const { hideMinappPopup } = useMinappPopup()
|
||||
const { hideMinappPopup, minAppsCache } = useMinappPopup()
|
||||
const { minapps } = useMinapps()
|
||||
const { t } = useTranslation()
|
||||
|
||||
@ -112,8 +140,23 @@ const TabsContainer: React.FC<TabsContainerProps> = ({ children }) => {
|
||||
// Check if it's a minapp tab
|
||||
if (tabId.startsWith('apps:')) {
|
||||
const appId = tabId.replace('apps:', '')
|
||||
const app = [...DEFAULT_MIN_APPS, ...minapps].find((app) => app.id === appId)
|
||||
return app ? app.name : 'MinApp'
|
||||
let app = [...DEFAULT_MIN_APPS, ...minapps].find((app) => app.id === appId)
|
||||
|
||||
// If not found in permanent apps, search in temporary apps cache
|
||||
// This ensures temporary MinApps display proper titles while being used
|
||||
// The LRU cache automatically manages app lifecycle and prevents memory leaks
|
||||
if (!app && minAppsCache) {
|
||||
app = minAppsCache.get(appId)
|
||||
|
||||
// Defensive programming: If app not found in cache but tab exists,
|
||||
// the cache entry may have been evicted due to LRU policy
|
||||
if (!app) {
|
||||
logger.warn(`MinApp ${appId} not found in cache, using fallback title`)
|
||||
}
|
||||
}
|
||||
|
||||
// Return app name if found, otherwise use fallback with appId
|
||||
return app ? app.name : `MinApp-${appId}`
|
||||
}
|
||||
return getTitleLabel(tabId)
|
||||
}
|
||||
@ -196,7 +239,7 @@ const TabsContainer: React.FC<TabsContainerProps> = ({ children }) => {
|
||||
renderItem={(tab) => (
|
||||
<Tab key={tab.id} active={tab.id === activeTabId} onClick={() => handleTabClick(tab)}>
|
||||
<TabHeader>
|
||||
{tab.id && <TabIcon>{getTabIcon(tab.id, minapps)}</TabIcon>}
|
||||
{tab.id && <TabIcon>{getTabIcon(tab.id, minapps, minAppsCache)}</TabIcon>}
|
||||
<TabTitle>{getTabTitle(tab.id)}</TabTitle>
|
||||
</TabHeader>
|
||||
{tab.id !== 'home' && (
|
||||
@ -259,7 +302,7 @@ const TabsBar = styled.div<{ $isFullscreen: boolean }>`
|
||||
flex-direction: row;
|
||||
align-items: center;
|
||||
gap: 5px;
|
||||
padding-left: ${({ $isFullscreen }) => (!$isFullscreen && isMac ? 'env(titlebar-area-x)' : '15px')};
|
||||
padding-left: ${({ $isFullscreen }) => (!$isFullscreen && isMac ? 'calc(env(titlebar-area-x) + 4px)' : '15px')};
|
||||
padding-right: ${({ $isFullscreen }) => ($isFullscreen ? '12px' : '0')};
|
||||
height: var(--navbar-height);
|
||||
min-height: ${({ $isFullscreen }) => (!$isFullscreen && isMac ? 'env(titlebar-area-height)' : '')};
|
||||
|
||||
@ -88,6 +88,7 @@ const NavbarCenterContainer = styled.div`
|
||||
display: flex;
|
||||
align-items: center;
|
||||
padding: 0 ${isMac ? '20px' : 0};
|
||||
padding-left: 10px;
|
||||
font-weight: bold;
|
||||
color: var(--color-text-1);
|
||||
position: relative;
|
||||
@ -108,7 +109,8 @@ const NavbarMainContainer = styled.div<{ $isFullscreen: boolean }>`
|
||||
flex-direction: row;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
padding: 0 ${isMac ? '20px' : 0};
|
||||
padding-right: ${isMac ? '20px' : 0};
|
||||
padding-left: 10px;
|
||||
font-weight: bold;
|
||||
color: var(--color-text-1);
|
||||
padding-right: ${({ $isFullscreen }) => ($isFullscreen ? '12px' : isWin ? '140px' : isLinux ? '120px' : '12px')};
|
||||
|
||||
@ -17,6 +17,7 @@ interface ItemRendererProps<T> {
|
||||
transform?: Transform | null
|
||||
transition?: string | null
|
||||
listeners?: DraggableSyntheticListeners
|
||||
itemStyle?: React.CSSProperties
|
||||
}
|
||||
|
||||
export function ItemRenderer<T>({
|
||||
@ -30,6 +31,7 @@ export function ItemRenderer<T>({
|
||||
transform,
|
||||
transition,
|
||||
listeners,
|
||||
itemStyle,
|
||||
...props
|
||||
}: ItemRendererProps<T>) {
|
||||
useEffect(() => {
|
||||
@ -44,7 +46,7 @@ export function ItemRenderer<T>({
|
||||
}
|
||||
}, [dragOverlay])
|
||||
|
||||
const wrapperStyle = {
|
||||
const style = {
|
||||
transition,
|
||||
transform: CSS.Transform.toString(transform ?? null)
|
||||
} as React.CSSProperties
|
||||
@ -54,7 +56,7 @@ export function ItemRenderer<T>({
|
||||
ref={ref}
|
||||
data-index={index}
|
||||
className={classNames({ dragOverlay: dragOverlay })}
|
||||
style={{ ...wrapperStyle }}>
|
||||
style={{ ...style, ...itemStyle }}>
|
||||
<DraggableItem
|
||||
className={classNames({ dragging: dragging, dragOverlay: dragOverlay, ghost: ghost })}
|
||||
{...listeners}
|
||||
|
||||
@ -61,6 +61,8 @@ interface SortableProps<T> {
|
||||
className?: string
|
||||
/** Item list style */
|
||||
listStyle?: React.CSSProperties
|
||||
/** Item style */
|
||||
itemStyle?: React.CSSProperties
|
||||
/** Item gap */
|
||||
gap?: number | string
|
||||
/** Restrictions, shortcuts for some modifiers */
|
||||
@ -87,6 +89,7 @@ function Sortable<T>({
|
||||
showGhost = false,
|
||||
className,
|
||||
listStyle,
|
||||
itemStyle,
|
||||
gap,
|
||||
restrictions,
|
||||
modifiers: customModifiers
|
||||
@ -195,19 +198,19 @@ function Sortable<T>({
|
||||
renderItem={renderItem}
|
||||
useDragOverlay={useDragOverlay}
|
||||
showGhost={showGhost}
|
||||
itemStyle={itemStyle}
|
||||
/>
|
||||
))}
|
||||
</ListWrapper>
|
||||
</SortableContext>
|
||||
|
||||
{useDragOverlay
|
||||
? createPortal(
|
||||
<DragOverlay adjustScale dropAnimation={dropAnimation}>
|
||||
{activeItem ? <ItemRenderer item={activeItem} renderItem={renderItem} dragOverlay /> : null}
|
||||
</DragOverlay>,
|
||||
document.body
|
||||
)
|
||||
: null}
|
||||
{useDragOverlay &&
|
||||
createPortal(
|
||||
<DragOverlay adjustScale dropAnimation={dropAnimation}>
|
||||
{activeItem && <ItemRenderer item={activeItem} renderItem={renderItem} itemStyle={itemStyle} dragOverlay />}
|
||||
</DragOverlay>,
|
||||
document.body
|
||||
)}
|
||||
</DndContext>
|
||||
)
|
||||
}
|
||||
|
||||
@ -10,6 +10,7 @@ interface SortableItemProps<T> {
|
||||
renderItem: RenderItemType<T>
|
||||
useDragOverlay?: boolean
|
||||
showGhost?: boolean
|
||||
itemStyle?: React.CSSProperties
|
||||
}
|
||||
|
||||
export function SortableItem<T>({
|
||||
@ -18,7 +19,8 @@ export function SortableItem<T>({
|
||||
index,
|
||||
renderItem,
|
||||
useDragOverlay = true,
|
||||
showGhost = true
|
||||
showGhost = true,
|
||||
itemStyle
|
||||
}: SortableItemProps<T>) {
|
||||
const { attributes, listeners, setNodeRef, transform, transition, isDragging } = useSortable({
|
||||
id
|
||||
@ -36,6 +38,7 @@ export function SortableItem<T>({
|
||||
transform={transform}
|
||||
transition={transition}
|
||||
listeners={listeners}
|
||||
itemStyle={itemStyle}
|
||||
{...attributes}
|
||||
/>
|
||||
)
|
||||
|
||||
@ -93,7 +93,17 @@ export function isSupportedThinkingTokenModel(model?: Model): boolean {
|
||||
// Specifically for DeepSeek V3.1. White list for now
|
||||
if (isDeepSeekHybridInferenceModel(model)) {
|
||||
return (
|
||||
['openrouter', 'dashscope', 'modelscope', 'doubao', 'silicon', 'nvidia', 'ppio'] satisfies SystemProviderId[]
|
||||
[
|
||||
'openrouter',
|
||||
'dashscope',
|
||||
'modelscope',
|
||||
'doubao',
|
||||
'silicon',
|
||||
'nvidia',
|
||||
'ppio',
|
||||
'hunyuan',
|
||||
'tencent-cloud-ti'
|
||||
] satisfies SystemProviderId[]
|
||||
).some((id) => id === model.provider)
|
||||
}
|
||||
|
||||
@ -381,7 +391,8 @@ export function isReasoningModel(model?: Model): boolean {
|
||||
isDeepSeekHybridInferenceModel(model) ||
|
||||
modelId.includes('magistral') ||
|
||||
modelId.includes('minimax-m1') ||
|
||||
modelId.includes('pangu-pro-moe')
|
||||
modelId.includes('pangu-pro-moe') ||
|
||||
modelId.includes('seed-oss')
|
||||
) {
|
||||
return true
|
||||
}
|
||||
|
||||
@ -138,16 +138,6 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
ppio: {
|
||||
id: 'ppio',
|
||||
name: 'PPIO',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'https://api.ppinfra.com/v3/openai/',
|
||||
models: SYSTEM_MODELS.ppio,
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
alayanew: {
|
||||
id: 'alayanew',
|
||||
name: 'AlayaNew',
|
||||
@ -158,16 +148,6 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
qiniu: {
|
||||
id: 'qiniu',
|
||||
name: 'Qiniu',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'https://api.qnaigc.com',
|
||||
models: SYSTEM_MODELS.qiniu,
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
dmxapi: {
|
||||
id: 'dmxapi',
|
||||
name: 'DMXAPI',
|
||||
@ -178,6 +158,16 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
aionly: {
|
||||
id: 'aionly',
|
||||
name: 'AIOnly',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'https://api.aiionly.com',
|
||||
models: SYSTEM_MODELS.aionly,
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
burncloud: {
|
||||
id: 'burncloud',
|
||||
name: 'BurnCloud',
|
||||
@ -238,6 +228,26 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
ppio: {
|
||||
id: 'ppio',
|
||||
name: 'PPIO',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'https://api.ppinfra.com/v3/openai/',
|
||||
models: SYSTEM_MODELS.ppio,
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
qiniu: {
|
||||
id: 'qiniu',
|
||||
name: 'Qiniu',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'https://api.qnaigc.com',
|
||||
models: SYSTEM_MODELS.qiniu,
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
openrouter: {
|
||||
id: 'openrouter',
|
||||
name: 'OpenRouter',
|
||||
@ -612,16 +622,6 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
|
||||
models: SYSTEM_MODELS['poe'],
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
},
|
||||
aionly: {
|
||||
id: 'aionly',
|
||||
name: 'AIOnly',
|
||||
type: 'openai',
|
||||
apiKey: '',
|
||||
apiHost: 'https://api.aiionly.com',
|
||||
models: SYSTEM_MODELS.aionly,
|
||||
isSystem: true,
|
||||
enabled: false
|
||||
}
|
||||
} as const
|
||||
|
||||
@@ -1375,3 +1375,7 @@ const SUPPORT_GEMINI_NATIVE_WEB_SEARCH_PROVIDERS = ['gemini', 'vertexai'] as con
|
||||
export const isGeminiWebSearchProvider = (provider: Provider) => {
|
||||
return SUPPORT_GEMINI_NATIVE_WEB_SEARCH_PROVIDERS.some((id) => id === provider.id)
|
||||
}
|
||||
|
||||
export const isNewApiProvider = (provider: Provider) => {
|
||||
return ['new-api', 'cherryin'].includes(provider.id)
|
||||
}
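With CherryIN routed through the same code path as New API, the predicate above is what both the API client factory and the provider resolver now branch on. For illustration (the objects are trimmed to the only field the check reads):

isNewApiProvider({ id: 'new-api' } as Provider) // => true
isNewApiProvider({ id: 'cherryin' } as Provider) // => true
isNewApiProvider({ id: 'openrouter' } as Provider) // => false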
|
||||
|
||||
@@ -172,7 +172,10 @@ export function useAssistant(id: string) {
|
||||
(model: Model) => assistant && dispatch(setModel({ assistantId: assistant?.id, model })),
|
||||
[assistant, dispatch]
|
||||
),
|
||||
updateAssistant: useCallback((assistant: Partial<Assistant>) => dispatch(updateAssistant(assistant)), [dispatch]),
|
||||
updateAssistant: useCallback(
|
||||
(update: Partial<Omit<Assistant, 'id'>>) => dispatch(updateAssistant({ id, ...update })),
|
||||
[dispatch, id]
|
||||
),
|
||||
updateAssistantSettings
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import { DEFAULT_MIN_APPS } from '@renderer/config/minapps'
|
||||
import { useRuntime } from '@renderer/hooks/useRuntime'
|
||||
import { useSettings } from '@renderer/hooks/useSettings' // use the value from settings
|
||||
import NavigationService from '@renderer/services/NavigationService'
|
||||
import TabsService from '@renderer/services/TabsService'
|
||||
import { useAppDispatch } from '@renderer/store'
|
||||
import {
|
||||
@ -14,6 +15,8 @@ import { clearWebviewState } from '@renderer/utils/webviewStateManager'
|
||||
import { LRUCache } from 'lru-cache'
|
||||
import { useCallback } from 'react'
|
||||
|
||||
import { useNavbarPosition } from './useSettings'
|
||||
|
||||
let minAppsCache: LRUCache<string, MinAppType>
|
||||
|
||||
/**
|
||||
@@ -34,6 +37,7 @@ export const useMinappPopup = () => {
|
||||
const dispatch = useAppDispatch()
|
||||
const { openedKeepAliveMinapps, openedOneOffMinapp, minappShow } = useRuntime()
|
||||
const { maxKeepAliveMinapps } = useSettings() // use the value from settings
|
||||
const { isTopNavbar } = useNavbarPosition()
|
||||
|
||||
const createLRUCache = useCallback(() => {
|
||||
return new LRUCache<string, MinAppType>({
|
||||
@ -165,6 +169,33 @@ export const useMinappPopup = () => {
|
||||
dispatch(setMinappShow(false))
|
||||
}, [dispatch, minappShow, openedOneOffMinapp])
|
||||
|
||||
/** Smart open minapp that adapts to navbar position */
|
||||
const openSmartMinapp = useCallback(
|
||||
(config: MinAppType, keepAlive: boolean = false) => {
|
||||
if (isTopNavbar) {
|
||||
// For top navbar mode, need to add to cache first for temporary apps
|
||||
const cacheApp = minAppsCache.get(config.id)
|
||||
if (!cacheApp) {
|
||||
// Add temporary app to cache so MinAppPage can find it
|
||||
minAppsCache.set(config.id, config)
|
||||
}
|
||||
|
||||
// Set current minapp and show state
|
||||
dispatch(setCurrentMinappId(config.id))
|
||||
dispatch(setMinappShow(true))
|
||||
|
||||
// Then navigate to the app tab using NavigationService
|
||||
if (NavigationService.navigate) {
|
||||
NavigationService.navigate(`/apps/${config.id}`)
|
||||
}
|
||||
} else {
|
||||
// For side navbar, use the traditional popup system
|
||||
openMinapp(config, keepAlive)
|
||||
}
|
||||
},
|
||||
[isTopNavbar, openMinapp, dispatch]
|
||||
)
|
||||
|
||||
return {
|
||||
openMinapp,
|
||||
openMinappKeepAlive,
|
||||
@ -172,6 +203,7 @@ export const useMinappPopup = () => {
|
||||
closeMinapp,
|
||||
hideMinappPopup,
|
||||
closeAllMinapps,
|
||||
openSmartMinapp,
|
||||
// Expose cache instance for TabsService integration
|
||||
minAppsCache
|
||||
}
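The `minAppsCache` exposed here is a plain `lru-cache` instance keyed by app id; `openSmartMinapp` writes temporary apps into it and the tab helpers read them back until the LRU policy evicts them. A minimal sketch of that contract, assuming the cache is sized from `maxKeepAliveMinapps` (the actual `createLRUCache` body is truncated in this hunk):

import { LRUCache } from 'lru-cache'

import type { MinAppType } from '@renderer/types'

// Temporary MinApps live here until the user saves them or the LRU policy evicts them.
const cache = new LRUCache<string, MinAppType>({ max: 5 /* assumed: maxKeepAliveMinapps from settings */ })

function rememberTemporaryApp(app: MinAppType): void {
  // openSmartMinapp(): register the app before navigating so tab code can resolve it later.
  if (!cache.has(app.id)) {
    cache.set(app.id, app)
  }
}

function resolveTabApp(appId: string): MinAppType | undefined {
  // getTabIcon()/getTabTitle(): undefined once evicted, which is why both keep a fallback icon/title.
  return cache.get(appId)
}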
|
||||
|
||||
@@ -13,7 +13,7 @@ import { loggerService } from '@renderer/services/LoggerService'
|
||||
import { getModelUniqId } from '@renderer/services/ModelService'
|
||||
import { useAppDispatch, useAppSelector } from '@renderer/store'
|
||||
import { setIsBunInstalled } from '@renderer/store/mcp'
|
||||
import { Model } from '@renderer/types'
|
||||
import { EndpointType, Model } from '@renderer/types'
|
||||
import { getClaudeSupportedProviders } from '@renderer/utils/provider'
|
||||
import { codeTools, terminalApps, TerminalConfig } from '@shared/config/constant'
|
||||
import { Alert, Avatar, Button, Checkbox, Input, Popover, Select, Space, Tooltip } from 'antd'
|
||||
@ -70,18 +70,43 @@ const CodeToolsPage: FC = () => {
|
||||
if (isEmbeddingModel(m) || isRerankModel(m) || isTextToImageModel(m)) {
|
||||
return false
|
||||
}
|
||||
|
||||
if (m.provider === 'cherryai') {
|
||||
return false
|
||||
}
|
||||
|
||||
if (selectedCliTool === codeTools.claudeCode) {
|
||||
if (m.supported_endpoint_types) {
|
||||
return m.supported_endpoint_types.includes('anthropic')
|
||||
}
|
||||
return m.id.includes('claude') || CLAUDE_OFFICIAL_SUPPORTED_PROVIDERS.includes(m.provider)
|
||||
}
|
||||
|
||||
if (selectedCliTool === codeTools.geminiCli) {
|
||||
if (m.supported_endpoint_types) {
|
||||
return m.supported_endpoint_types.includes('gemini')
|
||||
}
|
||||
return m.id.includes('gemini')
|
||||
}
|
||||
|
||||
if (selectedCliTool === codeTools.openaiCodex) {
|
||||
if (m.supported_endpoint_types) {
|
||||
return ['openai', 'openai-response'].some((type) =>
|
||||
m.supported_endpoint_types?.includes(type as EndpointType)
|
||||
)
|
||||
}
|
||||
return m.id.includes('openai') || OPENAI_CODEX_SUPPORTED_PROVIDERS.includes(m.provider)
|
||||
}
|
||||
|
||||
if (selectedCliTool === codeTools.qwenCode || selectedCliTool === codeTools.iFlowCli) {
|
||||
if (m.supported_endpoint_types) {
|
||||
return ['openai', 'openai-response'].some((type) =>
|
||||
m.supported_endpoint_types?.includes(type as EndpointType)
|
||||
)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
return true
|
||||
},
|
||||
[selectedCliTool]
|
||||
|
||||
@ -23,10 +23,16 @@ export const CLI_TOOLS = [
|
||||
{ value: codeTools.iFlowCli, label: 'iFlow CLI' }
|
||||
]
|
||||
|
||||
export const GEMINI_SUPPORTED_PROVIDERS = ['aihubmix', 'dmxapi', 'new-api']
|
||||
export const GEMINI_SUPPORTED_PROVIDERS = ['aihubmix', 'dmxapi', 'new-api', 'cherryin']
|
||||
export const CLAUDE_OFFICIAL_SUPPORTED_PROVIDERS = ['deepseek', 'moonshot', 'zhipu', 'dashscope', 'modelscope']
|
||||
export const CLAUDE_SUPPORTED_PROVIDERS = ['aihubmix', 'dmxapi', 'new-api', ...CLAUDE_OFFICIAL_SUPPORTED_PROVIDERS]
|
||||
export const OPENAI_CODEX_SUPPORTED_PROVIDERS = ['openai', 'openrouter', 'aihubmix', 'new-api']
|
||||
export const CLAUDE_SUPPORTED_PROVIDERS = [
|
||||
'aihubmix',
|
||||
'dmxapi',
|
||||
'new-api',
|
||||
'cherryin',
|
||||
...CLAUDE_OFFICIAL_SUPPORTED_PROVIDERS
|
||||
]
|
||||
export const OPENAI_CODEX_SUPPORTED_PROVIDERS = ['openai', 'openrouter', 'aihubmix', 'new-api', 'cherryin']
|
||||
|
||||
// Provider filter mapping
|
||||
export const CLI_TOOL_PROVIDER_MAP: Record<string, (providers: Provider[]) => Provider[]> = {
|
||||
|
||||
@@ -1,4 +1,4 @@
import { LoadingIcon } from '@renderer/components/Icons'
import { Spinner } from '@heroui/react'
import { MessageBlockStatus, MessageBlockType, type PlaceholderMessageBlock } from '@renderer/types/newMessage'
import React from 'react'
import styled from 'styled-components'
@@ -10,7 +10,7 @@ const PlaceholderBlock: React.FC<PlaceholderBlockProps> = ({ block }) => {
  if (block.status === MessageBlockStatus.PROCESSING && block.type === MessageBlockType.UNKNOWN) {
    return (
      <MessageContentLoading>
        <LoadingIcon />
        <Spinner color="current" variant="dots" />
      </MessageContentLoading>
    )
  }

@@ -3,7 +3,7 @@ import type { RootState } from '@renderer/store'
import { messageBlocksSelectors } from '@renderer/store/messageBlock'
import type { ImageMessageBlock, Message, MessageBlock } from '@renderer/types/newMessage'
import { MessageBlockStatus, MessageBlockType } from '@renderer/types/newMessage'
import { isMainTextBlock, isVideoBlock } from '@renderer/utils/messageUtils/is'
import { isMainTextBlock, isMessageProcessing, isVideoBlock } from '@renderer/utils/messageUtils/is'
import { AnimatePresence, motion, type Variants } from 'motion/react'
import React, { useMemo } from 'react'
import { useSelector } from 'react-redux'
@@ -107,6 +107,9 @@ const MessageBlockRenderer: React.FC<Props> = ({ blocks, message }) => {
  const renderedBlocks = blocks.map((blockId) => blockEntities[blockId]).filter(Boolean)
  const groupedBlocks = useMemo(() => groupSimilarBlocks(renderedBlocks), [renderedBlocks])

  // Check if message is still processing
  const isProcessing = isMessageProcessing(message)

  return (
    <AnimatePresence mode="sync">
      {groupedBlocks.map((block) => {
@@ -151,9 +154,6 @@ const MessageBlockRenderer: React.FC<Props> = ({ blocks, message }) => {

        switch (block.type) {
          case MessageBlockType.UNKNOWN:
            if (block.status === MessageBlockStatus.PROCESSING) {
              blockComponent = <PlaceholderBlock key={block.id} block={block} />
            }
            break
          case MessageBlockType.MAIN_TEXT:
          case MessageBlockType.CODE: {
@@ -213,6 +213,19 @@ const MessageBlockRenderer: React.FC<Props> = ({ blocks, message }) => {
          </AnimatedBlockWrapper>
        )
      })}
      {isProcessing && (
        <AnimatedBlockWrapper key="message-loading-placeholder" enableAnimation={true}>
          <PlaceholderBlock
            block={{
              id: `loading-${message.id}`,
              messageId: message.id,
              type: MessageBlockType.UNKNOWN,
              status: MessageBlockStatus.PROCESSING,
              createdAt: new Date().toISOString()
            }}
          />
        </AnimatedBlockWrapper>
      )}
    </AnimatePresence>
  )
}

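With this change the renderer appends one transient PROCESSING placeholder whenever the message itself is still streaming, instead of relying on an UNKNOWN block being present. The helper isMessageProcessing comes from @renderer/utils/messageUtils/is and its body is not shown in this diff; the sketch below is only a guess at the kind of status check it performs.

// Hypothetical sketch of what a check like isMessageProcessing might do.
// The real helper may inspect different fields of the Message type.
type MessageStatus = 'pending' | 'processing' | 'success' | 'error'

interface MessageLike {
  status: MessageStatus
}

function isMessageProcessingSketch(message: MessageLike): boolean {
  // Treat a message as "processing" until it reaches a terminal status.
  return message.status === 'pending' || message.status === 'processing'
}
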
@@ -1,7 +1,7 @@
import { Navbar, NavbarLeft, NavbarRight } from '@renderer/components/app/Navbar'
import { HStack } from '@renderer/components/Layout'
import SearchPopup from '@renderer/components/Popups/SearchPopup'
import { isLinux, isWin } from '@renderer/config/constant'
import { isLinux, isMac, isWin } from '@renderer/config/constant'
import { useAssistant } from '@renderer/hooks/useAssistant'
import { modelGenerating } from '@renderer/hooks/useRuntime'
import { useSettings } from '@renderer/hooks/useSettings'
@@ -86,7 +86,14 @@ const HeaderNavbar: FC<Props> = ({ activeAssistant, setActiveAssistant, activeTo
      )}
    </AnimatePresence>
    {!showAssistants && (
      <NavbarLeft style={{ justifyContent: 'flex-start', borderRight: 'none', padding: '0 10px', minWidth: 'auto' }}>
      <NavbarLeft
        style={{
          justifyContent: 'flex-start',
          borderRight: 'none',
          paddingLeft: 0,
          paddingRight: 10,
          minWidth: 'auto'
        }}>
        <Tooltip title={t('navbar.show_sidebar')} mouseEnterDelay={0.8}>
          <NavbarIcon onClick={() => toggleShowAssistants()}>
            <PanelRightClose size={18} />
@@ -106,7 +113,7 @@ const HeaderNavbar: FC<Props> = ({ activeAssistant, setActiveAssistant, activeTo
      </AnimatePresence>
    </NavbarLeft>
  )}
  <HStack alignItems="center" gap={6}>
  <HStack alignItems="center" gap={6} ml={!isMac ? 16 : 0}>
    <SelectModelButton assistant={assistant} />
  </HStack>
  <NavbarRight
@@ -114,7 +121,7 @@ const HeaderNavbar: FC<Props> = ({ activeAssistant, setActiveAssistant, activeTo
      justifyContent: 'flex-end',
      flex: 1,
      position: 'relative',
      paddingRight: isWin || isLinux ? '144px' : '6px'
      paddingRight: isWin || isLinux ? '144px' : '15px'
    }}
    className="home-navbar-right">
    <HStack alignItems="center" gap={6}>

@@ -412,7 +412,7 @@ const SettingsTab: FC<Props> = (props) => {
      <SettingDivider />
    </SettingGroup>
  </CollapsibleSettingGroup>
  <CollapsibleSettingGroup title={t('settings.math.title')} defaultExpanded={true}>
  <CollapsibleSettingGroup title={t('settings.math.title')} defaultExpanded={false}>
    <SettingGroup>
      <SettingRow>
        <SettingRowTitleSmall>{t('settings.math.engine.label')}</SettingRowTitleSmall>
@@ -441,7 +441,7 @@ const SettingsTab: FC<Props> = (props) => {
      <SettingDivider />
    </SettingGroup>
  </CollapsibleSettingGroup>
  <CollapsibleSettingGroup title={t('chat.settings.code.title')} defaultExpanded={true}>
  <CollapsibleSettingGroup title={t('chat.settings.code.title')} defaultExpanded={false}>
    <SettingGroup>
      <SettingRow>
        <SettingRowTitleSmall>{t('message.message.code_style')}</SettingRowTitleSmall>
@@ -585,7 +585,7 @@ const SettingsTab: FC<Props> = (props) => {
    </SettingGroup>
    <SettingDivider />
  </CollapsibleSettingGroup>
  <CollapsibleSettingGroup title={t('settings.messages.input.title')} defaultExpanded={true}>
  <CollapsibleSettingGroup title={t('settings.messages.input.title')} defaultExpanded={false}>
    <SettingGroup>
      <SettingRow>
        <SettingRowTitleSmall>{t('settings.messages.input.show_estimated_tokens')}</SettingRowTitleSmall>

@@ -44,11 +44,20 @@ const MinAppPage: FC = () => {
    }
  }, [isTopNavbar])

  // Find the app from all available apps
  // Find the app from all available apps (including cached ones)
  const app = useMemo(() => {
    if (!appId) return null
    return [...DEFAULT_MIN_APPS, ...minapps].find((app) => app.id === appId)
  }, [appId, minapps])

    // First try to find in default and custom mini-apps
    let foundApp = [...DEFAULT_MIN_APPS, ...minapps].find((app) => app.id === appId)

    // If not found and we have cache, try to find in cache (for temporary apps)
    if (!foundApp && minAppsCache) {
      foundApp = minAppsCache.get(appId)
    }

    return foundApp
  }, [appId, minapps, minAppsCache])

  useEffect(() => {
    // If app not found, redirect to apps list

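The app lookup now has a second step: if the id is not in the default or user-defined lists, it is resolved from a cache of temporarily opened mini-apps. A minimal sketch of that two-step lookup, with illustrative names:

// Illustrative sketch of the default-list-then-cache lookup used above.
interface MinAppLike {
  id: string
  name: string
  url: string
}

function findMinApp(
  appId: string,
  staticApps: MinAppLike[],
  cache?: Map<string, MinAppLike>
): MinAppLike | undefined {
  // Default and custom apps take priority; cached temporary apps are the fallback.
  return staticApps.find((app) => app.id === appId) ?? cache?.get(appId)
}
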
@@ -111,7 +111,7 @@ const NotesSidebar: FC<NotesSidebarProps> = ({
        const targetScrollTop = elementOffsetTop - (containerHeight - elementHeight) / 2
        scrollContainer.scrollTo({
          top: Math.max(0, targetScrollTop),
          behavior: 'smooth'
          behavior: 'instant'
        })
      }
    }

@@ -307,7 +307,7 @@ const ZhipuPage: FC<{ Options: string[] }> = ({ Options }) => {
    }
  }

  const createNewPainting = () => {
  const handleAddPainting = () => {
    if (generating) return
    const newPainting = getNewPainting()
    const addedPainting = addPainting('zhipu_paintings', newPainting)
@@ -342,12 +342,12 @@ const ZhipuPage: FC<{ Options: string[] }> = ({ Options }) => {
  return (
    <Container>
      <Navbar>
        <NavbarCenter>
          <Title>{t('title.paintings')}</Title>
        </NavbarCenter>
        <NavbarCenter style={{ borderRight: 'none' }}>{t('paintings.title')}</NavbarCenter>
        {isMac && (
          <NavbarRight>
            <Button type="text" icon={<PlusOutlined />} onClick={createNewPainting} disabled={generating} />
          <NavbarRight style={{ justifyContent: 'flex-end' }}>
            <Button size="small" className="nodrag" icon={<PlusOutlined />} onClick={handleAddPainting}>
              {t('paintings.button.new.image')}
            </Button>
          </NavbarRight>
        )}
      </Navbar>
@@ -482,7 +482,7 @@ const ZhipuPage: FC<{ Options: string[] }> = ({ Options }) => {
          selectedPainting={painting}
          onSelectPainting={onSelectPainting}
          onDeletePainting={onDeletePainting}
          onNewPainting={createNewPainting}
          onNewPainting={handleAddPainting}
        />
      </ContentContainer>
    </Container>
@@ -556,12 +556,6 @@ const ToolbarMenu = styled.div`
  gap: 8px;
`

const Title = styled.h1`
  margin: 0;
  font-size: 18px;
  font-weight: 600;
`

const ProviderTitleContainer = styled.div`
  display: flex;
  justify-content: space-between;

@@ -14,7 +14,7 @@ import { runAsyncFunction } from '@renderer/utils'
import { UpgradeChannel } from '@shared/config/constant'
import { Avatar, Button, Progress, Radio, Row, Switch, Tag, Tooltip } from 'antd'
import { debounce } from 'lodash'
import { Bug, FileCheck, Github, Globe, Mail, Rss } from 'lucide-react'
import { Bug, FileCheck, Globe, Mail, Rss } from 'lucide-react'
import { BadgeQuestionMark } from 'lucide-react'
import { FC, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
@@ -32,7 +32,7 @@ const AboutSettings: FC = () => {
  const { theme } = useTheme()
  const dispatch = useAppDispatch()
  const { update } = useRuntime()
  const { openMinapp } = useMinappPopup()
  const { openSmartMinapp } = useMinappPopup()

  const onCheckUpdate = debounce(
    async () => {
@@ -79,7 +79,7 @@ const AboutSettings: FC = () => {

  const showLicense = async () => {
    const { appPath } = await window.api.getAppInfo()
    openMinapp({
    openSmartMinapp({
      id: 'cherrystudio-license',
      name: t('settings.about.license.title'),
      url: `file://${appPath}/resources/cherry-studio/license.html`,
@@ -89,7 +89,7 @@ const AboutSettings: FC = () => {

  const showReleases = async () => {
    const { appPath } = await window.api.getAppInfo()
    openMinapp({
    openSmartMinapp({
      id: 'cherrystudio-releases',
      name: t('settings.about.releases.title'),
      url: `file://${appPath}/resources/cherry-studio/releases.html?theme=${theme === ThemeMode.dark ? 'dark' : 'light'}`,
@@ -273,7 +273,7 @@ const AboutSettings: FC = () => {
        <IndicatorLight color="green" />
      </SettingRowTitle>
    </SettingRow>
    <UpdateNotesWrapper>
    <UpdateNotesWrapper className="markdown">
      <Markdown>
        {typeof update.info.releaseNotes === 'string'
          ? update.info.releaseNotes.replace(/\n/g, '\n\n')
@@ -309,7 +309,7 @@ const AboutSettings: FC = () => {
    <SettingDivider />
    <SettingRow>
      <SettingRowTitle>
        <Github size={18} />
        <GithubOutlined size={18} />
        {t('settings.about.feedback.title')}
      </SettingRowTitle>
      <Button onClick={() => onOpenWebsite('https://github.com/CherryHQ/cherry-studio/issues/new/choose')}>

@@ -1,5 +1,6 @@
import { InfoCircleOutlined } from '@ant-design/icons'
import { HStack } from '@renderer/components/Layout'
import { AppLogo } from '@renderer/config/env'
import { useTheme } from '@renderer/context/ThemeProvider'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'
import { RootState, useAppDispatch } from '@renderer/store'
@@ -16,7 +17,7 @@ const JoplinSettings: FC = () => {
  const { t } = useTranslation()
  const { theme } = useTheme()
  const dispatch = useAppDispatch()
  const { openMinapp } = useMinappPopup()
  const { openSmartMinapp } = useMinappPopup()

  const joplinToken = useSelector((state: RootState) => state.settings.joplinToken)
  const joplinUrl = useSelector((state: RootState) => state.settings.joplinUrl)
@@ -66,10 +67,11 @@ const JoplinSettings: FC = () => {
  }

  const handleJoplinHelpClick = () => {
    openMinapp({
    openSmartMinapp({
      id: 'joplin-help',
      name: 'Joplin Help',
      url: 'https://joplinapp.org/help/apps/clipper'
      url: 'https://joplinapp.org/help/apps/clipper',
      logo: AppLogo
    })
  }

@@ -1,6 +1,7 @@
import { InfoCircleOutlined } from '@ant-design/icons'
import { Client } from '@notionhq/client'
import { HStack } from '@renderer/components/Layout'
import { AppLogo } from '@renderer/config/env'
import { useTheme } from '@renderer/context/ThemeProvider'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'
import { RootState, useAppDispatch } from '@renderer/store'
@@ -21,7 +22,7 @@ const NotionSettings: FC = () => {
  const { t } = useTranslation()
  const { theme } = useTheme()
  const dispatch = useAppDispatch()
  const { openMinapp } = useMinappPopup()
  const { openSmartMinapp } = useMinappPopup()

  const notionApiKey = useSelector((state: RootState) => state.settings.notionApiKey)
  const notionDatabaseID = useSelector((state: RootState) => state.settings.notionDatabaseID)
@@ -67,10 +68,11 @@ const NotionSettings: FC = () => {
  }

  const handleNotionTitleClick = () => {
    openMinapp({
    openSmartMinapp({
      id: 'notion-help',
      name: 'Notion Help',
      url: 'https://docs.cherry-ai.com/advanced-basic/notion'
      url: 'https://docs.cherry-ai.com/advanced-basic/notion',
      logo: AppLogo
    })
  }

@@ -3,6 +3,7 @@ import { HStack } from '@renderer/components/Layout'
import { S3BackupManager } from '@renderer/components/S3BackupManager'
import { S3BackupModal, useS3BackupModal } from '@renderer/components/S3Modals'
import Selector from '@renderer/components/Selector'
import { AppLogo } from '@renderer/config/env'
import { useTheme } from '@renderer/context/ThemeProvider'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'
import { useSettings } from '@renderer/hooks/useSettings'
@@ -47,7 +48,7 @@ const S3Settings: FC = () => {
  const dispatch = useAppDispatch()
  const { theme } = useTheme()
  const { t } = useTranslation()
  const { openMinapp } = useMinappPopup()
  const { openSmartMinapp } = useMinappPopup()

  const { s3Sync } = useAppSelector((state) => state.backup)

@@ -62,10 +63,11 @@ const S3Settings: FC = () => {
  }

  const handleTitleClick = () => {
    openMinapp({
    openSmartMinapp({
      id: 's3-help',
      name: 'S3 Compatible Storage Help',
      url: 'https://docs.cherry-ai.com/data-settings/s3-compatible'
      url: 'https://docs.cherry-ai.com/data-settings/s3-compatible',
      logo: AppLogo
    })
  }

@@ -1,6 +1,7 @@
import { InfoCircleOutlined } from '@ant-design/icons'
import { loggerService } from '@logger'
import { HStack } from '@renderer/components/Layout'
import { AppLogo } from '@renderer/config/env'
import { useTheme } from '@renderer/context/ThemeProvider'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'
import { RootState, useAppDispatch } from '@renderer/store'
@@ -16,7 +17,7 @@ import { SettingDivider, SettingGroup, SettingRow, SettingRowTitle, SettingTitle
const logger = loggerService.withContext('SiyuanSettings')

const SiyuanSettings: FC = () => {
  const { openMinapp } = useMinappPopup()
  const { openSmartMinapp } = useMinappPopup()
  const { t } = useTranslation()
  const { theme } = useTheme()
  const dispatch = useAppDispatch()
@@ -43,10 +44,11 @@ const SiyuanSettings: FC = () => {
  }

  const handleSiyuanHelpClick = () => {
    openMinapp({
    openSmartMinapp({
      id: 'siyuan-help',
      name: 'Siyuan Help',
      url: 'https://docs.cherry-ai.com/advanced-basic/siyuan'
      url: 'https://docs.cherry-ai.com/advanced-basic/siyuan',
      logo: AppLogo
    })
  }

@@ -1,5 +1,6 @@
import { InfoCircleOutlined } from '@ant-design/icons'
import { HStack } from '@renderer/components/Layout'
import { AppLogo } from '@renderer/config/env'
import { useTheme } from '@renderer/context/ThemeProvider'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'
import { RootState, useAppDispatch } from '@renderer/store'
@@ -16,7 +17,7 @@ const YuqueSettings: FC = () => {
  const { t } = useTranslation()
  const { theme } = useTheme()
  const dispatch = useAppDispatch()
  const { openMinapp } = useMinappPopup()
  const { openSmartMinapp } = useMinappPopup()

  const yuqueToken = useSelector((state: RootState) => state.settings.yuqueToken)
  const yuqueUrl = useSelector((state: RootState) => state.settings.yuqueUrl)
@@ -65,10 +66,11 @@ const YuqueSettings: FC = () => {
  }

  const handleYuqueHelpClick = () => {
    openMinapp({
    openSmartMinapp({
      id: 'yuque-help',
      name: 'Yuque Help',
      url: 'https://www.yuque.com/settings/tokens'
      url: 'https://www.yuque.com/settings/tokens',
      logo: AppLogo
    })
  }

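Each of these integration panels now calls openSmartMinapp and passes an explicit logo alongside id, name and url. A usage sketch assembled only from the call sites above; the hook's full option type may include more fields than shown here.

// Usage sketch based on the call sites above (assumed fields: id, name, url, logo).
import { AppLogo } from '@renderer/config/env'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'

const ExampleHelpButton = () => {
  const { openSmartMinapp } = useMinappPopup()

  const openHelp = () =>
    openSmartMinapp({
      id: 'example-help', // hypothetical id for a temporary mini-app
      name: 'Example Help',
      url: 'https://docs.cherry-ai.com/advanced-basic/notion',
      logo: AppLogo // explicit icon, since the app may not be pre-registered
    })

  return <button onClick={openHelp}>Help</button>
}

export default ExampleHelpButton
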
@@ -183,7 +183,7 @@ const CardContainer = styled.div<{ $isActive: boolean }>`
  margin-bottom: 5px;
  height: 125px;
  opacity: ${(props) => (props.$isActive ? 1 : 0.6)};
  width: calc(100vw - var(--settings-width) - 40px);
  width: 100%;

  &:hover {
    opacity: 1;

@@ -251,7 +251,8 @@ const McpServersList: FC = () => {
        onSortEnd={onSortEnd}
        layout="list"
        horizontal={false}
        listStyle={{ display: 'flex', flexDirection: 'column' }}
        listStyle={{ display: 'flex', flexDirection: 'column', width: '100%' }}
        itemStyle={{ width: '100%' }}
        gap="12px"
        restrictions={{ scrollableAncestor: true }}
        useDragOverlay

@@ -17,6 +17,7 @@ import {
  isVisionModel,
  isWebSearchModel
} from '@renderer/config/models'
import { isNewApiProvider } from '@renderer/config/providers'
import { useDynamicLabelWidth } from '@renderer/hooks/useDynamicLabelWidth'
import { Model, ModelCapability, ModelType, Provider } from '@renderer/types'
import { getDefaultGroupName, getDifference, getUnion, uniqueObjectArray } from '@renderer/utils'
@@ -78,7 +79,7 @@ const ModelEditContent: FC<ModelEditContentProps & ModalProps> = ({ provider, mo
      id: formValues.id || model.id,
      name: formValues.name || model.name,
      group: formValues.group || model.group,
      endpoint_type: provider.id === 'new-api' ? formValues.endpointType : model.endpoint_type,
      endpoint_type: isNewApiProvider(provider) ? formValues.endpointType : model.endpoint_type,
      capabilities: overrides?.capabilities ?? modelCapabilities,
      supported_text_delta: overrides?.supported_text_delta ?? supportedTextDelta,
      pricing: {
@@ -97,7 +98,7 @@ const ModelEditContent: FC<ModelEditContentProps & ModalProps> = ({ provider, mo
      id: values.id || model.id,
      name: values.name || model.name,
      group: values.group || model.group,
      endpoint_type: provider.id === 'new-api' ? values.endpointType : model.endpoint_type,
      endpoint_type: isNewApiProvider(provider) ? values.endpointType : model.endpoint_type,
      capabilities: modelCapabilities,
      supported_text_delta: supportedTextDelta,
      pricing: {
@@ -247,7 +248,7 @@ const ModelEditContent: FC<ModelEditContentProps & ModalProps> = ({ provider, mo
    <Modal title={t('models.edit')} footer={null} transitionName="animation-move-down" centered {...props}>
      <Form
        form={form}
        labelCol={{ flex: provider.id === 'new-api' ? labelWidth : '110px' }}
        labelCol={{ flex: isNewApiProvider(provider) ? labelWidth : '110px' }}
        labelAlign="left"
        colon={false}
        style={{ marginTop: 15 }}
@@ -309,7 +310,7 @@ const ModelEditContent: FC<ModelEditContentProps & ModalProps> = ({ provider, mo
        tooltip={t('settings.models.add.group_name.tooltip')}>
        <Input placeholder={t('settings.models.add.group_name.placeholder')} spellCheck={false} />
      </Form.Item>
      {provider.id === 'new-api' && (
      {isNewApiProvider(provider) && (
        <Form.Item
          name="endpointType"
          label={t('settings.models.add.endpoint_type.label')}

@@ -3,6 +3,7 @@ import ModelIdWithTags from '@renderer/components/ModelIdWithTags'
import CustomTag from '@renderer/components/Tags/CustomTag'
import { DynamicVirtualList } from '@renderer/components/VirtualList'
import { getModelLogo } from '@renderer/config/models'
import { isNewApiProvider } from '@renderer/config/providers'
import FileItem from '@renderer/pages/files/FileItem'
import NewApiBatchAddModelPopup from '@renderer/pages/settings/ProviderSettings/ModelList/NewApiBatchAddModelPopup'
import { Model, Provider } from '@renderer/types'
@@ -91,7 +92,7 @@ const ManageModelsList: React.FC<ManageModelsListProps> = ({ modelGroups, provid
    // Add the whole group
    const wouldAddModels = models.filter((model) => !isModelInProvider(provider, model.id))

    if (provider.id === 'new-api') {
    if (isNewApiProvider(provider)) {
      if (wouldAddModels.every(isValidNewApiModel)) {
        wouldAddModels.forEach(onAddModel)
      } else {

@@ -13,6 +13,7 @@ import {
  isWebSearchModel,
  SYSTEM_MODELS
} from '@renderer/config/models'
import { isNewApiProvider } from '@renderer/config/providers'
import { useProvider } from '@renderer/hooks/useProvider'
import NewApiAddModelPopup from '@renderer/pages/settings/ProviderSettings/ModelList/NewApiAddModelPopup'
import NewApiBatchAddModelPopup from '@renderer/pages/settings/ProviderSettings/ModelList/NewApiBatchAddModelPopup'
@@ -129,7 +130,7 @@ const PopupContainer: React.FC<Props> = ({ providerId, resolve }) => {
  const onAddModel = useCallback(
    (model: Model) => {
      if (!isEmpty(model.name)) {
        if (provider.id === 'new-api') {
        if (isNewApiProvider(provider)) {
          if (model.supported_endpoint_types && model.supported_endpoint_types.length > 0) {
            addModel({
              ...model,
@@ -160,7 +161,7 @@ const PopupContainer: React.FC<Props> = ({ providerId, resolve }) => {
      content: t('settings.models.manage.add_listed.confirm'),
      centered: true,
      onOk: () => {
        if (provider.id === 'new-api') {
        if (isNewApiProvider(provider)) {
          if (models.every(isValidNewApiModel)) {
            wouldAddModel.forEach(onAddModel)
          } else {

@@ -2,7 +2,7 @@ import CollapsibleSearchBar from '@renderer/components/CollapsibleSearchBar'
import { LoadingIcon, StreamlineGoodHealthAndWellBeing } from '@renderer/components/Icons'
import { HStack } from '@renderer/components/Layout'
import CustomTag from '@renderer/components/Tags/CustomTag'
import { PROVIDER_URLS } from '@renderer/config/providers'
import { isNewApiProvider, PROVIDER_URLS } from '@renderer/config/providers'
import { useProvider } from '@renderer/hooks/useProvider'
import { getProviderLabel } from '@renderer/i18n/label'
import { SettingHelpLink, SettingHelpText, SettingHelpTextRow, SettingSubtitle } from '@renderer/pages/settings'
@@ -86,7 +86,7 @@ const ModelList: React.FC<ModelListProps> = ({ providerId }) => {
  }, [provider.id])

  const onAddModel = useCallback(() => {
    if (provider.id === 'new-api') {
    if (isNewApiProvider(provider)) {
      NewApiAddModelPopup.show({ title: t('settings.models.add.add_model'), provider })
    } else {
      AddModelPopup.show({ title: t('settings.models.add.add_model'), provider })

@@ -1,6 +1,7 @@
import { TopView } from '@renderer/components/TopView'
import { endpointTypeOptions } from '@renderer/config/endpointTypes'
import { isNotSupportedTextDelta } from '@renderer/config/models'
import { isNewApiProvider } from '@renderer/config/providers'
import { useDynamicLabelWidth } from '@renderer/hooks/useDynamicLabelWidth'
import { useProvider } from '@renderer/hooks/useProvider'
import { EndpointType, Model, Provider } from '@renderer/types'
@@ -60,7 +61,7 @@ const PopupContainer: React.FC<Props> = ({ title, provider, resolve, model, endp
      provider: provider.id,
      name: values.name ? values.name : id.toUpperCase(),
      group: values.group ?? getDefaultGroupName(id),
      endpoint_type: provider.id === 'new-api' ? values.endpointType : undefined
      endpoint_type: isNewApiProvider(provider) ? values.endpointType : undefined
    }

    addModel({ ...model, supported_text_delta: !isNotSupportedTextDelta(model) })

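These hunks replace the repeated provider.id === 'new-api' checks with isNewApiProvider from @renderer/config/providers. Its implementation is not part of this diff; the sketch below is only an assumption about its likely shape.

// Assumed shape only: the real isNewApiProvider may also match other provider ids
// or a dedicated flag on the Provider type.
interface ProviderLike {
  id: string
  apiType?: string
}

function isNewApiProviderSketch(provider: ProviderLike): boolean {
  return provider.id === 'new-api' || provider.apiType === 'new-api'
}
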
@@ -15,22 +15,23 @@ export const createThinkingCallbacks = (deps: ThinkingCallbacksDependencies) =>

  // Internally maintained state
  let thinkingBlockId: string | null = null
  let _thinking_millsec = 0

  return {
    onThinkingStart: async () => {
      if (blockManager.hasInitialPlaceholder) {
        const changes = {
        const changes: Partial<MessageBlock> = {
          type: MessageBlockType.THINKING,
          content: '',
          status: MessageBlockStatus.STREAMING,
          thinking_millsec: 0
          thinking_millsec: _thinking_millsec
        }
        thinkingBlockId = blockManager.initialPlaceholderBlockId!
        blockManager.smartBlockUpdate(thinkingBlockId, changes, MessageBlockType.THINKING, true)
      } else if (!thinkingBlockId) {
        const newBlock = createThinkingBlock(assistantMsgId, '', {
          status: MessageBlockStatus.STREAMING,
          thinking_millsec: 0
          thinking_millsec: _thinking_millsec
        })
        thinkingBlockId = newBlock.id
        await blockManager.handleBlockTransition(newBlock, MessageBlockType.THINKING)
@@ -38,26 +39,27 @@ export const createThinkingCallbacks = (deps: ThinkingCallbacksDependencies) =>
    },

    onThinkingChunk: async (text: string, thinking_millsec?: number) => {
      _thinking_millsec = thinking_millsec || 0
      if (thinkingBlockId) {
        const blockChanges: Partial<MessageBlock> = {
          content: text,
          status: MessageBlockStatus.STREAMING,
          thinking_millsec: thinking_millsec || 0
          thinking_millsec: _thinking_millsec
        }
        blockManager.smartBlockUpdate(thinkingBlockId, blockChanges, MessageBlockType.THINKING)
      }
    },

    onThinkingComplete: (finalText: string, final_thinking_millsec?: number) => {
    onThinkingComplete: (finalText: string) => {
      if (thinkingBlockId) {
        const changes = {
          type: MessageBlockType.THINKING,
        const changes: Partial<MessageBlock> = {
          content: finalText,
          status: MessageBlockStatus.SUCCESS,
          thinking_millsec: final_thinking_millsec || 0
          thinking_millsec: _thinking_millsec
        }
        blockManager.smartBlockUpdate(thinkingBlockId, changes, MessageBlockType.THINKING, true)
        thinkingBlockId = null
        _thinking_millsec = 0
      } else {
        logger.warn(
          `[onThinkingComplete] Received thinking.complete but last block was not THINKING (was ${blockManager.lastBlockType}) or lastBlockId is null.`

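The callbacks now share a single _thinking_millsec variable captured in the factory's closure, so the elapsed thinking time reported by the last THINKING_DELTA survives until onThinkingComplete instead of being reset to 0. A stripped-down, self-contained sketch of that closure-state pattern (not the project's real callback factory):

// Minimal closure-state sketch of the pattern above.
function createElapsedTracker() {
  let elapsedMs = 0 // shared across callbacks, like _thinking_millsec above

  return {
    onChunk(deltaMs?: number): void {
      elapsedMs = deltaMs ?? 0 // the most recent report wins
    },
    onComplete(): number {
      const finalMs = elapsedMs // completion reuses the last reported value
      elapsedMs = 0 // reset for the next thinking block
      return finalMs
    }
  }
}

const tracker = createElapsedTracker()
tracker.onChunk(1000)
tracker.onChunk(3000)
console.log(tracker.onComplete()) // 3000
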
@@ -46,8 +46,9 @@ const assistantsSlice = createSlice({
    removeAssistant: (state, action: PayloadAction<{ id: string }>) => {
      state.assistants = state.assistants.filter((c) => c.id !== action.payload.id)
    },
    updateAssistant: (state, action: PayloadAction<Partial<Assistant>>) => {
      state.assistants = state.assistants.map((c) => (c.id === action.payload.id ? { ...c, ...action.payload } : c))
    updateAssistant: (state, action: PayloadAction<Partial<Assistant> & { id: string }>) => {
      const { id, ...update } = action.payload
      state.assistants = state.assistants.map((c) => (c.id === id ? { ...c, ...update } : c))
    },
    updateAssistantSettings: (
      state,

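Typing the payload as Partial<Assistant> & { id: string } makes the id mandatory at every dispatch site while keeping the remaining fields optional, and destructuring separates the identifier from the fields being merged. A self-contained sketch of the same reducer pattern with a simplified Assistant type:

// Simplified sketch of the reducer pattern above, using Redux Toolkit's createSlice.
import { createSlice, PayloadAction } from '@reduxjs/toolkit'

interface Assistant {
  id: string
  name: string
  prompt: string
}

const sliceSketch = createSlice({
  name: 'assistants',
  initialState: { assistants: [] as Assistant[] },
  reducers: {
    // `id` is required, all other fields optional; untouched fields survive the spread.
    updateAssistant: (state, action: PayloadAction<Partial<Assistant> & { id: string }>) => {
      const { id, ...update } = action.payload
      state.assistants = state.assistants.map((a) => (a.id === id ? { ...a, ...update } : a))
    }
  }
})

export const { updateAssistant } = sliceSketch.actions
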
@@ -2495,6 +2495,7 @@ const migrateConfig = {
  '157': (state: RootState) => {
    try {
      addProvider(state, 'aionly')
      state.llm.providers = moveProvider(state.llm.providers, 'aionly', 10)

      const cherryinProvider = state.llm.providers.find((provider) => provider.id === 'cherryin')

@@ -410,7 +410,8 @@ describe('streamCallback Integration Tests', () => {
        { type: ChunkType.THINKING_START },
        { type: ChunkType.THINKING_DELTA, text: 'Let me think...', thinking_millsec: 1000 },
        { type: ChunkType.THINKING_DELTA, text: 'I need to consider...', thinking_millsec: 2000 },
        { type: ChunkType.THINKING_COMPLETE, text: 'Final thoughts', thinking_millsec: 3000 },
        { type: ChunkType.THINKING_DELTA, text: 'Final thoughts', thinking_millsec: 3000 },
        { type: ChunkType.THINKING_COMPLETE, text: 'Final thoughts' },
        { type: ChunkType.BLOCK_COMPLETE }
      ]

16
yarn.lock
@@ -155,7 +155,7 @@ __metadata:
  languageName: node
  linkType: hard

"@ai-sdk/google@npm:2.0.14, @ai-sdk/google@npm:^2.0.14":
"@ai-sdk/google@npm:2.0.14":
  version: 2.0.14
  resolution: "@ai-sdk/google@npm:2.0.14"
  dependencies:
@@ -167,6 +167,18 @@ __metadata:
  languageName: node
  linkType: hard

"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch":
  version: 2.0.14
  resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch::version=2.0.14&hash=a91bb2"
  dependencies:
    "@ai-sdk/provider": "npm:2.0.0"
    "@ai-sdk/provider-utils": "npm:3.0.9"
  peerDependencies:
    zod: ^3.25.76 || ^4
  checksum: 10c0/5ec33dc9898457b1f48ed14cb767817345032c539dd21b7e21985ed47bc21b0820922b581bf349bb3898136790b12da3a0a7c9903c333a28ead0c3c2cd5230f2
  languageName: node
  linkType: hard

"@ai-sdk/mistral@npm:^2.0.14":
  version: 2.0.14
  resolution: "@ai-sdk/mistral@npm:2.0.14"
@@ -2374,7 +2386,7 @@ __metadata:
    "@ai-sdk/anthropic": "npm:^2.0.17"
    "@ai-sdk/azure": "npm:^2.0.30"
    "@ai-sdk/deepseek": "npm:^1.0.17"
    "@ai-sdk/google": "npm:^2.0.14"
    "@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.14#~/.yarn/patches/@ai-sdk-google-npm-2.0.14-376d8b03cc.patch"
    "@ai-sdk/openai": "npm:^2.0.30"
    "@ai-sdk/openai-compatible": "npm:^1.0.17"
    "@ai-sdk/provider": "npm:^2.0.0"