From 77fd90ef7d9661131307c009f4bf0569b84a2611 Mon Sep 17 00:00:00 2001
From: Zhaokun
Date: Wed, 3 Dec 2025 16:14:37 +0800
Subject: [PATCH 01/17] fix: Selected area in code block changes after scrolling (#11469)

* fix: improve code block copy in collapsed state with virtual scroll

- Add saveSelection mechanism to track selection across virtual scroll updates
- Implement custom copy handler to extract complete content from raw data
- Auto-expand code block when multi-line selection is detected in collapsed state
- Only enable auto-expand when codeCollapsible setting is enabled
- Add comprehensive logging for debugging selection and copy issues

Fixes issue where copying code in collapsed state would lose content from virtualized rows that are not rendered in DOM. The solution captures selection position (line + offset) during scroll and uses it to extract complete content from the original source when copying.

* fix(CodeViewer): scope selection and copy to viewer container to prevent multiple blocks appearing selected

- Add selectionBelongsToViewer() to ensure selection anchors are within this viewer
- Guard saveSelection, copy handler, and selectionchange auto-expand logic
- Avoids cross-viewer selection bleed when multiple CodeViewer instances exist on a page

Follow-up to 37c2b5ecb (virtual scroll selection/copy).

* fix(CodeViewer): clear saved selection when active selection belongs to another viewer

- Early-return in selectionchange handler when selection is outside this viewer
- Complements scoping guards to avoid misleading multi-selection states

* fix(CodeViewer): change logger info to debug for selection and copy events

- Adjust logging level from info to debug for various selection and copy operations to reduce log verbosity.
- Ensure selection belongs to the current viewer before processing.

* fix(CodeViewer): remove invisible character from import statement

* fix(CodeViewer): complete useCallback deps to avoid stale closure

- saveSelection deps -> [selectionBelongsToViewer]
- handleCopy deps -> [selectionBelongsToViewer, expanded, saveSelection, rawLines]
- no behavior change; satisfy exhaustive-deps; reduce risk of stale refs

* fix(CodeViewer): improve selection handling for virtual scrolling and enhance comments

* fix(CodeViewer): handle clipboardData unavailability and remove unused ref

- Add null check for event.clipboardData to prevent silent copy failure
- When clipboardData is unavailable, fall back to browser default copy behavior
- Remove unused isRestoringSelectionRef and its dead code check
- Improve copy reliability in edge cases where clipboard API may be unavailable
---
 .../src/components/CodeBlockView/view.tsx  |   3 +-
 src/renderer/src/components/CodeViewer.tsx | 278 +++++++++++++++++-
 2 files changed, 279 insertions(+), 2 deletions(-)

diff --git a/src/renderer/src/components/CodeBlockView/view.tsx b/src/renderer/src/components/CodeBlockView/view.tsx
index 2ba94d0ef2..8b251b9603 100644
--- a/src/renderer/src/components/CodeBlockView/view.tsx
+++ b/src/renderer/src/components/CodeBlockView/view.tsx
@@ -264,9 +264,10 @@ export const CodeBlockView: React.FC = memo(({ children, language, onSave
         expanded={shouldExpand}
         wrapped={shouldWrap}
         maxHeight={`${MAX_COLLAPSED_CODE_HEIGHT}px`}
+         onRequestExpand={codeCollapsible ?
() => setExpandOverride(true) : undefined} /> ), - [children, codeEditor.enabled, handleHeightChange, language, onSave, shouldExpand, shouldWrap] + [children, codeCollapsible, codeEditor.enabled, handleHeightChange, language, onSave, shouldExpand, shouldWrap] ) // 特殊视图组件映射 diff --git a/src/renderer/src/components/CodeViewer.tsx b/src/renderer/src/components/CodeViewer.tsx index af60633672..a60b45d713 100644 --- a/src/renderer/src/components/CodeViewer.tsx +++ b/src/renderer/src/components/CodeViewer.tsx @@ -1,3 +1,4 @@ +import { loggerService } from '@logger' import { useCodeStyle } from '@renderer/context/CodeStyleProvider' import { useCodeHighlight } from '@renderer/hooks/useCodeHighlight' import { useSettings } from '@renderer/hooks/useSettings' @@ -9,6 +10,15 @@ import React, { memo, useCallback, useEffect, useLayoutEffect, useMemo, useRef } import type { ThemedToken } from 'shiki/core' import styled from 'styled-components' +const logger = loggerService.withContext('CodeViewer') + +interface SavedSelection { + startLine: number + startOffset: number + endLine: number + endOffset: number +} + interface CodeViewerProps { /** Code string value. */ value: string @@ -52,6 +62,10 @@ interface CodeViewerProps { * @default true */ wrapped?: boolean + /** + * Callback to request expansion when multi-line selection is detected. + */ + onRequestExpand?: () => void } /** @@ -70,13 +84,24 @@ const CodeViewer = ({ fontSize: customFontSize, className, expanded = true, - wrapped = true + wrapped = true, + onRequestExpand }: CodeViewerProps) => { const { codeShowLineNumbers: _lineNumbers, fontSize: _fontSize } = useSettings() const { getShikiPreProperties, isShikiThemeDark } = useCodeStyle() const shikiThemeRef = useRef(null) const scrollerRef = useRef(null) const callerId = useRef(`${Date.now()}-${uuid()}`).current + const savedSelectionRef = useRef(null) + // Ensure the active selection actually belongs to this CodeViewer instance + const selectionBelongsToViewer = useCallback((sel: Selection | null) => { + const scroller = scrollerRef.current + if (!scroller || !sel || sel.rangeCount === 0) return false + + // Check if selection intersects with scroller + const range = sel.getRangeAt(0) + return scroller.contains(range.commonAncestorContainer) + }, []) const fontSize = useMemo(() => customFontSize ?? _fontSize - 1, [customFontSize, _fontSize]) const lineNumbers = useMemo(() => options?.lineNumbers ?? _lineNumbers, [options?.lineNumbers, _lineNumbers]) @@ -112,6 +137,204 @@ const CodeViewer = ({ } }, [language, getShikiPreProperties, isShikiThemeDark, className]) + // 保存当前选区的逻辑位置 + const saveSelection = useCallback((): SavedSelection | null => { + const selection = window.getSelection() + if (!selection || selection.rangeCount === 0 || selection.isCollapsed) { + return null + } + + // Only capture selections within this viewer's scroller + if (!selectionBelongsToViewer(selection)) { + return null + } + + const range = selection.getRangeAt(0) + const scroller = scrollerRef.current + if (!scroller) return null + + // 查找选区起始和结束位置对应的行号 + const findLineAndOffset = (node: Node, offset: number): { line: number; offset: number } | null => { + // 向上查找包含 data-index 属性的元素 + let element = node.nodeType === Node.ELEMENT_NODE ? 
(node as Element) : node.parentElement + + // 跳过行号元素,找到实际的行内容 + while (element) { + if (element.classList?.contains('line-number')) { + // 如果在行号上,移动到同级的 line-content + const lineContainer = element.parentElement + const lineContent = lineContainer?.querySelector('.line-content') + if (lineContent) { + element = lineContent as Element + break + } + } + if (element.hasAttribute('data-index')) { + break + } + element = element.parentElement + } + + if (!element || !element.hasAttribute('data-index')) { + logger.warn('Could not find data-index element', { + nodeName: node.nodeName, + nodeType: node.nodeType + }) + return null + } + + const lineIndex = parseInt(element.getAttribute('data-index') || '0', 10) + const lineContent = element.querySelector('.line-content') || element + + // Calculate character offset within the line + let charOffset = 0 + if (node.nodeType === Node.TEXT_NODE) { + // 遍历该行的所有文本节点,找到当前节点的位置 + const walker = document.createTreeWalker(lineContent as Node, NodeFilter.SHOW_TEXT) + let currentNode: Node | null + while ((currentNode = walker.nextNode())) { + if (currentNode === node) { + charOffset += offset + break + } + charOffset += currentNode.textContent?.length || 0 + } + } else if (node.nodeType === Node.ELEMENT_NODE) { + // 如果是元素节点,计算之前所有文本的长度 + const textBefore = (node as Element).textContent?.slice(0, offset) || '' + charOffset = textBefore.length + } + + logger.debug('findLineAndOffset result', { + lineIndex, + charOffset + }) + + return { line: lineIndex, offset: charOffset } + } + + const start = findLineAndOffset(range.startContainer, range.startOffset) + const end = findLineAndOffset(range.endContainer, range.endOffset) + + if (!start || !end) { + logger.warn('saveSelection failed', { + hasStart: !!start, + hasEnd: !!end + }) + return null + } + + logger.debug('saveSelection success', { + startLine: start.line, + startOffset: start.offset, + endLine: end.line, + endOffset: end.offset + }) + + return { + startLine: start.line, + startOffset: start.offset, + endLine: end.line, + endOffset: end.offset + } + }, [selectionBelongsToViewer]) + + // 滚动事件处理:保存选择用于复制,但不恢复(避免选择高亮问题) + const handleScroll = useCallback(() => { + // 只保存选择状态用于复制,不在滚动时恢复选择 + const saved = saveSelection() + if (saved) { + savedSelectionRef.current = saved + logger.debug('Selection saved for copy', { + startLine: saved.startLine, + endLine: saved.endLine + }) + } + }, [saveSelection]) + + // 处理复制事件,确保跨虚拟滚动的复制能获取完整内容 + const handleCopy = useCallback( + (event: ClipboardEvent) => { + const selection = window.getSelection() + // Ignore copies for selections outside this viewer + if (!selectionBelongsToViewer(selection)) { + return + } + if (!selection || selection.rangeCount === 0 || selection.isCollapsed) { + return + } + + // Prefer saved selection from scroll, otherwise get it in real-time + let saved = savedSelectionRef.current + if (!saved) { + saved = saveSelection() + } + + if (!saved) { + logger.warn('Cannot get selection, using browser default') + return + } + + const { startLine, startOffset, endLine, endOffset } = saved + + // Always use custom copy in collapsed state to handle virtual scroll edge cases + const needsCustomCopy = !expanded + + logger.debug('Copy event', { + startLine, + endLine, + startOffset, + endOffset, + expanded, + needsCustomCopy, + usedSavedSelection: !!savedSelectionRef.current + }) + + if (needsCustomCopy) { + try { + const selectedLines: string[] = [] + + for (let i = startLine; i <= endLine; i++) { + const line = rawLines[i] || '' + + if (i === startLine && i 
=== endLine) { + // 单行选择 + selectedLines.push(line.slice(startOffset, endOffset)) + } else if (i === startLine) { + // 第一行,从 startOffset 到行尾 + selectedLines.push(line.slice(startOffset)) + } else if (i === endLine) { + // 最后一行,从行首到 endOffset + selectedLines.push(line.slice(0, endOffset)) + } else { + // 中间的完整行 + selectedLines.push(line) + } + } + + const fullText = selectedLines.join('\n') + + logger.debug('Custom copy success', { + linesCount: selectedLines.length, + totalLength: fullText.length, + firstLine: selectedLines[0]?.slice(0, 30), + lastLine: selectedLines[selectedLines.length - 1]?.slice(0, 30) + }) + + if (!event.clipboardData) { + logger.warn('clipboardData unavailable, using browser default copy') + return + } + event.clipboardData.setData('text/plain', fullText) + event.preventDefault() + } catch (error) { + logger.error('Custom copy failed', { error }) + } + } + }, + [selectionBelongsToViewer, expanded, saveSelection, rawLines] + ) + // Virtualizer 配置 const getScrollElement = useCallback(() => scrollerRef.current, []) const getItemKey = useCallback((index: number) => `${callerId}-${index}`, [callerId]) @@ -147,6 +370,58 @@ const CodeViewer = ({ } }, [virtualItems, debouncedHighlightLines]) + // Monitor selection changes, clear stale selection state, and auto-expand in collapsed state + const handleSelectionChange = useMemo( + () => + debounce(() => { + const selection = window.getSelection() + + // No valid selection: clear and return + if (!selection || selection.rangeCount === 0 || selection.isCollapsed) { + savedSelectionRef.current = null + return + } + + // Only handle selections within this CodeViewer + if (!selectionBelongsToViewer(selection)) { + savedSelectionRef.current = null + return + } + + // In collapsed state, detect multi-line selection and request expand + if (!expanded && onRequestExpand) { + const saved = saveSelection() + if (saved && saved.endLine > saved.startLine) { + logger.debug('Multi-line selection detected in collapsed state, requesting expand', { + startLine: saved.startLine, + endLine: saved.endLine + }) + onRequestExpand() + } + } + }, 100), + [expanded, onRequestExpand, saveSelection, selectionBelongsToViewer] + ) + + useEffect(() => { + document.addEventListener('selectionchange', handleSelectionChange) + return () => { + document.removeEventListener('selectionchange', handleSelectionChange) + handleSelectionChange.cancel() + } + }, [handleSelectionChange]) + + // Listen for copy events + useEffect(() => { + const scroller = scrollerRef.current + if (!scroller) return + + scroller.addEventListener('copy', handleCopy as EventListener) + return () => { + scroller.removeEventListener('copy', handleCopy as EventListener) + } + }, [handleCopy]) + // Report scrollHeight when it might change useLayoutEffect(() => { onHeightChange?.(scrollerRef.current?.scrollHeight ?? 
0) @@ -160,6 +435,7 @@ const CodeViewer = ({ $wrap={wrapped} $expand={expanded} $lineHeight={estimateSize()} + onScroll={handleScroll} style={ { '--gutter-width': `${gutterDigits}ch`, From 600199dfcffd245247810dcbb897b9cc58112115 Mon Sep 17 00:00:00 2001 From: Peijie Diao <73533898+Do1e@users.noreply.github.com> Date: Wed, 3 Dec 2025 16:59:46 +0800 Subject: [PATCH 02/17] fix: topic name remains after deleting last topic (#11649) * fix: topic name remains after deleting last topic - Fix logic error when deleting the last topic - Only clear messages in the last topic before the commit - Now creates new topic before deleting the original one to ensure proper topic name update Signed-off-by: Do1e * fix(topic): integrate the same code in `handleConfirmDelete` Signed-off-by: Do1e --------- Signed-off-by: Do1e --- .../src/pages/home/Tabs/components/Topics.tsx | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/src/renderer/src/pages/home/Tabs/components/Topics.tsx b/src/renderer/src/pages/home/Tabs/components/Topics.tsx index e5fce191e3..7219f7d383 100644 --- a/src/renderer/src/pages/home/Tabs/components/Topics.tsx +++ b/src/renderer/src/pages/home/Tabs/components/Topics.tsx @@ -4,6 +4,7 @@ import ObsidianExportPopup from '@renderer/components/Popups/ObsidianExportPopup import PromptPopup from '@renderer/components/Popups/PromptPopup' import SaveToKnowledgePopup from '@renderer/components/Popups/SaveToKnowledgePopup' import { isMac } from '@renderer/config/constant' +import { db } from '@renderer/databases' import { useAssistant, useAssistants } from '@renderer/hooks/useAssistant' import { useInPlaceEdit } from '@renderer/hooks/useInPlaceEdit' import { useNotesSettings } from '@renderer/hooks/useNotesSettings' @@ -11,6 +12,7 @@ import { modelGenerating } from '@renderer/hooks/useRuntime' import { useSettings } from '@renderer/hooks/useSettings' import { finishTopicRenaming, startTopicRenaming, TopicManager } from '@renderer/hooks/useTopic' import { fetchMessagesSummary } from '@renderer/services/ApiService' +import { getDefaultTopic } from '@renderer/services/AssistantService' import { EVENT_NAMES, EventEmitter } from '@renderer/services/EventService' import type { RootState } from '@renderer/store' import store from '@renderer/store' @@ -65,7 +67,7 @@ export const Topics: React.FC = ({ assistant: _assistant, activeTopic, se const { t } = useTranslation() const { notesPath } = useNotesSettings() const { assistants } = useAssistants() - const { assistant, removeTopic, moveTopic, updateTopic, updateTopics } = useAssistant(_assistant.id) + const { assistant, addTopic, removeTopic, moveTopic, updateTopic, updateTopics } = useAssistant(_assistant.id) const { showTopicTime, pinTopicsToTop, setTopicPosition, topicPosition } = useSettings() const renamingTopics = useSelector((state: RootState) => state.runtime.chat.renamingTopics) @@ -138,17 +140,21 @@ export const Topics: React.FC = ({ assistant: _assistant, activeTopic, se async (topic: Topic, e: React.MouseEvent) => { e.stopPropagation() if (assistant.topics.length === 1) { - return onClearMessages(topic) + const newTopic = getDefaultTopic(assistant.id) + await db.topics.add({ id: newTopic.id, messages: [] }) + addTopic(newTopic) + setActiveTopic(newTopic) + } else { + const index = findIndex(assistant.topics, (t) => t.id === topic.id) + if (topic.id === activeTopic.id) { + setActiveTopic(assistant.topics[index + 1 === assistant.topics.length ? 
index - 1 : index + 1]) + } } await modelGenerating() - const index = findIndex(assistant.topics, (t) => t.id === topic.id) - if (topic.id === activeTopic.id) { - setActiveTopic(assistant.topics[index + 1 === assistant.topics.length ? index - 1 : index + 1]) - } removeTopic(topic) setDeletingTopicId(null) }, - [activeTopic.id, assistant.topics, onClearMessages, removeTopic, setActiveTopic] + [activeTopic.id, addTopic, assistant.id, assistant.topics, removeTopic, setActiveTopic] ) const onPinTopic = useCallback( From a1e95b55f8d67d86e2a575b35aae6a9386053ded Mon Sep 17 00:00:00 2001 From: Phantom Date: Wed, 3 Dec 2025 17:20:12 +0800 Subject: [PATCH 03/17] fix: remove stale anthropic-beta header for oauth (#11600) Fixes: [Bug]: Error when using claude-neptune-v3 Fixes #11597 --- src/renderer/src/aiCore/provider/providerConfig.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/src/renderer/src/aiCore/provider/providerConfig.ts b/src/renderer/src/aiCore/provider/providerConfig.ts index 3ed3633d7c..528cc8f660 100644 --- a/src/renderer/src/aiCore/provider/providerConfig.ts +++ b/src/renderer/src/aiCore/provider/providerConfig.ts @@ -336,7 +336,6 @@ export async function prepareSpecialProviderConfig( ...(config.options.headers ? config.options.headers : {}), 'Content-Type': 'application/json', 'anthropic-version': '2023-06-01', - 'anthropic-beta': 'oauth-2025-04-20', Authorization: `Bearer ${oauthToken}` }, baseURL: 'https://api.anthropic.com/v1', From 6696bcacb8bf9df724d8f4394332f89892287c2f Mon Sep 17 00:00:00 2001 From: Phantom Date: Wed, 3 Dec 2025 18:20:55 +0800 Subject: [PATCH 04/17] fix(settings): fix wrong type caused by as assertion in OpenAI settings (#11631) * fix(settings): fix wrong type caused by as assertion and migration Add migration step 180 to properly handle 'undefined' string values in OpenAI settings Update selector components to use value conversion helpers for summaryText and verbosity * feat(models): add null as supported verbosity level for OpenAI models Update model utils and types to include null as a valid verbosity level option alongside undefined. This provides more flexibility in controlling verbosity behavior, with null representing an explicit "off" state. Tests and UI components are updated to reflect this change. * fix(verbosity): fix wrong verbosity type definition and handling in #11281 * style: format * fix(store): correct verbosity check in migration config The condition was incorrectly checking for 'undefined' string instead of undefined value, and was assigning to summaryText instead of verbosity. This fixes the migration logic to properly handle the verbosity setting. 
* docs(aiCore): improve comments for verbosity and summary types Update type comments to better explain the behavior of verbosity and summary parameters in OpenAI API requests --- .../aiCore/legacy/clients/BaseApiClient.ts | 22 +++++++--------- .../legacy/clients/openai/OpenAIApiClient.ts | 10 ++----- .../src/config/models/__tests__/utils.test.ts | 12 ++++++--- src/renderer/src/config/models/utils.ts | 9 ++++--- .../Tabs/components/OpenAISettingsGroup.tsx | 20 +++++++++----- src/renderer/src/store/migrate.ts | 17 ++++++++++++ src/renderer/src/types/aiCoreTypes.ts | 26 ++++++++++++------- src/renderer/src/types/sdk.ts | 4 --- 8 files changed, 73 insertions(+), 47 deletions(-) diff --git a/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts b/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts index e755ce3f20..92f24b4abe 100644 --- a/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/BaseApiClient.ts @@ -9,6 +9,7 @@ import { import { REFERENCE_PROMPT } from '@renderer/config/prompts' import { getLMStudioKeepAliveTime } from '@renderer/hooks/useLMStudio' import { getAssistantSettings } from '@renderer/services/AssistantService' +import type { RootState } from '@renderer/store' import type { Assistant, GenerateImageParams, @@ -245,23 +246,20 @@ export abstract class BaseApiClient< protected getVerbosity(model?: Model): OpenAIVerbosity { try { - const state = window.store?.getState() + const state = window.store?.getState() as RootState const verbosity = state?.settings?.openAI?.verbosity - if (verbosity && ['low', 'medium', 'high'].includes(verbosity)) { - // If model is provided, check if the verbosity is supported by the model - if (model) { - const supportedVerbosity = getModelSupportedVerbosity(model) - // Use user's verbosity if supported, otherwise use the first supported option - return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0] - } - return verbosity + // If model is provided, check if the verbosity is supported by the model + if (model) { + const supportedVerbosity = getModelSupportedVerbosity(model) + // Use user's verbosity if supported, otherwise use the first supported option + return supportedVerbosity.includes(verbosity) ? verbosity : supportedVerbosity[0] } + return verbosity } catch (error) { - logger.warn('Failed to get verbosity from state:', error as Error) + logger.warn('Failed to get verbosity from state. Fallback to undefined.', error as Error) + return undefined } - - return 'medium' } protected getTimeout(model: Model) { diff --git a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts index ea50680ea4..cfc9087545 100644 --- a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIApiClient.ts @@ -32,7 +32,6 @@ import { isSupportedThinkingTokenModel, isSupportedThinkingTokenQwenModel, isSupportedThinkingTokenZhipuModel, - isSupportVerbosityModel, isVisionModel, MODEL_SUPPORTED_REASONING_EFFORT, ZHIPU_RESULT_TOKENS @@ -714,13 +713,8 @@ export class OpenAIAPIClient extends OpenAIBaseClient< ...modalities, // groq 有不同的 service tier 配置,不符合 openai 接口类型 service_tier: this.getServiceTier(model) as OpenAIServiceTier, - ...(isSupportVerbosityModel(model) - ? { - text: { - verbosity: this.getVerbosity(model) - } - } - : {}), + // verbosity. getVerbosity ensures the returned value is valid. 
+ verbosity: this.getVerbosity(model), ...this.getProviderSpecificParameters(assistant, model), ...reasoningEffort, // ...getOpenAIWebSearchParams(model, enableWebSearch), diff --git a/src/renderer/src/config/models/__tests__/utils.test.ts b/src/renderer/src/config/models/__tests__/utils.test.ts index a163061ea1..ae4e33875c 100644 --- a/src/renderer/src/config/models/__tests__/utils.test.ts +++ b/src/renderer/src/config/models/__tests__/utils.test.ts @@ -222,18 +222,22 @@ describe('model utils', () => { describe('getModelSupportedVerbosity', () => { it('returns only "high" for GPT-5 Pro models', () => { - expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, 'high']) - expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([undefined, 'high']) + expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro' }))).toEqual([undefined, null, 'high']) + expect(getModelSupportedVerbosity(createModel({ id: 'gpt-5-pro-2025-10-06' }))).toEqual([ + undefined, + null, + 'high' + ]) }) it('returns all levels for non-Pro GPT-5 models', () => { const previewModel = createModel({ id: 'gpt-5-preview' }) - expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, 'low', 'medium', 'high']) + expect(getModelSupportedVerbosity(previewModel)).toEqual([undefined, null, 'low', 'medium', 'high']) }) it('returns all levels for GPT-5.1 models', () => { const gpt51Model = createModel({ id: 'gpt-5.1-preview' }) - expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, 'low', 'medium', 'high']) + expect(getModelSupportedVerbosity(gpt51Model)).toEqual([undefined, null, 'low', 'medium', 'high']) }) it('returns only undefined for non-GPT-5 models', () => { diff --git a/src/renderer/src/config/models/utils.ts b/src/renderer/src/config/models/utils.ts index 25e802b257..accf85e2cd 100644 --- a/src/renderer/src/config/models/utils.ts +++ b/src/renderer/src/config/models/utils.ts @@ -10,7 +10,8 @@ import { isGPT51SeriesModel, isOpenAIChatCompletionOnlyModel, isOpenAIOpenWeightModel, - isOpenAIReasoningModel + isOpenAIReasoningModel, + isSupportVerbosityModel } from './openai' import { isQwenMTModel } from './qwen' import { isGenerateImageModel, isTextToImageModel, isVisionModel } from './vision' @@ -154,10 +155,10 @@ const MODEL_SUPPORTED_VERBOSITY: readonly { * For GPT-5-pro, only 'high' is supported; for other GPT-5 models, 'low', 'medium', and 'high' are supported. * For GPT-5.1 series models, 'low', 'medium', and 'high' are supported. 
* @param model - The model to check - * @returns An array of supported verbosity levels, always including `undefined` as the first element + * @returns An array of supported verbosity levels, always including `undefined` as the first element and `null` when applicable */ export const getModelSupportedVerbosity = (model: Model | undefined | null): OpenAIVerbosity[] => { - if (!model) { + if (!model || !isSupportVerbosityModel(model)) { return [undefined] } @@ -165,7 +166,7 @@ export const getModelSupportedVerbosity = (model: Model | undefined | null): Ope for (const { validator, values } of MODEL_SUPPORTED_VERBOSITY) { if (validator(model)) { - supportedValues = [...values] + supportedValues = [null, ...values] break } } diff --git a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx index bf3e1970dc..35c943e21b 100644 --- a/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx +++ b/src/renderer/src/pages/home/Tabs/components/OpenAISettingsGroup.tsx @@ -24,12 +24,12 @@ import { useTranslation } from 'react-i18next' import { useSelector } from 'react-redux' type VerbosityOption = { - value: NonNullable | 'undefined' + value: NonNullable | 'undefined' | 'null' label: string } type SummaryTextOption = { - value: NonNullable | 'undefined' + value: NonNullable | 'undefined' | 'null' label: string } @@ -85,6 +85,10 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti value: 'undefined', label: t('common.ignore') }, + { + value: 'null', + label: t('common.off') + }, { value: 'auto', label: t('settings.openai.summary_text_mode.auto') @@ -105,6 +109,10 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti value: 'undefined', label: t('common.ignore') }, + { + value: 'null', + label: t('common.off') + }, { value: 'low', label: t('settings.openai.verbosity.low') @@ -203,9 +211,9 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti { - setSummaryText(value as OpenAISummaryText) + setSummaryText(toRealValue(value)) }} options={summaryTextOptions} /> @@ -222,9 +230,9 @@ const OpenAISettingsGroup: FC = ({ model, providerId, SettingGroup, Setti { - setVerbosity(value as OpenAIVerbosity) + setVerbosity(toRealValue(value)) }} options={verbosityOptions} /> diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts index 4b2e4cef89..b2993baf5d 100644 --- a/src/renderer/src/store/migrate.ts +++ b/src/renderer/src/store/migrate.ts @@ -2906,6 +2906,23 @@ const migrateConfig = { logger.error('migrate 179 error', error as Error) return state } + }, + '180': (state: RootState) => { + try { + // @ts-expect-error + if (state.settings.openAI.summaryText === 'undefined') { + state.settings.openAI.summaryText = undefined + } + // @ts-expect-error + if (state.settings.openAI.verbosity === 'undefined') { + state.settings.openAI.verbosity = undefined + } + logger.info('migrate 180 success') + return state + } catch (error) { + logger.error('migrate 180 error', error as Error) + return state + } } } diff --git a/src/renderer/src/types/aiCoreTypes.ts b/src/renderer/src/types/aiCoreTypes.ts index 2e4c09348b..6281905cbb 100644 --- a/src/renderer/src/types/aiCoreTypes.ts +++ b/src/renderer/src/types/aiCoreTypes.ts @@ -1,5 +1,5 @@ import type OpenAI from '@cherrystudio/openai' -import type { NotNull, NotUndefined } from '@types' +import type { NotUndefined } from '@types' import type { ImageModel, LanguageModel } from 'ai' import type { 
generateObject, generateText, ModelMessage, streamObject, streamText } from 'ai' import * as z from 'zod' @@ -31,18 +31,26 @@ export type GenerateObjectParams = Omit[0], 'm export type AiSdkModel = LanguageModel | ImageModel -// The original type unite both undefined and null. -// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs. -// Parameter would not be passed into request if it's undefined. -export type OpenAIVerbosity = NotNull +/** + * Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses. + * + * The original type unites both undefined and null. + * When undefined, the parameter is omitted from the request. + * When null, verbosity is explicitly disabled. + */ +export type OpenAIVerbosity = OpenAI.Responses.ResponseTextConfig['verbosity'] export type ValidOpenAIVerbosity = NotUndefined export type OpenAIReasoningEffort = OpenAI.ReasoningEffort -// The original type unite both undefined and null. -// I pick undefined as the unique falsy type since they seem like share the same meaning according to OpenAI API docs. -// Parameter would not be passed into request if it's undefined. -export type OpenAISummaryText = NotNull +/** + * A summary of the reasoning performed by the model. This can be useful for debugging and understanding the model's reasoning process. + * + * The original type unites both undefined and null. + * When undefined, the parameter is omitted from the request. + * When null, verbosity is explicitly disabled. + */ +export type OpenAISummaryText = OpenAI.Reasoning['summary'] const AiSdkParamsSchema = z.enum([ 'maxOutputTokens', diff --git a/src/renderer/src/types/sdk.ts b/src/renderer/src/types/sdk.ts index bb76c8a634..2b51bee3fd 100644 --- a/src/renderer/src/types/sdk.ts +++ b/src/renderer/src/types/sdk.ts @@ -128,10 +128,6 @@ export type OpenAIExtraBody = { source_lang: 'auto' target_lang: string } - // for gpt-5 series models verbosity control - text?: { - verbosity?: 'low' | 'medium' | 'high' - } } // image is for openrouter. 
audio is ignored for now export type OpenAIModality = OpenAI.ChatCompletionModality | 'image' From 33457686ac017d47bb2b7c70317868fb11c15c3c Mon Sep 17 00:00:00 2001 From: SuYao Date: Wed, 3 Dec 2025 20:54:06 +0800 Subject: [PATCH 05/17] fix: update Inputbar components to support dynamic textarea height adjustment (#11587) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: update Inputbar components to support dynamic textarea height adjustment * fix: align drag handler maxHeight with hook configuration (500px) - Update hardcoded maxHeight from 400 to 500 in InputbarCore drag handler - This ensures consistency with useTextareaResize hook maxHeight parameter - Resolves PR comment about maxHeight inconsistency between hook and drag handler 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --------- Co-authored-by: Claude --- src/renderer/src/hooks/useTextareaResize.ts | 2 +- .../home/Inputbar/AgentSessionInputbar.tsx | 8 ++++++-- .../src/pages/home/Inputbar/Inputbar.tsx | 10 +++++++--- .../home/Inputbar/components/InputbarCore.tsx | 17 ++++++++++------- 4 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/renderer/src/hooks/useTextareaResize.ts b/src/renderer/src/hooks/useTextareaResize.ts index 8688cb186f..c29d6134cd 100644 --- a/src/renderer/src/hooks/useTextareaResize.ts +++ b/src/renderer/src/hooks/useTextareaResize.ts @@ -51,7 +51,7 @@ export function useTextareaResize(options: UseTextareaResizeOptions = {}): UseTe const { maxHeight = 400, minHeight = 30, autoResize = true } = options const textareaRef = useRef(null) - const [customHeight, setCustomHeight] = useState() + const [customHeight, setCustomHeight] = useState(undefined) const [isExpanded, setIsExpanded] = useState(false) const resize = useCallback( diff --git a/src/renderer/src/pages/home/Inputbar/AgentSessionInputbar.tsx b/src/renderer/src/pages/home/Inputbar/AgentSessionInputbar.tsx index 2dbcd04067..850be7f727 100644 --- a/src/renderer/src/pages/home/Inputbar/AgentSessionInputbar.tsx +++ b/src/renderer/src/pages/home/Inputbar/AgentSessionInputbar.tsx @@ -177,8 +177,10 @@ const AgentSessionInputbarInner: FC = ({ assistant, agentId, session resize: resizeTextArea, focus: focusTextarea, setExpanded, - isExpanded: textareaIsExpanded - } = useTextareaResize({ maxHeight: 400, minHeight: 30 }) + isExpanded: textareaIsExpanded, + customHeight, + setCustomHeight + } = useTextareaResize({ maxHeight: 500, minHeight: 30 }) const { sendMessageShortcut, apiServer } = useSettings() const { t } = useTranslation() @@ -474,6 +476,8 @@ const AgentSessionInputbarInner: FC = ({ assistant, agentId, session text={text} onTextChange={setText} textareaRef={textareaRef} + height={customHeight} + onHeightChange={setCustomHeight} resizeTextArea={resizeTextArea} focusTextarea={focusTextarea} placeholder={placeholderText} diff --git a/src/renderer/src/pages/home/Inputbar/Inputbar.tsx b/src/renderer/src/pages/home/Inputbar/Inputbar.tsx index 0985023ead..fc95082e50 100644 --- a/src/renderer/src/pages/home/Inputbar/Inputbar.tsx +++ b/src/renderer/src/pages/home/Inputbar/Inputbar.tsx @@ -143,9 +143,11 @@ const InputbarInner: FC = ({ assistant: initialAssistant, se resize: resizeTextArea, focus: focusTextarea, setExpanded, - isExpanded: textareaIsExpanded + isExpanded: textareaIsExpanded, + customHeight, + setCustomHeight } = useTextareaResize({ - maxHeight: 400, + maxHeight: 500, minHeight: 30 }) @@ -257,7 +259,7 @@ const InputbarInner: FC = ({ assistant: 
initialAssistant, se setText('') setFiles([]) setTimeoutTimer('sendMessage_1', () => setText(''), 500) - setTimeoutTimer('sendMessage_2', () => resizeTextArea(true), 0) + setTimeoutTimer('sendMessage_2', () => resizeTextArea(), 0) } catch (error) { logger.warn('Failed to send message:', error as Error) parent?.recordException(error as Error) @@ -478,6 +480,8 @@ const InputbarInner: FC = ({ assistant: initialAssistant, se text={text} onTextChange={setText} textareaRef={textareaRef} + height={customHeight} + onHeightChange={setCustomHeight} resizeTextArea={resizeTextArea} focusTextarea={focusTextarea} isLoading={loading} diff --git a/src/renderer/src/pages/home/Inputbar/components/InputbarCore.tsx b/src/renderer/src/pages/home/Inputbar/components/InputbarCore.tsx index e1ae6d1bae..b55ca93cd3 100644 --- a/src/renderer/src/pages/home/Inputbar/components/InputbarCore.tsx +++ b/src/renderer/src/pages/home/Inputbar/components/InputbarCore.tsx @@ -50,6 +50,9 @@ export interface InputbarCoreProps { resizeTextArea: (force?: boolean) => void focusTextarea: () => void + height: number | undefined + onHeightChange: (height: number) => void + supportedExts: string[] isLoading: boolean @@ -104,6 +107,8 @@ export const InputbarCore: FC = ({ textareaRef, resizeTextArea, focusTextarea, + height, + onHeightChange, supportedExts, isLoading, onPause, @@ -131,8 +136,6 @@ export const InputbarCore: FC = ({ } = useSettings() const quickPanelTriggersEnabled = forceEnableQuickPanelTriggers ?? enableQuickPanelTriggers - const [textareaHeight, setTextareaHeight] = useState() - const { t } = useTranslation() const [isTranslating, setIsTranslating] = useState(false) const { getLanguageByLangcode } = useTranslate() @@ -538,8 +541,8 @@ export const InputbarCore: FC = ({ const handleMouseMove = (e: MouseEvent) => { const deltaY = startDragY.current - e.clientY - const newHeight = Math.max(40, Math.min(400, startHeight.current + deltaY)) - setTextareaHeight(newHeight) + const newHeight = Math.max(40, Math.min(500, startHeight.current + deltaY)) + onHeightChange(newHeight) } const handleMouseUp = () => { @@ -550,7 +553,7 @@ export const InputbarCore: FC = ({ document.addEventListener('mousemove', handleMouseMove) document.addEventListener('mouseup', handleMouseUp) }, - [config.enableDragDrop, setTextareaHeight, textareaRef] + [config.enableDragDrop, onHeightChange, textareaRef] ) const onQuote = useCallback( @@ -667,11 +670,11 @@ export const InputbarCore: FC = ({ variant="borderless" spellCheck={enableSpellCheck} rows={2} - autoSize={textareaHeight ? false : { minRows: 2, maxRows: 20 }} + autoSize={height ? 
false : { minRows: 2, maxRows: 20 }} styles={{ textarea: TextareaStyle }} style={{ fontSize, - height: textareaHeight, + height: height, minHeight: '30px' }} disabled={isTranslating || searching} From f571dd7af0f98a7616dfd29af52c26695e65703c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=A7=91=E5=9B=BF=E8=84=91=E8=A2=8B?= <70054568+eeee0717@users.noreply.github.com> Date: Wed, 3 Dec 2025 21:03:07 +0800 Subject: [PATCH 06/17] fix: ollama url (#11611) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: ollama url * feat: add Ollama provider integration and update dependencies * fix: update Ollama provider handling and API host formatting * feat: support Ollama Cloud * test: formatOllamaApiHost * chore * fix: update Ollama provider check to use isOllamaProvider function * fix: address PR review issues for Ollama provider Critical fixes: - Fix regex escape bug: /\v1$/ → /\/v1$/ in OpenAIBaseClient.ts - Add comprehensive error handling for Ollama fetch API (network errors, non-200 responses, invalid JSON) Minor improvements: - Fix inconsistent optional chaining in formatOllamaApiHost - Add null check in migration 180 for undefined state.llm.providers All checks passed: lint, typecheck, tests (2313 tests) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --------- Co-authored-by: suyao Co-authored-by: Claude --- package.json | 1 + .../embedjs/embeddings/EmbeddingsFactory.ts | 12 +--- .../legacy/clients/openai/OpenAIBaseClient.ts | 33 ++++++++- .../middleware/AiSdkMiddlewareBuilder.ts | 3 +- .../src/aiCore/provider/providerConfig.ts | 26 ++++++- .../aiCore/provider/providerInitialization.ts | 7 ++ src/renderer/src/aiCore/utils/options.ts | 24 ++++++- .../ProviderSettings/AddProviderPopup.tsx | 3 +- .../ProviderSettings/ProviderSetting.tsx | 5 ++ src/renderer/src/services/KnowledgeService.ts | 16 +++-- src/renderer/src/store/index.ts | 2 +- src/renderer/src/store/migrate.ts | 5 ++ src/renderer/src/types/provider.ts | 3 +- src/renderer/src/utils/__tests__/api.test.ts | 68 +++++++++++++++++++ src/renderer/src/utils/api.ts | 11 +++ src/renderer/src/utils/provider.ts | 4 ++ yarn.lock | 13 ++++ 17 files changed, 210 insertions(+), 26 deletions(-) diff --git a/package.json b/package.json index 3f95aee6d5..fd5eb0151d 100644 --- a/package.json +++ b/package.json @@ -318,6 +318,7 @@ "motion": "^12.10.5", "notion-helper": "^1.3.22", "npx-scope-finder": "^1.2.0", + "ollama-ai-provider-v2": "^1.5.5", "oxlint": "^1.22.0", "oxlint-tsgolint": "^0.2.0", "p-queue": "^8.1.0", diff --git a/src/main/knowledge/embedjs/embeddings/EmbeddingsFactory.ts b/src/main/knowledge/embedjs/embeddings/EmbeddingsFactory.ts index 8a780d5618..e9f459fd6c 100644 --- a/src/main/knowledge/embedjs/embeddings/EmbeddingsFactory.ts +++ b/src/main/knowledge/embedjs/embeddings/EmbeddingsFactory.ts @@ -19,19 +19,9 @@ export default class EmbeddingsFactory { }) } if (provider === 'ollama') { - if (baseURL.includes('v1/')) { - return new OllamaEmbeddings({ - model: model, - baseUrl: baseURL.replace('v1/', ''), - requestOptions: { - // @ts-ignore expected - 'encoding-format': 'float' - } - }) - } return new OllamaEmbeddings({ model: model, - baseUrl: baseURL, + baseUrl: baseURL.replace(/\/api$/, ''), requestOptions: { // @ts-ignore expected 'encoding-format': 'float' diff --git a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIBaseClient.ts b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIBaseClient.ts index 9a8d5f8383..dc97e74a3c 100644 --- 
a/src/renderer/src/aiCore/legacy/clients/openai/OpenAIBaseClient.ts +++ b/src/renderer/src/aiCore/legacy/clients/openai/OpenAIBaseClient.ts @@ -11,7 +11,7 @@ import { getStoreSetting } from '@renderer/hooks/useSettings' import { getAssistantSettings } from '@renderer/services/AssistantService' import store from '@renderer/store' import type { SettingsState } from '@renderer/store/settings' -import type { Assistant, GenerateImageParams, Model, Provider } from '@renderer/types' +import { type Assistant, type GenerateImageParams, type Model, type Provider } from '@renderer/types' import type { OpenAIResponseSdkMessageParam, OpenAIResponseSdkParams, @@ -25,7 +25,8 @@ import type { OpenAISdkRawOutput, ReasoningEffortOptionalParams } from '@renderer/types/sdk' -import { formatApiHost } from '@renderer/utils/api' +import { formatApiHost, withoutTrailingSlash } from '@renderer/utils/api' +import { isOllamaProvider } from '@renderer/utils/provider' import { BaseApiClient } from '../BaseApiClient' @@ -115,6 +116,34 @@ export abstract class OpenAIBaseClient< })) .filter(isSupportedModel) } + + if (isOllamaProvider(this.provider)) { + const baseUrl = withoutTrailingSlash(this.getBaseURL(false)) + .replace(/\/v1$/, '') + .replace(/\/api$/, '') + const response = await fetch(`${baseUrl}/api/tags`, { + headers: { + Authorization: `Bearer ${this.apiKey}`, + ...this.defaultHeaders(), + ...this.provider.extra_headers + } + }) + + if (!response.ok) { + throw new Error(`Ollama server returned ${response.status} ${response.statusText}`) + } + + const data = await response.json() + if (!data?.models || !Array.isArray(data.models)) { + throw new Error('Invalid response from Ollama API: missing models array') + } + + return data.models.map((model) => ({ + id: model.name, + object: 'model', + owned_by: 'ollama' + })) + } const response = await sdk.models.list() if (this.provider.id === 'together') { // @ts-ignore key is not typed diff --git a/src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts b/src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts index b314ddd737..10a4d59384 100644 --- a/src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts +++ b/src/renderer/src/aiCore/middleware/AiSdkMiddlewareBuilder.ts @@ -4,7 +4,7 @@ import { isGemini3Model, isSupportedThinkingTokenQwenModel } from '@renderer/con import type { MCPTool } from '@renderer/types' import { type Assistant, type Message, type Model, type Provider, SystemProviderIds } from '@renderer/types' import type { Chunk } from '@renderer/types/chunk' -import { isSupportEnableThinkingProvider } from '@renderer/utils/provider' +import { isOllamaProvider, isSupportEnableThinkingProvider } from '@renderer/utils/provider' import type { LanguageModelMiddleware } from 'ai' import { extractReasoningMiddleware, simulateStreamingMiddleware } from 'ai' import { isEmpty } from 'lodash' @@ -240,6 +240,7 @@ function addModelSpecificMiddlewares(builder: AiSdkMiddlewareBuilder, config: Ai // Use /think or /no_think suffix to control thinking mode if ( config.provider && + !isOllamaProvider(config.provider) && isSupportedThinkingTokenQwenModel(config.model) && !isSupportEnableThinkingProvider(config.provider) ) { diff --git a/src/renderer/src/aiCore/provider/providerConfig.ts b/src/renderer/src/aiCore/provider/providerConfig.ts index 528cc8f660..a5a84fccae 100644 --- a/src/renderer/src/aiCore/provider/providerConfig.ts +++ b/src/renderer/src/aiCore/provider/providerConfig.ts @@ -11,17 +11,24 @@ import { createVertexProvider, isVertexAIConfigured } from 
'@renderer/hooks/useV import { getProviderByModel } from '@renderer/services/AssistantService' import store from '@renderer/store' import { isSystemProvider, type Model, type Provider, SystemProviderIds } from '@renderer/types' -import { formatApiHost, formatAzureOpenAIApiHost, formatVertexApiHost, routeToEndpoint } from '@renderer/utils/api' +import { + formatApiHost, + formatAzureOpenAIApiHost, + formatOllamaApiHost, + formatVertexApiHost, + routeToEndpoint +} from '@renderer/utils/api' import { isAnthropicProvider, isAzureOpenAIProvider, isCherryAIProvider, isGeminiProvider, isNewApiProvider, + isOllamaProvider, isPerplexityProvider, isVertexProvider } from '@renderer/utils/provider' -import { cloneDeep } from 'lodash' +import { cloneDeep, isEmpty } from 'lodash' import type { AiSdkConfig } from '../types' import { aihubmixProviderCreator, newApiResolverCreator, vertexAnthropicProviderCreator } from './config' @@ -99,6 +106,8 @@ export function formatProviderApiHost(provider: Provider): Provider { } } else if (formatted.id === SystemProviderIds.copilot || formatted.id === SystemProviderIds.github) { formatted.apiHost = formatApiHost(formatted.apiHost, false) + } else if (isOllamaProvider(formatted)) { + formatted.apiHost = formatOllamaApiHost(formatted.apiHost) } else if (isGeminiProvider(formatted)) { formatted.apiHost = formatApiHost(formatted.apiHost, true, 'v1beta') } else if (isAzureOpenAIProvider(formatted)) { @@ -183,6 +192,19 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A } } + if (isOllamaProvider(actualProvider)) { + return { + providerId: 'ollama', + options: { + ...baseConfig, + headers: { + ...actualProvider.extra_headers, + Authorization: !isEmpty(baseConfig.apiKey) ? `Bearer ${baseConfig.apiKey}` : undefined + } + } + } + } + // 处理OpenAI模式 const extraOptions: any = {} extraOptions.endpoint = endpoint diff --git a/src/renderer/src/aiCore/provider/providerInitialization.ts b/src/renderer/src/aiCore/provider/providerInitialization.ts index 2e4b9fced2..a42e2ac659 100644 --- a/src/renderer/src/aiCore/provider/providerInitialization.ts +++ b/src/renderer/src/aiCore/provider/providerInitialization.ts @@ -94,6 +94,13 @@ export const NEW_PROVIDER_CONFIGS: ProviderConfig[] = [ import: () => import('@ai-sdk/cerebras'), creatorFunctionName: 'createCerebras', supportsImageGeneration: false + }, + { + id: 'ollama', + name: 'Ollama', + import: () => import('ollama-ai-provider-v2'), + creatorFunctionName: 'createOllama', + supportsImageGeneration: false } ] as const diff --git a/src/renderer/src/aiCore/utils/options.ts b/src/renderer/src/aiCore/utils/options.ts index a1352a801a..e39837dc27 100644 --- a/src/renderer/src/aiCore/utils/options.ts +++ b/src/renderer/src/aiCore/utils/options.ts @@ -29,12 +29,14 @@ import { type OpenAIServiceTier, OpenAIServiceTiers, type Provider, - type ServiceTier + type ServiceTier, + SystemProviderIds } from '@renderer/types' import { type AiSdkParam, isAiSdkParam, type OpenAIVerbosity } from '@renderer/types/aiCoreTypes' import { isSupportServiceTierProvider, isSupportVerbosityProvider } from '@renderer/utils/provider' import type { JSONValue } from 'ai' import { t } from 'i18next' +import type { OllamaCompletionProviderOptions } from 'ollama-ai-provider-v2' import { addAnthropicHeaders } from '../prepareParams/header' import { getAiSdkProviderId } from '../provider/factory' @@ -236,6 +238,9 @@ export function buildProviderOptions( case 'huggingface': providerSpecificOptions = buildOpenAIProviderOptions(assistant, 
model, capabilities, serviceTier) break + case SystemProviderIds.ollama: + providerSpecificOptions = buildOllamaProviderOptions(assistant, capabilities) + break default: // 对于其他 provider,使用通用的构建逻辑 providerSpecificOptions = { @@ -478,6 +483,23 @@ function buildBedrockProviderOptions( return providerOptions } +function buildOllamaProviderOptions( + assistant: Assistant, + capabilities: { + enableReasoning: boolean + enableWebSearch: boolean + enableGenerateImage: boolean + } +): OllamaCompletionProviderOptions { + const { enableReasoning } = capabilities + const providerOptions: OllamaCompletionProviderOptions = {} + const reasoningEffort = assistant.settings?.reasoning_effort + if (enableReasoning) { + providerOptions.think = !['none', undefined].includes(reasoningEffort) + } + return providerOptions +} + /** * 构建通用的 providerOptions(用于其他 provider) */ diff --git a/src/renderer/src/pages/settings/ProviderSettings/AddProviderPopup.tsx b/src/renderer/src/pages/settings/ProviderSettings/AddProviderPopup.tsx index e4923de1ba..b6d7145c91 100644 --- a/src/renderer/src/pages/settings/ProviderSettings/AddProviderPopup.tsx +++ b/src/renderer/src/pages/settings/ProviderSettings/AddProviderPopup.tsx @@ -259,7 +259,8 @@ const PopupContainer: React.FC = ({ provider, resolve }) => { { label: 'Anthropic', value: 'anthropic' }, { label: 'Azure OpenAI', value: 'azure-openai' }, { label: 'New API', value: 'new-api' }, - { label: 'CherryIN', value: 'cherryin-type' } + { label: 'CherryIN', value: 'cherryin-type' }, + { label: 'Ollama', value: 'ollama' } ]} /> diff --git a/src/renderer/src/pages/settings/ProviderSettings/ProviderSetting.tsx b/src/renderer/src/pages/settings/ProviderSettings/ProviderSetting.tsx index da05409683..c3b8904cef 100644 --- a/src/renderer/src/pages/settings/ProviderSettings/ProviderSetting.tsx +++ b/src/renderer/src/pages/settings/ProviderSettings/ProviderSetting.tsx @@ -29,6 +29,7 @@ import { isAzureOpenAIProvider, isGeminiProvider, isNewApiProvider, + isOllamaProvider, isOpenAICompatibleProvider, isOpenAIProvider, isVertexProvider @@ -277,6 +278,10 @@ const ProviderSetting: FC = ({ providerId }) => { const hostPreview = () => { const formattedApiHost = adaptProvider({ provider: { ...provider, apiHost } }).apiHost + if (isOllamaProvider(provider)) { + return formattedApiHost + '/chat' + } + if (isOpenAICompatibleProvider(provider)) { return formattedApiHost + '/chat/completions' } diff --git a/src/renderer/src/services/KnowledgeService.ts b/src/renderer/src/services/KnowledgeService.ts index ef35027ff5..e2f2e6fc15 100644 --- a/src/renderer/src/services/KnowledgeService.ts +++ b/src/renderer/src/services/KnowledgeService.ts @@ -6,12 +6,13 @@ import { DEFAULT_KNOWLEDGE_DOCUMENT_COUNT, DEFAULT_KNOWLEDGE_THRESHOLD } from '@ import { getEmbeddingMaxContext } from '@renderer/config/embedings' import { addSpan, endSpan } from '@renderer/services/SpanManagerService' import store from '@renderer/store' -import type { - FileMetadata, - KnowledgeBase, - KnowledgeBaseParams, - KnowledgeReference, - KnowledgeSearchResult +import { + type FileMetadata, + type KnowledgeBase, + type KnowledgeBaseParams, + type KnowledgeReference, + type KnowledgeSearchResult, + SystemProviderIds } from '@renderer/types' import type { Chunk } from '@renderer/types/chunk' import { ChunkType } from '@renderer/types/chunk' @@ -50,6 +51,9 @@ export const getKnowledgeBaseParams = (base: KnowledgeBase): KnowledgeBaseParams baseURL = baseURL + '/openai' } else if (isAzureOpenAIProvider(actualProvider)) { baseURL = baseURL + 
'/v1' + } else if (actualProvider.id === SystemProviderIds.ollama) { + // LangChain生态不需要/api结尾的URL + baseURL = baseURL.replace(/\/api$/, '') } logger.info(`Knowledge base ${base.name} using baseURL: ${baseURL}`) diff --git a/src/renderer/src/store/index.ts b/src/renderer/src/store/index.ts index 5c562885bb..94b51474b9 100644 --- a/src/renderer/src/store/index.ts +++ b/src/renderer/src/store/index.ts @@ -67,7 +67,7 @@ const persistedReducer = persistReducer( { key: 'cherry-studio', storage, - version: 179, + version: 180, blacklist: ['runtime', 'messages', 'messageBlocks', 'tabs', 'toolPermissions'], migrate }, diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts index b2993baf5d..6f05c2b348 100644 --- a/src/renderer/src/store/migrate.ts +++ b/src/renderer/src/store/migrate.ts @@ -2917,6 +2917,11 @@ const migrateConfig = { if (state.settings.openAI.verbosity === 'undefined') { state.settings.openAI.verbosity = undefined } + state.llm.providers.forEach((provider) => { + if (provider.id === SystemProviderIds.ollama) { + provider.type = 'ollama' + } + }) logger.info('migrate 180 success') return state } catch (error) { diff --git a/src/renderer/src/types/provider.ts b/src/renderer/src/types/provider.ts index 9d948f16d0..aea72fa287 100644 --- a/src/renderer/src/types/provider.ts +++ b/src/renderer/src/types/provider.ts @@ -15,7 +15,8 @@ export const ProviderTypeSchema = z.enum([ 'aws-bedrock', 'vertex-anthropic', 'new-api', - 'ai-gateway' + 'ai-gateway', + 'ollama' ]) export type ProviderType = z.infer diff --git a/src/renderer/src/utils/__tests__/api.test.ts b/src/renderer/src/utils/__tests__/api.test.ts index c00c2e0f60..fe34dcf26e 100644 --- a/src/renderer/src/utils/__tests__/api.test.ts +++ b/src/renderer/src/utils/__tests__/api.test.ts @@ -6,6 +6,7 @@ import { formatApiHost, formatApiKeys, formatAzureOpenAIApiHost, + formatOllamaApiHost, formatVertexApiHost, getTrailingApiVersion, hasAPIVersion, @@ -341,6 +342,73 @@ describe('api', () => { }) }) + describe('formatOllamaApiHost', () => { + it('removes trailing slash and appends /api for basic hosts', () => { + expect(formatOllamaApiHost('https://api.ollama.com/')).toBe('https://api.ollama.com/api') + expect(formatOllamaApiHost('http://localhost:11434/')).toBe('http://localhost:11434/api') + }) + + it('appends /api when no suffix is present', () => { + expect(formatOllamaApiHost('https://api.ollama.com')).toBe('https://api.ollama.com/api') + expect(formatOllamaApiHost('http://localhost:11434')).toBe('http://localhost:11434/api') + }) + + it('removes /v1 suffix and appends /api', () => { + expect(formatOllamaApiHost('https://api.ollama.com/v1')).toBe('https://api.ollama.com/api') + expect(formatOllamaApiHost('http://localhost:11434/v1/')).toBe('http://localhost:11434/api') + }) + + it('removes /api suffix and keeps /api', () => { + expect(formatOllamaApiHost('https://api.ollama.com/api')).toBe('https://api.ollama.com/api') + expect(formatOllamaApiHost('http://localhost:11434/api/')).toBe('http://localhost:11434/api') + }) + + it('removes /chat suffix and appends /api', () => { + expect(formatOllamaApiHost('https://api.ollama.com/chat')).toBe('https://api.ollama.com/api') + expect(formatOllamaApiHost('http://localhost:11434/chat/')).toBe('http://localhost:11434/api') + }) + + it('handles multiple suffix combinations correctly', () => { + expect(formatOllamaApiHost('https://api.ollama.com/v1/chat')).toBe('https://api.ollama.com/v1/api') + 
expect(formatOllamaApiHost('https://api.ollama.com/chat/v1')).toBe('https://api.ollama.com/api') + expect(formatOllamaApiHost('https://api.ollama.com/api/chat')).toBe('https://api.ollama.com/api/api') + }) + + it('preserves complex paths while handling suffixes', () => { + expect(formatOllamaApiHost('https://api.ollama.com/custom/path')).toBe('https://api.ollama.com/custom/path/api') + expect(formatOllamaApiHost('https://api.ollama.com/custom/path/')).toBe('https://api.ollama.com/custom/path/api') + expect(formatOllamaApiHost('https://api.ollama.com/custom/path/v1')).toBe( + 'https://api.ollama.com/custom/path/api' + ) + }) + + it('handles edge cases with multiple slashes', () => { + expect(formatOllamaApiHost('https://api.ollama.com//')).toBe('https://api.ollama.com//api') + expect(formatOllamaApiHost('https://api.ollama.com///v1///')).toBe('https://api.ollama.com///v1///api') + }) + + it('handles localhost with different ports', () => { + expect(formatOllamaApiHost('http://localhost:3000')).toBe('http://localhost:3000/api') + expect(formatOllamaApiHost('http://127.0.0.1:11434/')).toBe('http://127.0.0.1:11434/api') + expect(formatOllamaApiHost('https://localhost:8080/v1')).toBe('https://localhost:8080/api') + }) + + it('handles IP addresses', () => { + expect(formatOllamaApiHost('http://192.168.1.100:11434')).toBe('http://192.168.1.100:11434/api') + expect(formatOllamaApiHost('https://10.0.0.1:8080/v1/')).toBe('https://10.0.0.1:8080/api') + }) + + it('handles empty strings and edge cases', () => { + expect(formatOllamaApiHost('')).toBe('/api') + expect(formatOllamaApiHost('/')).toBe('/api') + }) + + it('preserves protocol and handles mixed case', () => { + expect(formatOllamaApiHost('HTTPS://API.OLLAMA.COM')).toBe('HTTPS://API.OLLAMA.COM/api') + expect(formatOllamaApiHost('HTTP://localhost:11434/V1/')).toBe('HTTP://localhost:11434/V1/api') + }) + }) + describe('getTrailingApiVersion', () => { it('extracts trailing API version from URL', () => { expect(getTrailingApiVersion('https://api.example.com/v1')).toBe('v1') diff --git a/src/renderer/src/utils/api.ts b/src/renderer/src/utils/api.ts index efadb7813c..10f31ae5c0 100644 --- a/src/renderer/src/utils/api.ts +++ b/src/renderer/src/utils/api.ts @@ -110,6 +110,17 @@ export function formatApiHost(host?: string, supportApiVersion: boolean = true, } } +/** + * 格式化 Ollama 的 API 主机地址。 + */ +export function formatOllamaApiHost(host: string): string { + const normalizedHost = withoutTrailingSlash(host) + ?.replace(/\/v1$/, '') + ?.replace(/\/api$/, '') + ?.replace(/\/chat$/, '') + return formatApiHost(normalizedHost + '/api', false) +} + /** * 格式化 Azure OpenAI 的 API 主机地址。 */ diff --git a/src/renderer/src/utils/provider.ts b/src/renderer/src/utils/provider.ts index fae0aababa..0af511b97e 100644 --- a/src/renderer/src/utils/provider.ts +++ b/src/renderer/src/utils/provider.ts @@ -175,6 +175,10 @@ export function isAIGatewayProvider(provider: Provider): boolean { return provider.type === 'ai-gateway' } +export function isOllamaProvider(provider: Provider): boolean { + return provider.type === 'ollama' +} + const NOT_SUPPORT_API_VERSION_PROVIDERS = ['github', 'copilot', 'perplexity'] as const satisfies SystemProviderId[] export const isSupportAPIVersionProvider = (provider: Provider) => { diff --git a/yarn.lock b/yarn.lock index 22b6c581db..c832447198 100644 --- a/yarn.lock +++ b/yarn.lock @@ -10232,6 +10232,7 @@ __metadata: notion-helper: "npm:^1.3.22" npx-scope-finder: "npm:^1.2.0" officeparser: "npm:^4.2.0" + ollama-ai-provider-v2: "npm:^1.5.5" 
os-proxy-config: "npm:^1.1.2" oxlint: "npm:^1.22.0" oxlint-tsgolint: "npm:^0.2.0" @@ -19934,6 +19935,18 @@ __metadata: languageName: node linkType: hard +"ollama-ai-provider-v2@npm:^1.5.5": + version: 1.5.5 + resolution: "ollama-ai-provider-v2@npm:1.5.5" + dependencies: + "@ai-sdk/provider": "npm:^2.0.0" + "@ai-sdk/provider-utils": "npm:^3.0.17" + peerDependencies: + zod: ^4.0.16 + checksum: 10c0/da40c8097bd8205c46eccfbd13e77c51a6ce97a29b886adfc9e1b8444460b558138d1ed4428491fcc9378d46f649dd0a9b1e5b13cf6bbc8f5385e8b321734e72 + languageName: node + linkType: hard + "ollama@npm:^0.5.12": version: 0.5.16 resolution: "ollama@npm:0.5.16" From aeabc28451f5a1f3af0402316f255851d769ec1e Mon Sep 17 00:00:00 2001 From: Phantom Date: Wed, 3 Dec 2025 22:32:18 +0800 Subject: [PATCH 07/17] chore(feishu-notify): modify notification card (#11656) refactor(feishu-notify): simplify issue card layout by removing redundant elements Remove unnecessary div elements and consolidate title display into the card header --- scripts/feishu-notify.js | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/scripts/feishu-notify.js b/scripts/feishu-notify.js index aae9004a48..d238dedb90 100644 --- a/scripts/feishu-notify.js +++ b/scripts/feishu-notify.js @@ -91,23 +91,6 @@ function createIssueCard(issueData) { return { elements: [ - { - tag: 'div', - text: { - tag: 'lark_md', - content: `**🐛 New GitHub Issue #${issueNumber}**` - } - }, - { - tag: 'hr' - }, - { - tag: 'div', - text: { - tag: 'lark_md', - content: `**📝 Title:** ${issueTitle}` - } - }, { tag: 'div', text: { @@ -158,7 +141,7 @@ function createIssueCard(issueData) { template: 'blue', title: { tag: 'plain_text', - content: '🆕 Cherry Studio - New Issue' + content: `#${issueNumber} - ${issueTitle}` } } } From 4f701d3e454dd1ad6cf4c9c1f8a89278c75ac9d7 Mon Sep 17 00:00:00 2001 From: beyondkmp Date: Thu, 4 Dec 2025 10:57:42 +0800 Subject: [PATCH 08/17] fix(apiServer): use 127.0.0.1 instead of localhost for better compatibility (#11673) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(apiServer): use 127.0.0.1 instead of localhost for better compatibility - Change default host from localhost to 127.0.0.1 in config and settings - Add buildApiServerUrl helper to properly construct API server URLs - Update OpenAPI documentation server URL - Update test files to use 127.0.0.1 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude * fix(migration): migrate existing localhost config to 127.0.0.1 - Add migration 180 to automatically update localhost to 127.0.0.1 - Handle both plain host and hosts with http/https protocol - Increment store version to 180 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude * refactor(apiServer): simplify buildApiServerUrl implementation - Remove complex URL parsing and protocol handling - Use simple string concatenation for URL building - Assume http protocol since API server is local 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude * refactor: remove buildApiServerUrl helper and simplify migration - Remove buildApiServerUrl helper function - Use 127.0.0.1 directly in URL construction - Simplify migration 180 to unconditionally set host to 127.0.0.1 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude * refactor(apiServer): fix critical bugs and improve code structure 🔴 Critical Fixes: - Fix config.ts to use stored host value instead of 
ignoring it - Fix hardcoded 127.0.0.1 URLs to use apiServerConfig.host 🟡 Improvements: - Extract API_SERVER_DEFAULTS to shared constants in packages/shared/config/constant.ts - Apply consistent fallback pattern using API_SERVER_DEFAULTS.HOST and API_SERVER_DEFAULTS.PORT - Update all imports to use shared constants across main and renderer processes Files changed: - packages/shared/config/constant.ts: Add API_SERVER_DEFAULTS constants - src/main/apiServer/config.ts: Use stored host with fallback - src/main/apiServer/middleware/openapi.ts: Use constants - src/renderer/src/pages/settings/ToolSettings/ApiServerSettings/ApiServerSettings.tsx: Use config host and constants - src/renderer/src/store/settings.ts: Use constants in initial state - src/renderer/src/store/migrate.ts: Use constants in migration 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude * update * fix(apiServer): use relative URL in OpenAPI spec for better compatibility - Change server URL from hardcoded defaults to relative path '/' - This ensures Swagger UI "Try it out" works correctly regardless of configured host/port - Remove unused API_SERVER_DEFAULTS import 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude --------- Co-authored-by: Claude --- packages/shared/config/constant.ts | 5 +++++ src/main/apiServer/config.ts | 12 +++++------- src/main/apiServer/middleware/openapi.ts | 4 ++-- .../ApiServerSettings/ApiServerSettings.tsx | 15 ++++++++++----- src/renderer/src/store/migrate.ts | 8 ++++++-- src/renderer/src/store/settings.ts | 6 +++--- tests/apis/agents/agents.http | 3 +-- tests/apis/agents/sessions.http | 2 +- tests/apis/chat.http | 2 +- 9 files changed, 34 insertions(+), 23 deletions(-) diff --git a/packages/shared/config/constant.ts b/packages/shared/config/constant.ts index c05fde902c..1e02ce7706 100644 --- a/packages/shared/config/constant.ts +++ b/packages/shared/config/constant.ts @@ -7,6 +7,11 @@ export const documentExts = ['.pdf', '.doc', '.docx', '.pptx', '.xlsx', '.odt', export const thirdPartyApplicationExts = ['.draftsExport'] export const bookExts = ['.epub'] +export const API_SERVER_DEFAULTS = { + HOST: '127.0.0.1', + PORT: 23333 +} + /** * A flat array of all file extensions known by the linguist database. * This is the primary source for identifying code files. diff --git a/src/main/apiServer/config.ts b/src/main/apiServer/config.ts index 60b1986be9..0966827a7b 100644 --- a/src/main/apiServer/config.ts +++ b/src/main/apiServer/config.ts @@ -1,3 +1,4 @@ +import { API_SERVER_DEFAULTS } from '@shared/config/constant' import type { ApiServerConfig } from '@types' import { v4 as uuidv4 } from 'uuid' @@ -6,9 +7,6 @@ import { reduxService } from '../services/ReduxService' const logger = loggerService.withContext('ApiServerConfig') -const defaultHost = 'localhost' -const defaultPort = 23333 - class ConfigManager { private _config: ApiServerConfig | null = null @@ -30,8 +28,8 @@ class ConfigManager { } this._config = { enabled: serverSettings?.enabled ?? false, - port: serverSettings?.port ?? defaultPort, - host: defaultHost, + port: serverSettings?.port ?? API_SERVER_DEFAULTS.PORT, + host: serverSettings?.host ?? 
API_SERVER_DEFAULTS.HOST, apiKey: apiKey } return this._config @@ -39,8 +37,8 @@ class ConfigManager { logger.warn('Failed to load config from Redux, using defaults', { error }) this._config = { enabled: false, - port: defaultPort, - host: defaultHost, + port: API_SERVER_DEFAULTS.PORT, + host: API_SERVER_DEFAULTS.HOST, apiKey: this.generateApiKey() } return this._config diff --git a/src/main/apiServer/middleware/openapi.ts b/src/main/apiServer/middleware/openapi.ts index ff01005bd9..6b374901ca 100644 --- a/src/main/apiServer/middleware/openapi.ts +++ b/src/main/apiServer/middleware/openapi.ts @@ -20,8 +20,8 @@ const swaggerOptions: swaggerJSDoc.Options = { }, servers: [ { - url: 'http://localhost:23333', - description: 'Local development server' + url: '/', + description: 'Current server' } ], components: { diff --git a/src/renderer/src/pages/settings/ToolSettings/ApiServerSettings/ApiServerSettings.tsx b/src/renderer/src/pages/settings/ToolSettings/ApiServerSettings/ApiServerSettings.tsx index 0205ec676a..58a0ab4b11 100644 --- a/src/renderer/src/pages/settings/ToolSettings/ApiServerSettings/ApiServerSettings.tsx +++ b/src/renderer/src/pages/settings/ToolSettings/ApiServerSettings/ApiServerSettings.tsx @@ -4,6 +4,7 @@ import type { RootState } from '@renderer/store' import { useAppDispatch } from '@renderer/store' import { setApiServerApiKey, setApiServerPort } from '@renderer/store/settings' import { formatErrorMessage } from '@renderer/utils/error' +import { API_SERVER_DEFAULTS } from '@shared/config/constant' import { Alert, Button, Input, InputNumber, Tooltip, Typography } from 'antd' import { Copy, ExternalLink, Play, RotateCcw, Square } from 'lucide-react' import type { FC } from 'react' @@ -56,7 +57,7 @@ const ApiServerSettings: FC = () => { } const handlePortChange = (value: string) => { - const port = parseInt(value) || 23333 + const port = parseInt(value) || API_SERVER_DEFAULTS.PORT if (port >= 1000 && port <= 65535) { dispatch(setApiServerPort(port)) } @@ -64,7 +65,9 @@ const ApiServerSettings: FC = () => { const openApiDocs = () => { if (apiServerRunning) { - window.open(`http://localhost:${apiServerConfig.port}/api-docs`, '_blank') + const host = apiServerConfig.host || API_SERVER_DEFAULTS.HOST + const port = apiServerConfig.port || API_SERVER_DEFAULTS.PORT + window.open(`http://${host}:${port}/api-docs`, '_blank') } } @@ -98,7 +101,9 @@ const ApiServerSettings: FC = () => { {apiServerRunning ? t('apiServer.status.running') : t('apiServer.status.stopped')} - {apiServerRunning ? `http://localhost:${apiServerConfig.port}` : t('apiServer.fields.port.description')} + {apiServerRunning + ? 
`http://${apiServerConfig.host || API_SERVER_DEFAULTS.HOST}:${apiServerConfig.port || API_SERVER_DEFAULTS.PORT}` + : t('apiServer.fields.port.description')} @@ -119,11 +124,11 @@ const ApiServerSettings: FC = () => { {!apiServerRunning && ( handlePortChange(String(value || 23333))} + onChange={(value) => handlePortChange(String(value || API_SERVER_DEFAULTS.PORT))} min={1000} max={65535} disabled={apiServerRunning} - placeholder="23333" + placeholder={String(API_SERVER_DEFAULTS.PORT)} size="middle" /> )} diff --git a/src/renderer/src/store/migrate.ts b/src/renderer/src/store/migrate.ts index 6f05c2b348..0e89227907 100644 --- a/src/renderer/src/store/migrate.ts +++ b/src/renderer/src/store/migrate.ts @@ -32,6 +32,7 @@ import { isSupportDeveloperRoleProvider, isSupportStreamOptionsProvider } from '@renderer/utils/provider' +import { API_SERVER_DEFAULTS } from '@shared/config/constant' import { defaultByPassRules, UpgradeChannel } from '@shared/config/constant' import { isEmpty } from 'lodash' import { createMigrate } from 'redux-persist' @@ -2032,8 +2033,8 @@ const migrateConfig = { if (!state.settings.apiServer) { state.settings.apiServer = { enabled: false, - host: 'localhost', - port: 23333, + host: API_SERVER_DEFAULTS.HOST, + port: API_SERVER_DEFAULTS.PORT, apiKey: `cs-sk-${uuid()}` } } @@ -2909,6 +2910,9 @@ const migrateConfig = { }, '180': (state: RootState) => { try { + if (state.settings.apiServer) { + state.settings.apiServer.host = API_SERVER_DEFAULTS.HOST + } // @ts-expect-error if (state.settings.openAI.summaryText === 'undefined') { state.settings.openAI.summaryText = undefined diff --git a/src/renderer/src/store/settings.ts b/src/renderer/src/store/settings.ts index d6c6856063..36a478853a 100644 --- a/src/renderer/src/store/settings.ts +++ b/src/renderer/src/store/settings.ts @@ -18,7 +18,7 @@ import type { import { ThemeMode } from '@renderer/types' import type { OpenAISummaryText, OpenAIVerbosity } from '@renderer/types/aiCoreTypes' import { uuid } from '@renderer/utils' -import { UpgradeChannel } from '@shared/config/constant' +import { API_SERVER_DEFAULTS, UpgradeChannel } from '@shared/config/constant' import type { RemoteSyncState } from './backup' @@ -410,8 +410,8 @@ export const initialState: SettingsState = { // API Server apiServer: { enabled: false, - host: 'localhost', - port: 23333, + host: API_SERVER_DEFAULTS.HOST, + port: API_SERVER_DEFAULTS.PORT, apiKey: `cs-sk-${uuid()}` }, showMessageOutline: false diff --git a/tests/apis/agents/agents.http b/tests/apis/agents/agents.http index ce21217ffd..3717fbfe30 100644 --- a/tests/apis/agents/agents.http +++ b/tests/apis/agents/agents.http @@ -1,4 +1,4 @@ -@host=http://localhost:23333 +@host=http://127.0.0.1:23333 @token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194 @agent_id=agent_1758092281575_tn9dxio9k @@ -56,4 +56,3 @@ Content-Type: application/json "max_turns": 5 } } - diff --git a/tests/apis/agents/sessions.http b/tests/apis/agents/sessions.http index f7e941c93c..b236e214dc 100644 --- a/tests/apis/agents/sessions.http +++ b/tests/apis/agents/sessions.http @@ -1,5 +1,5 @@ -@host=http://localhost:23333 +@host=http://127.0.0.1:23333 @token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194 @agent_id=agent_1758092281575_tn9dxio9k @session_id=session_1758278828236_mqj91e7c0 diff --git a/tests/apis/chat.http b/tests/apis/chat.http index eefa86deeb..ab556ccb0f 100644 --- a/tests/apis/chat.http +++ b/tests/apis/chat.http @@ -1,4 +1,4 @@ -@host=http://localhost:23333 +@host=http://127.0.0.1:23333 
+@host=http://127.0.0.1:23333
@token=cs-sk-af798ed4-7cf5-4fd7-ae4b-df203b164194 @agent_id=agent_1758092281575_tn9dxio9k From 387e8f77f5b524d53c6ee583783242d717b31e18 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 4 Dec 2025 13:30:21 +0800 Subject: [PATCH 09/17] ci(deps): bump peter-evans/repository-dispatch from 3 to 4 (#11594) Bumps [peter-evans/repository-dispatch](https://github.com/peter-evans/repository-dispatch) from 3 to 4. - [Release notes](https://github.com/peter-evans/repository-dispatch/releases) - [Commits](https://github.com/peter-evans/repository-dispatch/compare/v3...v4) --- updated-dependencies: - dependency-name: peter-evans/repository-dispatch dependency-version: '4' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/dispatch-docs-update.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dispatch-docs-update.yml b/.github/workflows/dispatch-docs-update.yml index b9457faec6..bb33c60b33 100644 --- a/.github/workflows/dispatch-docs-update.yml +++ b/.github/workflows/dispatch-docs-update.yml @@ -19,7 +19,7 @@ jobs: echo "tag=${{ github.event.release.tag_name }}" >> $GITHUB_OUTPUT - name: Dispatch update-download-version workflow to cherry-studio-docs - uses: peter-evans/repository-dispatch@v3 + uses: peter-evans/repository-dispatch@v4 with: token: ${{ secrets.REPO_DISPATCH_TOKEN }} repository: CherryHQ/cherry-studio-docs From fb201731947b267b1fbd5a635810acb837ea8202 Mon Sep 17 00:00:00 2001 From: KazooTTT Date: Thu, 4 Dec 2025 14:44:52 +0800 Subject: [PATCH 10/17] fix(inputbar): block enter send while generating (#11672) * fix(inputbar): block enter send while generating - reuse unified send disable state for keyboard and button - prevent enter sending when loading or searching * refactor: optimize InputbarCore component's useHotkeys hook Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> * refactor(InputbarCore): rename cannotSend to noContent for clarity --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- .../home/Inputbar/components/InputbarCore.tsx | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/renderer/src/pages/home/Inputbar/components/InputbarCore.tsx b/src/renderer/src/pages/home/Inputbar/components/InputbarCore.tsx index b55ca93cd3..99a93e537c 100644 --- a/src/renderer/src/pages/home/Inputbar/components/InputbarCore.tsx +++ b/src/renderer/src/pages/home/Inputbar/components/InputbarCore.tsx @@ -181,8 +181,10 @@ export const InputbarCore: FC = ({ enabled: config.enableDragDrop, t }) - // 判断是否可以发送:文本不为空或有文件 - const cannotSend = isEmpty && files.length === 0 + // 判断是否有内容:文本不为空或有文件 + const noContent = isEmpty && files.length === 0 + // 发送入口统一禁用条件:空内容、正在生成、全局搜索态 + const isSendDisabled = noContent || isLoading || searching useEffect(() => { setExtensions(supportedExts) @@ -313,7 +315,7 @@ export const InputbarCore: FC = ({ const isEnterPressed = event.key === 'Enter' && !event.nativeEvent.isComposing if (isEnterPressed) { - if (isSendMessageKeyPressed(event, sendMessageShortcut) && !cannotSend) { + if (isSendMessageKeyPressed(event, sendMessageShortcut) && !isSendDisabled) { handleSendMessage() event.preventDefault() return @@ -359,7 +361,7 @@ export const InputbarCore: FC = ({ translate, handleToggleExpanded, sendMessageShortcut, - cannotSend, + 
cannotSend, +
isSendDisabled, handleSendMessage, setText, setTimeoutTimer, @@ -620,7 +622,7 @@ export const InputbarCore: FC = ({ const rightSectionExtras = useMemo(() => { const extras: React.ReactNode[] = [] extras.push() - extras.push() + extras.push() if (isLoading) { extras.push( @@ -633,7 +635,7 @@ export const InputbarCore: FC = ({ } return <>{extras} - }, [text, onTranslated, isTranslating, handleSendMessage, cannotSend, isLoading, searching, t, onPause]) + }, [text, onTranslated, isTranslating, handleSendMessage, isSendDisabled, isLoading, t, onPause]) const quickPanelElement = config.enableQuickPanel ? : null From 9637fb8a4379f56f76419ac6d5f39e6643d0ca6b Mon Sep 17 00:00:00 2001 From: fullex <106392080+0xfullex@users.noreply.github.com> Date: Thu, 4 Dec 2025 14:46:37 +0800 Subject: [PATCH 11/17] fix(a11y): improve screen reader (NVDA) support with aria-label attributes (#11678) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(a11y): improve screen reader support with aria-label attributes Add aria-label attributes to all interactive buttons and toolbar elements to improve accessibility for screen reader users (NVDA, etc.). Changes: - Add aria-label with i18n translations to all ActionIconButton components - Add role="button", tabIndex, and keyboard handlers for non-semantic elements - Fix hardcoded English aria-labels in WindowControls to use i18n - Add aria-pressed for toggle buttons to indicate state - Add aria-expanded for expandable menus - Add aria-disabled for disabled buttons Components updated: - SendMessageButton, CopyButton, SelectionToolbar - CodeToolbar, RichEditor toolbar, MinimalToolbar - WindowControls - 12 Inputbar tool buttons (WebSearch, Attachment, KnowledgeBase, etc.) 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude * fix(a11y): enhance accessibility in CodeToolbar snapshot Added aria-label, role, and tabindex attributes to improve screen reader support for interactive elements in the CodeToolbar component. This change aligns with ongoing efforts to enhance accessibility across the application. 
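In code, the pattern listed above amounts to giving every non-semantic interactive element a role, a tab stop, a translated label, keyboard activation, and a state attribute. A minimal sketch of one such toggle, assuming react-i18next and lucide-react as used elsewhere in the renderer; the component name, props, and i18n key are illustrative rather than taken from the diff:

import type { KeyboardEvent } from 'react'
import { Globe } from 'lucide-react'
import { useTranslation } from 'react-i18next'

// Illustrative toggle, not a component from this patch: a non-semantic element
// exposed to screen readers as a stateful button. The i18n key is an assumption.
const ToolToggleButton = ({ active, onToggle }: { active: boolean; onToggle: () => void }) => {
  const { t } = useTranslation()

  // Enter and Space activate the element, mirroring native <button> behaviour.
  const handleKeyDown = (event: KeyboardEvent<HTMLDivElement>) => {
    if (event.key === 'Enter' || event.key === ' ') {
      event.preventDefault()
      onToggle()
    }
  }

  return (
    <div
      role="button"
      tabIndex={0}
      aria-label={t('chat.input.web_search.label')}
      aria-pressed={active}
      onClick={onToggle}
      onKeyDown={handleKeyDown}>
      <Globe size={18} />
    </div>
  )
}

aria-pressed fits on/off toggles like this one, while menus that open a panel use aria-expanded instead, matching the split described in the commit message.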
--------- Co-authored-by: Claude --- .../__snapshots__/CodeToolbar.test.tsx.snap | 4 +++ .../src/components/CodeToolbar/button.tsx | 23 ++++++++++-- .../src/components/CodeToolbar/toolbar.tsx | 14 +++++++- src/renderer/src/components/CopyButton.tsx | 20 +++++++++-- .../src/components/RichEditor/toolbar.tsx | 4 ++- .../src/components/WindowControls/index.tsx | 8 +++-- .../pages/home/Inputbar/SendMessageButton.tsx | 19 ++++++++-- .../components/ActivityDirectoryButton.tsx | 2 +- .../tools/components/AttachmentButton.tsx | 14 ++++---- .../tools/components/GenerateImageButton.tsx | 16 ++++----- .../tools/components/KnowledgeBaseButton.tsx | 3 +- .../tools/components/MCPToolsButton.tsx | 5 ++- .../tools/components/MentionModelsButton.tsx | 5 ++- .../tools/components/NewContextButton.tsx | 4 ++- .../tools/components/QuickPhrasesButton.tsx | 2 +- .../tools/components/SlashCommandsButton.tsx | 6 +++- .../tools/components/ThinkingButton.tsx | 21 +++++------ .../tools/components/UrlContextbutton.tsx | 6 +++- .../tools/components/WebSearchButton.tsx | 14 ++++---- .../minapps/components/MinimalToolbar.tsx | 36 ++++++++++++++----- .../selection/toolbar/SelectionToolbar.tsx | 16 ++++++++- 21 files changed, 183 insertions(+), 59 deletions(-) diff --git a/src/renderer/src/components/CodeToolbar/__tests__/__snapshots__/CodeToolbar.test.tsx.snap b/src/renderer/src/components/CodeToolbar/__tests__/__snapshots__/CodeToolbar.test.tsx.snap index c2b4028e32..56fb14ccc4 100644 --- a/src/renderer/src/components/CodeToolbar/__tests__/__snapshots__/CodeToolbar.test.tsx.snap +++ b/src/renderer/src/components/CodeToolbar/__tests__/__snapshots__/CodeToolbar.test.tsx.snap @@ -64,7 +64,11 @@ exports[`CodeToolbar > basic rendering > should match snapshot with mixed tools data-title="code_block.more" >