diff --git a/electron-builder.yml b/electron-builder.yml
index bf7b7b4e91..af1774c941 100644
--- a/electron-builder.yml
+++ b/electron-builder.yml
@@ -28,6 +28,12 @@ files:
- "!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}"
- "!**/{.editorconfig,.jekyll-metadata}"
- "!src"
+ - "!config"
+ - "!patches"
+ - "!app-upgrade-config.json"
+ - "!**/node_modules/**/*.cpp"
+ - "!**/node_modules/node-addon-api/**"
+ - "!**/node_modules/prebuild-install/**"
- "!scripts"
- "!local"
- "!docs"
diff --git a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts
index 224cee05ae..67dd33e9e0 100644
--- a/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts
+++ b/packages/aiCore/src/core/plugins/built-in/toolUsePlugin/promptToolUsePlugin.ts
@@ -154,7 +154,8 @@ User:
search
26 million (2019)
-Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
+
+A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
/**
* 构建可用工具部分(提取自 Cherry Studio)
diff --git a/packages/shared/aiCore/index.ts b/packages/shared/aiCore/index.ts
index 097047e20a..7ed6446efb 100644
--- a/packages/shared/aiCore/index.ts
+++ b/packages/shared/aiCore/index.ts
@@ -6,6 +6,7 @@
*/
export type { AiSdkConfig, AiSdkConfigContext, ApiKeyRotator, ProviderFormatContext } from './providerConfig'
export {
+ createDeveloperToSystemFetch,
defaultFormatAzureOpenAIApiHost,
formatProviderApiHost,
getBaseUrlForAiSdk,
diff --git a/packages/shared/aiCore/providerConfig.ts b/packages/shared/aiCore/providerConfig.ts
index 0c80a79448..ce4a3f2664 100644
--- a/packages/shared/aiCore/providerConfig.ts
+++ b/packages/shared/aiCore/providerConfig.ts
@@ -118,6 +118,18 @@ export interface AiSdkConfigContext {
* Returns a fetch function that adds signature headers to requests
*/
getCherryAISignedFetch?: () => typeof globalThis.fetch
+
+ /**
+ * Check if provider supports developer role
+ * Default: returns true (assumes support)
+ */
+ isSupportDeveloperRoleProvider?: (provider: MinimalProvider) => boolean
+
+ /**
+ * Check if model is an OpenAI reasoning model
+ * Default: returns false
+ */
+ isOpenAIReasoningModel?: (modelId: string) => boolean
}
/**
@@ -281,6 +293,16 @@ export function providerToAiSdkConfig(
extraOptions.fetch = context.fetch
}
+ // Convert 'developer' role to 'system' unless the provider supports it AND the model is an OpenAI reasoning model
+ // bug: https://github.com/vercel/ai/issues/10982
+ // fixPR: https://github.com/vercel/ai/pull/11127
+ // TODO: the fix PR was not backported to v5; remove this workaround when upgrading to v6
+ const isSupportDeveloperRole = context.isSupportDeveloperRoleProvider?.(provider) ?? true
+ const isReasoningModel = context.isOpenAIReasoningModel?.(modelId) ?? false
+ if (!isSupportDeveloperRole || !isReasoningModel) {
+ extraOptions.fetch = createDeveloperToSystemFetch(extraOptions.fetch as typeof fetch | undefined)
+ }
+
// Check if AI SDK supports this provider natively
if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
const options = ProviderConfigFactory.fromProvider(aiSdkProviderId, baseConfig, extraOptions)
@@ -415,3 +437,41 @@ export const simpleKeyRotator: ApiKeyRotator = {
return keyList[0] || keys
}
}
+
+/**
+ * Creates a custom fetch wrapper that converts 'developer' role to 'system' role in request body.
+ * This is needed for providers that don't support the 'developer' role (e.g., Azure DeepSeek R1).
+ *
+ * @param originalFetch - Optional original fetch function to wrap
+ * @returns A fetch function that transforms the request body
+ */
+export function createDeveloperToSystemFetch(originalFetch?: typeof fetch): typeof fetch {
+ const baseFetch = originalFetch ?? fetch
+ return async (input: RequestInfo | URL, init?: RequestInit) => {
+ let options = init
+ if (options?.body && typeof options.body === 'string') {
+ try {
+ const body = JSON.parse(options.body)
+ if (body.messages && Array.isArray(body.messages)) {
+ let hasChanges = false
+ body.messages = body.messages.map((msg: { role: string }) => {
+ if (msg.role === 'developer') {
+ hasChanges = true
+ return { ...msg, role: 'system' }
+ }
+ return msg
+ })
+ if (hasChanges) {
+ options = {
+ ...options,
+ body: JSON.stringify(body)
+ }
+ }
+ }
+ } catch {
+ // If parsing fails, just use original body
+ }
+ }
+ return baseFetch(input, options)
+ }
+}
diff --git a/packages/shared/utils/index.ts b/packages/shared/utils/index.ts
index 11cefe0c9b..754acd8819 100644
--- a/packages/shared/utils/index.ts
+++ b/packages/shared/utils/index.ts
@@ -1,3 +1,3 @@
-export { defaultAppHeaders } from './headers'
+export { defaultAppHeaders, isBase64ImageDataUrl, isDataUrl, parseDataUrl } from './headers'
export { getBaseModelName, getLowerBaseModelName } from './naming'
export * from './url'
diff --git a/src/main/utils/ipService.ts b/src/main/utils/ipService.ts
index 3180f9457c..708af4c40e 100644
--- a/src/main/utils/ipService.ts
+++ b/src/main/utils/ipService.ts
@@ -13,18 +13,13 @@ export async function getIpCountry(): Promise {
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), 5000)
- const ipinfo = await net.fetch('https://ipinfo.io/json', {
- signal: controller.signal,
- headers: {
- 'User-Agent':
- 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
- 'Accept-Language': 'en-US,en;q=0.9'
- }
+ const ipinfo = await net.fetch(`https://api.ipinfo.io/lite/me?token=2a42580355dae4`, {
+ signal: controller.signal
})
clearTimeout(timeoutId)
const data = await ipinfo.json()
- const country = data.country || 'CN'
+ const country = data.country_code || 'CN'
logger.info(`Detected user IP address country: ${country}`)
return country
} catch (error) {
diff --git a/src/renderer/src/aiCore/provider/providerConfig.ts b/src/renderer/src/aiCore/provider/providerConfig.ts
index 47cd1a6fbf..9d3a729db4 100644
--- a/src/renderer/src/aiCore/provider/providerConfig.ts
+++ b/src/renderer/src/aiCore/provider/providerConfig.ts
@@ -1,5 +1,5 @@
import { hasProviderConfig } from '@cherrystudio/ai-core/provider'
-import { isOpenAIChatCompletionOnlyModel } from '@renderer/config/models'
+import { isOpenAIChatCompletionOnlyModel, isOpenAIReasoningModel } from '@renderer/config/models'
import {
getAwsBedrockAccessKeyId,
getAwsBedrockApiKey,
@@ -11,7 +11,7 @@ import { createVertexProvider, isVertexAIConfigured } from '@renderer/hooks/useV
import { getProviderByModel } from '@renderer/services/AssistantService'
import store from '@renderer/store'
import { isSystemProvider, type Model, type Provider } from '@renderer/types'
-import { isSupportStreamOptionsProvider } from '@renderer/utils/provider'
+import { isSupportDeveloperRoleProvider, isSupportStreamOptionsProvider } from '@renderer/utils/provider'
import {
type AiSdkConfigContext,
formatProviderApiHost as sharedFormatProviderApiHost,
@@ -52,7 +52,9 @@ function createRendererSdkContext(model: Model): AiSdkConfigContext {
}
return createVertexProvider(provider as Provider)
},
- getEndpointType: () => model.endpoint_type
+ getEndpointType: () => model.endpoint_type,
+ isSupportDeveloperRoleProvider: (provider) => isSupportDeveloperRoleProvider(provider as Provider),
+ isOpenAIReasoningModel: () => isOpenAIReasoningModel(model)
}
}
@@ -202,5 +204,6 @@ export async function prepareSpecialProviderConfig(
}
}
}
+
return config
}
diff --git a/src/renderer/src/aiCore/utils/reasoning.ts b/src/renderer/src/aiCore/utils/reasoning.ts
index ab8a0b7983..c57dc31d17 100644
--- a/src/renderer/src/aiCore/utils/reasoning.ts
+++ b/src/renderer/src/aiCore/utils/reasoning.ts
@@ -118,6 +118,11 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
return { thinking: { type: 'disabled' } }
}
+ // DeepSeek hybrid inference models default to non-thinking, so send no reasoning options
+ if (isDeepSeekHybridInferenceModel(model)) {
+ return {}
+ }
+
// GPT 5.1, GPT 5.2, or newer
if (isSupportNoneReasoningEffortModel(model)) {
return {
diff --git a/src/renderer/src/config/models/__tests__/reasoning.test.ts b/src/renderer/src/config/models/__tests__/reasoning.test.ts
index 56f9cd0b60..0f58be4ef0 100644
--- a/src/renderer/src/config/models/__tests__/reasoning.test.ts
+++ b/src/renderer/src/config/models/__tests__/reasoning.test.ts
@@ -745,7 +745,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
})
it('should return doubao_after_251015 for Doubao-Seed-1.8 models', () => {
- expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
+ expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251228' }))).toBe('doubao_after_251015')
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.8' }))).toBe('doubao_after_251015')
})
@@ -879,7 +879,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
// auto > after_251015 > no_auto
expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao')
expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015')
- expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
+ expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251228' }))).toBe('doubao_after_251015')
expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
})
diff --git a/src/renderer/src/config/models/default.ts b/src/renderer/src/config/models/default.ts
index 1223d0c92c..408c047639 100644
--- a/src/renderer/src/config/models/default.ts
+++ b/src/renderer/src/config/models/default.ts
@@ -771,7 +771,7 @@ export const SYSTEM_MODELS: Record =
],
doubao: [
{
- id: 'doubao-seed-1-8-251215',
+ id: 'doubao-seed-1-8-251228',
provider: 'doubao',
name: 'Doubao-Seed-1.8',
group: 'Doubao-Seed-1.8'
diff --git a/src/renderer/src/pages/paintings/utils/TokenFluxService.ts b/src/renderer/src/pages/paintings/utils/TokenFluxService.ts
index 4b1e224a8a..ddd362045b 100644
--- a/src/renderer/src/pages/paintings/utils/TokenFluxService.ts
+++ b/src/renderer/src/pages/paintings/utils/TokenFluxService.ts
@@ -6,6 +6,9 @@ import type { TokenFluxModel } from '../config/tokenFluxConfig'
const logger = loggerService.withContext('TokenFluxService')
+// 图片 API 使用固定的基础地址,独立于 provider.apiHost(后者是 OpenAI 兼容的聊天 API 地址)
+const TOKENFLUX_IMAGE_API_HOST = 'https://api.tokenflux.ai'
+
export interface TokenFluxGenerationRequest {
model: string
input: {
@@ -66,7 +69,7 @@ export class TokenFluxService {
return cachedModels
}
- const response = await fetch(`${this.apiHost}/v1/images/models`, {
+ const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/models`, {
headers: {
Authorization: `Bearer ${this.apiKey}`
}
@@ -88,7 +91,7 @@ export class TokenFluxService {
* Create a new image generation request
*/
async createGeneration(request: TokenFluxGenerationRequest, signal?: AbortSignal): Promise {
- const response = await fetch(`${this.apiHost}/v1/images/generations`, {
+ const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/generations`, {
method: 'POST',
headers: this.getHeaders(),
body: JSON.stringify(request),
@@ -108,7 +111,7 @@ export class TokenFluxService {
* Get the status and result of a generation
*/
async getGenerationResult(generationId: string): Promise {
- const response = await fetch(`${this.apiHost}/v1/images/generations/${generationId}`, {
+ const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/generations/${generationId}`, {
headers: {
Authorization: `Bearer ${this.apiKey}`
}
diff --git a/src/renderer/src/utils/prompt.ts b/src/renderer/src/utils/prompt.ts
index 4e799800a7..326392947a 100644
--- a/src/renderer/src/utils/prompt.ts
+++ b/src/renderer/src/utils/prompt.ts
@@ -14,7 +14,7 @@ Here are a few examples using notional tools:
---
User: Generate an image of the oldest person in this document.
-Assistant: I can use the document_qa tool to find out who the oldest person is in the document.
+A: I can use the document_qa tool to find out who the oldest person is in the document.
document_qa
{"document": "document.pdf", "question": "Who is the oldest person mentioned?"}
@@ -25,7 +25,7 @@ User:
John Doe, a 55 year old lumberjack living in Newfoundland.
-Assistant: I can use the image_generator tool to create a portrait of John Doe.
+A: I can use the image_generator tool to create a portrait of John Doe.
image_generator
{"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."}
@@ -36,12 +36,12 @@ User:
image.png
-Assistant: the image is generated as image.png
+A: the image is generated as image.png
---
User: "What is the result of the following operation: 5 + 3 + 1294.678?"
-Assistant: I can use the python_interpreter tool to calculate the result of the operation.
+A: I can use the python_interpreter tool to calculate the result of the operation.
python_interpreter
{"code": "5 + 3 + 1294.678"}
@@ -52,12 +52,12 @@ User:
1302.678
-Assistant: The result of the operation is 1302.678.
+A: The result of the operation is 1302.678.
---
User: "Which city has the highest population , Guangzhou or Shanghai?"
-Assistant: I can use the search tool to find the population of Guangzhou.
+A: I can use the search tool to find the population of Guangzhou.
search
{"query": "Population Guangzhou"}
@@ -68,7 +68,7 @@ User:
Guangzhou has a population of 15 million inhabitants as of 2021.
-Assistant: I can use the search tool to find the population of Shanghai.
+A: I can use the search tool to find the population of Shanghai.
search
{"query": "Population Shanghai"}
@@ -78,7 +78,8 @@ User:
search
26 million (2019)
-Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.
+
+A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.
`
export const AvailableTools = (tools: MCPTool[]) => {