Mirror of https://github.com/CherryHQ/cherry-studio.git (synced 2026-01-12 00:49:14 +08:00)
Merge remote-tracking branch 'origin/main' into feat/proxy-api-server
Resolved conflict in providerConfig.ts:
- Kept shared aiCore implementation architecture from feat/proxy-api-server
- Added createDeveloperToSystemFetch function to shared/aiCore/providerConfig.ts
- Added isSupportDeveloperRoleProvider and isOpenAIReasoningModel context options
- Exported missing parseDataUrl, isDataUrl, isBase64ImageDataUrl from shared/utils
This commit is contained in commit 8d5c5bac84
@ -28,6 +28,12 @@ files:
- "!**/{tsconfig.json,tsconfig.tsbuildinfo,tsconfig.node.json,tsconfig.web.json}"
- "!**/{.editorconfig,.jekyll-metadata}"
- "!src"
- "!config"
- "!patches"
- "!app-upgrade-config.json"
- "!**/node_modules/**/*.cpp"
- "!**/node_modules/node-addon-api/**"
- "!**/node_modules/prebuild-install/**"
- "!scripts"
- "!local"
- "!docs"
@ -154,7 +154,8 @@ User: <tool_use_result>
<name>search</name>
<result>26 million (2019)</result>
</tool_use_result>
Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`

A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`

/**
 * Build the available tools section (extracted from Cherry Studio)
@ -6,6 +6,7 @@
 */
export type { AiSdkConfig, AiSdkConfigContext, ApiKeyRotator, ProviderFormatContext } from './providerConfig'
export {
  createDeveloperToSystemFetch,
  defaultFormatAzureOpenAIApiHost,
  formatProviderApiHost,
  getBaseUrlForAiSdk,
@ -118,6 +118,18 @@ export interface AiSdkConfigContext {
   * Returns a fetch function that adds signature headers to requests
   */
  getCherryAISignedFetch?: () => typeof globalThis.fetch

  /**
   * Check if provider supports developer role
   * Default: returns true (assumes support)
   */
  isSupportDeveloperRoleProvider?: (provider: MinimalProvider) => boolean

  /**
   * Check if model is an OpenAI reasoning model
   * Default: returns false
   */
  isOpenAIReasoningModel?: (modelId: string) => boolean
}

/**
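Both new callbacks are optional; providerToAiSdkConfig falls back to ?? true and ?? false respectively (see the next hunk), so existing callers are unaffected. A minimal sketch of a context that opts one provider out of the developer role; the provider id, the MinimalProvider field used, and the reasoning-model heuristic are illustrative assumptions, not Cherry Studio code:

// Sketch only. The 'id' field on MinimalProvider and the model-id heuristic are assumptions.
import type { AiSdkConfigContext } from './providerConfig'

const context: AiSdkConfigContext = {
  // Hypothetical: a single provider known to reject the 'developer' role
  isSupportDeveloperRoleProvider: (provider) => provider.id !== 'azure-deepseek-r1',
  // Hypothetical heuristic for OpenAI reasoning models (o1/o3/o4 families)
  isOpenAIReasoningModel: (modelId) => /^o[134](-|$)/i.test(modelId)
}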
@ -281,6 +293,16 @@ export function providerToAiSdkConfig(
    extraOptions.fetch = context.fetch
  }

  // Apply developer-to-system role conversion for providers that don't support developer role
  // bug: https://github.com/vercel/ai/issues/10982
  // fixPR: https://github.com/vercel/ai/pull/11127
  // TODO: the fix PR has not been backported to v5; this code will be removed when upgrading to v6
  const isSupportDeveloperRole = context.isSupportDeveloperRoleProvider?.(provider) ?? true
  const isReasoningModel = context.isOpenAIReasoningModel?.(modelId) ?? false
  if (!isSupportDeveloperRole || !isReasoningModel) {
    extraOptions.fetch = createDeveloperToSystemFetch(extraOptions.fetch as typeof fetch | undefined)
  }

  // Check if AI SDK supports this provider natively
  if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
    const options = ProviderConfigFactory.fromProvider(aiSdkProviderId, baseConfig, extraOptions)
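Read together, the guard means the developer-to-system wrapper is skipped only when the provider supports the developer role and the model is an OpenAI reasoning model; every other combination wraps the fetch. A small illustration of the decision (not part of the diff):

// Mirrors the condition above: wrap fetch unless both checks pass.
const needsConversion = (supportsDeveloperRole: boolean, isReasoningModel: boolean): boolean =>
  !supportsDeveloperRole || !isReasoningModel

needsConversion(true, true)   // false - reasoning model on a supporting provider keeps 'developer'
needsConversion(true, false)  // true  - non-reasoning model: 'developer' is rewritten to 'system'
needsConversion(false, true)  // true  - provider rejects 'developer': rewritten regardless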
@ -415,3 +437,41 @@ export const simpleKeyRotator: ApiKeyRotator = {
    return keyList[0] || keys
  }
}

/**
 * Creates a custom fetch wrapper that converts 'developer' role to 'system' role in request body.
 * This is needed for providers that don't support the 'developer' role (e.g., Azure DeepSeek R1).
 *
 * @param originalFetch - Optional original fetch function to wrap
 * @returns A fetch function that transforms the request body
 */
export function createDeveloperToSystemFetch(originalFetch?: typeof fetch): typeof fetch {
  const baseFetch = originalFetch ?? fetch
  return async (input: RequestInfo | URL, init?: RequestInit) => {
    let options = init
    if (options?.body && typeof options.body === 'string') {
      try {
        const body = JSON.parse(options.body)
        if (body.messages && Array.isArray(body.messages)) {
          let hasChanges = false
          body.messages = body.messages.map((msg: { role: string }) => {
            if (msg.role === 'developer') {
              hasChanges = true
              return { ...msg, role: 'system' }
            }
            return msg
          })
          if (hasChanges) {
            options = {
              ...options,
              body: JSON.stringify(body)
            }
          }
        }
      } catch {
        // If parsing fails, just use original body
      }
    }
    return baseFetch(input, options)
  }
}
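A usage sketch of the wrapper; the endpoint, model id, and message content below are placeholders, not values from the diff:

// The wrapped fetch rewrites role: 'developer' to 'system' in the JSON body before sending.
const patchedFetch = createDeveloperToSystemFetch(globalThis.fetch)

await patchedFetch('https://example.com/v1/chat/completions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'placeholder-model',
    messages: [
      { role: 'developer', content: 'You are a helpful assistant.' }, // sent as role: 'system'
      { role: 'user', content: 'Hello' }
    ]
  })
})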
@ -1,3 +1,3 @@
export { defaultAppHeaders } from './headers'
export { defaultAppHeaders, isBase64ImageDataUrl, isDataUrl, parseDataUrl } from './headers'
export { getBaseModelName, getLowerBaseModelName } from './naming'
export * from './url'
@ -13,18 +13,13 @@ export async function getIpCountry(): Promise<string> {
    const controller = new AbortController()
    const timeoutId = setTimeout(() => controller.abort(), 5000)

    const ipinfo = await net.fetch('https://ipinfo.io/json', {
      signal: controller.signal,
      headers: {
        'User-Agent':
          'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/124.0.0.0 Safari/537.36',
        'Accept-Language': 'en-US,en;q=0.9'
      }
    const ipinfo = await net.fetch(`https://api.ipinfo.io/lite/me?token=2a42580355dae4`, {
      signal: controller.signal
    })

    clearTimeout(timeoutId)
    const data = await ipinfo.json()
    const country = data.country || 'CN'
    const country = data.country_code || 'CN'
    logger.info(`Detected user IP address country: ${country}`)
    return country
  } catch (error) {
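For reference, getIpCountry only reads country_code from the Lite endpoint's JSON response; the rest of the shape below is an assumption, and the 'CN' fallback is unchanged from the diff:

// Assumed response shape of https://api.ipinfo.io/lite/me - only country_code is actually used.
interface IpInfoLiteResponse {
  country_code?: string // e.g. 'US'; getIpCountry falls back to 'CN' when it is missing
  [key: string]: unknown
}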
@ -1,5 +1,5 @@
import { hasProviderConfig } from '@cherrystudio/ai-core/provider'
import { isOpenAIChatCompletionOnlyModel } from '@renderer/config/models'
import { isOpenAIChatCompletionOnlyModel, isOpenAIReasoningModel } from '@renderer/config/models'
import {
  getAwsBedrockAccessKeyId,
  getAwsBedrockApiKey,

@ -11,7 +11,7 @@ import { createVertexProvider, isVertexAIConfigured } from '@renderer/hooks/useV
import { getProviderByModel } from '@renderer/services/AssistantService'
import store from '@renderer/store'
import { isSystemProvider, type Model, type Provider } from '@renderer/types'
import { isSupportStreamOptionsProvider } from '@renderer/utils/provider'
import { isSupportDeveloperRoleProvider, isSupportStreamOptionsProvider } from '@renderer/utils/provider'
import {
  type AiSdkConfigContext,
  formatProviderApiHost as sharedFormatProviderApiHost,

@ -52,7 +52,9 @@ function createRendererSdkContext(model: Model): AiSdkConfigContext {
      }
      return createVertexProvider(provider as Provider)
    },
    getEndpointType: () => model.endpoint_type
    getEndpointType: () => model.endpoint_type,
    isSupportDeveloperRoleProvider: (provider) => isSupportDeveloperRoleProvider(provider as Provider),
    isOpenAIReasoningModel: () => isOpenAIReasoningModel(model)
  }
}

@ -202,5 +204,6 @@ export async function prepareSpecialProviderConfig(
      }
    }
  }

  return config
}
@ -118,6 +118,11 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
    return { thinking: { type: 'disabled' } }
  }

  // Deepseek, default behavior is non-thinking
  if (isDeepSeekHybridInferenceModel(model)) {
    return {}
  }

  // GPT 5.1, GPT 5.2, or newer
  if (isSupportNoneReasoningEffortModel(model)) {
    return {
@ -745,7 +745,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
  })

  it('should return doubao_after_251015 for Doubao-Seed-1.8 models', () => {
    expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
    expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251228' }))).toBe('doubao_after_251015')
    expect(getThinkModelType(createModel({ id: 'doubao-seed-1.8' }))).toBe('doubao_after_251015')
  })

@ -879,7 +879,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
    // auto > after_251015 > no_auto
    expect(getThinkModelType(createModel({ id: 'doubao-seed-1.6' }))).toBe('doubao')
    expect(getThinkModelType(createModel({ id: 'doubao-seed-1-6-251015' }))).toBe('doubao_after_251015')
    expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251215' }))).toBe('doubao_after_251015')
    expect(getThinkModelType(createModel({ id: 'doubao-seed-1-8-251228' }))).toBe('doubao_after_251015')
    expect(getThinkModelType(createModel({ id: 'doubao-1.5-thinking-vision-pro' }))).toBe('doubao_no_auto')
  })
@ -771,7 +771,7 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
  ],
  doubao: [
    {
      id: 'doubao-seed-1-8-251215',
      id: 'doubao-seed-1-8-251228',
      provider: 'doubao',
      name: 'Doubao-Seed-1.8',
      group: 'Doubao-Seed-1.8'
@ -6,6 +6,9 @@ import type { TokenFluxModel } from '../config/tokenFluxConfig'

const logger = loggerService.withContext('TokenFluxService')

// The image API uses a fixed base URL, independent of provider.apiHost (the latter is the OpenAI-compatible chat API address)
const TOKENFLUX_IMAGE_API_HOST = 'https://api.tokenflux.ai'

export interface TokenFluxGenerationRequest {
  model: string
  input: {

@ -66,7 +69,7 @@ export class TokenFluxService {
      return cachedModels
    }

    const response = await fetch(`${this.apiHost}/v1/images/models`, {
    const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/models`, {
      headers: {
        Authorization: `Bearer ${this.apiKey}`
      }

@ -88,7 +91,7 @@ export class TokenFluxService {
   * Create a new image generation request
   */
  async createGeneration(request: TokenFluxGenerationRequest, signal?: AbortSignal): Promise<string> {
    const response = await fetch(`${this.apiHost}/v1/images/generations`, {
    const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/generations`, {
      method: 'POST',
      headers: this.getHeaders(),
      body: JSON.stringify(request),

@ -108,7 +111,7 @@ export class TokenFluxService {
   * Get the status and result of a generation
   */
  async getGenerationResult(generationId: string): Promise<TokenFluxGenerationResponse['data']> {
    const response = await fetch(`${this.apiHost}/v1/images/generations/${generationId}`, {
    const response = await fetch(`${TOKENFLUX_IMAGE_API_HOST}/v1/images/generations/${generationId}`, {
      headers: {
        Authorization: `Bearer ${this.apiKey}`
      }
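A usage sketch of the create-then-poll flow against the fixed image host; the service construction, the input fields beyond model, and the example values are assumptions, while the method signatures match the hunks above:

// Sketch only: drive an image generation through TokenFluxService.
declare const tokenFlux: TokenFluxService

const controller = new AbortController()

// createGeneration resolves to the generation id (per the signature above).
const generationId = await tokenFlux.createGeneration(
  { model: 'example-image-model', input: { prompt: 'A watercolor fox' } }, // 'prompt' is an assumed input field
  controller.signal
)

// Later, fetch the status/result for that generation id.
const result = await tokenFlux.getGenerationResult(generationId)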
@ -14,7 +14,7 @@ Here are a few examples using notional tools:
---
User: Generate an image of the oldest person in this document.

Assistant: I can use the document_qa tool to find out who the oldest person is in the document.
A: I can use the document_qa tool to find out who the oldest person is in the document.
<tool_use>
<name>document_qa</name>
<arguments>{"document": "document.pdf", "question": "Who is the oldest person mentioned?"}</arguments>

@ -25,7 +25,7 @@ User: <tool_use_result>
<result>John Doe, a 55 year old lumberjack living in Newfoundland.</result>
</tool_use_result>

Assistant: I can use the image_generator tool to create a portrait of John Doe.
A: I can use the image_generator tool to create a portrait of John Doe.
<tool_use>
<name>image_generator</name>
<arguments>{"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."}</arguments>

@ -36,12 +36,12 @@ User: <tool_use_result>
<result>image.png</result>
</tool_use_result>

Assistant: the image is generated as image.png
A: the image is generated as image.png

---
User: "What is the result of the following operation: 5 + 3 + 1294.678?"

Assistant: I can use the python_interpreter tool to calculate the result of the operation.
A: I can use the python_interpreter tool to calculate the result of the operation.
<tool_use>
<name>python_interpreter</name>
<arguments>{"code": "5 + 3 + 1294.678"}</arguments>

@ -52,12 +52,12 @@ User: <tool_use_result>
<result>1302.678</result>
</tool_use_result>

Assistant: The result of the operation is 1302.678.
A: The result of the operation is 1302.678.

---
User: "Which city has the highest population , Guangzhou or Shanghai?"

Assistant: I can use the search tool to find the population of Guangzhou.
A: I can use the search tool to find the population of Guangzhou.
<tool_use>
<name>search</name>
<arguments>{"query": "Population Guangzhou"}</arguments>

@ -68,7 +68,7 @@ User: <tool_use_result>
<result>Guangzhou has a population of 15 million inhabitants as of 2021.</result>
</tool_use_result>

Assistant: I can use the search tool to find the population of Shanghai.
A: I can use the search tool to find the population of Shanghai.
<tool_use>
<name>search</name>
<arguments>{"query": "Population Shanghai"}</arguments>

@ -78,7 +78,8 @@ User: <tool_use_result>
<name>search</name>
<result>26 million (2019)</result>
</tool_use_result>
Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.

A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.
`

export const AvailableTools = (tools: MCPTool[]) => {
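// The AvailableTools implementation is truncated here. As an illustration only (the tag format and
// the MCPTool fields used are assumptions, not Cherry Studio's actual output), the builder might
// render MCP tools into the prompt's tool list roughly like this:
const renderAvailableToolsSketch = (tools: MCPTool[]): string =>
  tools
    .map((tool) => `<tool>\n  <name>${tool.name}</name>\n  <description>${tool.description ?? ''}</description>\n</tool>`)
    .join('\n')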