mirror of https://github.com/CherryHQ/cherry-studio.git
feat(OpenAI): new Responses API support (#5621)
* feat(OpenAI): new Responses API support

  - Added OpenAICompatibleProvider to handle requests for OpenAI-compatible models.
  - Updated ProviderSettings to reflect changes in provider types from 'openai' to 'openai-compatible'.
  - Enhanced model validation and response handling in OpenAIProvider to support new input formats.
  - Refactored existing components to accommodate the new provider structure and ensure compatibility with OpenAI's Responses API.
  - Incremented store version to 98 to reflect changes in provider types and settings.
  - Updated migration logic to convert existing 'openai' provider types to 'openai-compatible' where applicable.

* refactor(OpenAI): update model validation and response handling

  - Renamed `isOpenAIModel` to `isOpenAILLMModel` for clarity in model type checking.
  - Updated references to the new model validation function across `OpenAICompatibleProvider` and `OpenAIProvider`.
  - Enhanced web search model validation logic to accommodate new model checks.
  - Refactored `getOpenAIWebSearchParams` to return structured parameters based on model type.
  - Improved citation formatting in message blocks for better web search result handling.

* fix(OpenAICompatibleProvider): reset timestamps for first token handling

  - Reset timestamps for the first token and content when no prior thinking content is present.
  - Added comments for clarity and marked a temporary fix for timestamp handling.

* refactor(OpenAICompatibleProvider): improve code readability with consistent formatting

* fix(OpenAIProvider): refine service tier logic for model identification

* fix: eslint error

* fix(OpenAIProvider): enhance response metrics tracking in streaming process

* feat(OpenAIProvider): add timeout handling for model requests

---------

Co-authored-by: 自由的世界人 <3196812536@qq.com>
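The core split this commit introduces: the official OpenAI provider (type 'openai') is routed through the new Responses API, while everything typed 'openai-compatible' stays on Chat Completions. A minimal sketch of the two request shapes with the openai SDK follows; it is illustrative only, and the client setup and model id are placeholder assumptions, not code from this commit.

import OpenAI from 'openai'

// Sketch only: contrasts the two request shapes this commit splits between
// OpenAIProvider (Responses API) and OpenAICompatibleProvider (Chat Completions).
const client = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

// Chat Completions: the shape OpenAI-compatible providers implement.
async function viaChatCompletions(prompt: string) {
  const completion = await client.chat.completions.create({
    model: 'gpt-4o-mini',
    messages: [{ role: 'user', content: prompt }]
  })
  return completion.choices[0].message.content
}

// Responses API: the shape the reworked OpenAIProvider targets.
async function viaResponses(prompt: string) {
  const response = await client.responses.create({
    model: 'gpt-4o-mini',
    input: [{ role: 'user', content: prompt }]
  })
  return response.output_text
}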
This commit is contained in:

parent c8730c62f7
commit 8b3894fd19
@@ -130,7 +130,6 @@ import XirangModelLogoDark from '@renderer/assets/images/models/xirang_dark.png'
 import YiModelLogo from '@renderer/assets/images/models/yi.png'
 import YiModelLogoDark from '@renderer/assets/images/models/yi_dark.png'
 import { getProviderByModel } from '@renderer/services/AssistantService'
 import WebSearchService from '@renderer/services/WebSearchService'
 import { Assistant, Model } from '@renderer/types'
 import OpenAI from 'openai'
@@ -2223,6 +2222,19 @@ export function isOpenAIReasoningModel(model: Model): boolean {
   return model.id.includes('o1') || model.id.includes('o3') || model.id.includes('o4')
 }
 
+export function isOpenAILLMModel(model: Model): boolean {
+  if (!model) {
+    return false
+  }
+  if (isOpenAIReasoningModel(model)) {
+    return true
+  }
+  if (model.id.includes('gpt')) {
+    return true
+  }
+  return false
+}
+
 export function isSupportedReasoningEffortOpenAIModel(model: Model): boolean {
   return (
     (model.id.includes('o1') && !(model.id.includes('o1-preview') || model.id.includes('o1-mini'))) ||
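For orientation, a sketch of how the new predicate classifies a few ids; the Model literals are assumptions (only the id field is consulted here), and the import path is assumed from where these helpers appear to live.

import { Model } from '@renderer/types'
import { isOpenAILLMModel } from '@renderer/config/models' // assumed path

// Hypothetical helper for the sketch: builds a Model with just an id.
const m = (id: string) => ({ id }) as Model

isOpenAILLMModel(m('gpt-4o'))                 // true: id contains 'gpt'
isOpenAILLMModel(m('o3-mini'))                // true: matches the reasoning-model check
isOpenAILLMModel(m('text-embedding-3-small')) // false: neither branch matches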
@@ -2387,16 +2399,38 @@ export function isWebSearchModel(model: Model): boolean {
     return false
   }
 
+  if (provider.type === 'openai') {
+    if (
+      isOpenAILLMModel(model) &&
+      !isTextToImageModel(model) &&
+      !isOpenAIReasoningModel(model) &&
+      !GENERATE_IMAGE_MODELS.includes(model.id)
+    ) {
+      return true
+    }
+
+    return false
+  }
 
   if (provider.id === 'perplexity') {
     return PERPLEXITY_SEARCH_MODELS.includes(model?.id)
   }
 
   if (provider.id === 'aihubmix') {
+    if (
+      isOpenAILLMModel(model) &&
+      !isTextToImageModel(model) &&
+      !isOpenAIReasoningModel(model) &&
+      !GENERATE_IMAGE_MODELS.includes(model.id)
+    ) {
+      return true
+    }
+
     const models = ['gemini-2.0-flash-search', 'gemini-2.0-flash-exp-search', 'gemini-2.0-pro-exp-02-05-search']
     return models.includes(model?.id)
   }
 
-  if (provider?.type === 'openai') {
+  if (provider?.type === 'openai-compatible') {
     if (GEMINI_SEARCH_MODELS.includes(model?.id) || isOpenAIWebSearch(model)) {
       return true
     }
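Expectation sketch for the reworked check: on the Responses-API branch, any OpenAI LLM that is neither an image nor a reasoning model now counts as web-search capable. The Model literals below are assumptions, and they assume the model resolves to a provider of type 'openai'.

import { Model } from '@renderer/types'
import { isWebSearchModel } from '@renderer/config/models' // assumed path

const gpt4o = { id: 'gpt-4o', provider: 'openai' } as Model
const o3mini = { id: 'o3-mini', provider: 'openai' } as Model

isWebSearchModel(gpt4o)  // true: OpenAI LLM, neither image nor reasoning model
isWebSearchModel(o3mini) // false: reasoning models fail the new guard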
@@ -2450,9 +2484,6 @@ export function isGenerateImageModel(model: Model): boolean {
 }
 
 export function getOpenAIWebSearchParams(assistant: Assistant, model: Model): Record<string, any> {
-  if (WebSearchService.isWebSearchEnabled()) {
-    return {}
-  }
   if (isWebSearchModel(model)) {
     if (assistant.enableWebSearch) {
       const webSearchTools = getWebSearchTools(model)
@@ -2477,7 +2508,9 @@ export function getOpenAIWebSearchParams(assistant: Assistant, model: Model): Record<string, any> {
   }
 
   if (isOpenAIWebSearch(model)) {
-    return {}
+    return {
+      web_search_options: {}
+    }
   }
 
   return {
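With this change, an OpenAI web-search model contributes a structured parameter instead of an empty object. A hedged sketch of how the result is presumably spread into a Chat Completions request; the request literal and the declared values are assumptions for illustration.

import { Assistant, Model } from '@renderer/types'
import { getOpenAIWebSearchParams } from '@renderer/config/models' // assumed path

declare const assistant: Assistant
declare const model: Model

const params = {
  model: model.id,
  messages: [{ role: 'user' as const, content: 'latest headlines?' }],
  // For a model where isOpenAIWebSearch(model) is true, this now merges in
  // `web_search_options: {}` rather than adding nothing.
  ...getOpenAIWebSearchParams(assistant, model)
}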
@@ -16,7 +16,7 @@ interface Props {
 const PopupContainer: React.FC<Props> = ({ provider, resolve }) => {
   const [open, setOpen] = useState(true)
   const [name, setName] = useState(provider?.name || '')
-  const [type, setType] = useState<ProviderType>(provider?.type || 'openai')
+  const [type, setType] = useState<ProviderType>(provider?.type || 'openai-compatible')
   const [logo, setLogo] = useState<string | null>(null)
   const [dropdownOpen, setDropdownOpen] = useState(false)
   const { t } = useTranslation()
@@ -52,7 +52,7 @@ const PopupContainer: React.FC<Props> = ({ provider, resolve }) => {
 
   const onCancel = () => {
     setOpen(false)
-    resolve({ name: '', type: 'openai' })
+    resolve({ name: '', type: 'openai-compatible' })
   }
 
   const onClose = () => {
@@ -188,7 +188,8 @@ const PopupContainer: React.FC<Props> = ({ provider, resolve }) => {
             value={type}
             onChange={setType}
             options={[
-              { label: 'OpenAI', value: 'openai' },
+              { label: 'OpenAI-Compatible', value: 'openai-compatible' },
+              { label: 'OpenAI-Response', value: 'openai' },
              { label: 'Gemini', value: 'gemini' },
               { label: 'Anthropic', value: 'anthropic' },
               { label: 'Azure OpenAI', value: 'azure-openai' }
@@ -269,8 +269,10 @@ const ProviderSetting: FC<Props> = ({ provider: _provider }) => {
     if (apiHost.endsWith('#')) {
       return apiHost.replace('#', '')
     }
-
-    return formatApiHost(apiHost) + 'chat/completions'
+    if (provider.type === 'openai-compatible') {
+      return formatApiHost(apiHost) + 'chat/completions'
+    }
+    return formatApiHost(apiHost) + 'responses'
   }
 
   useEffect(() => {
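The settings page now previews a different endpoint per provider type. A self-contained sketch of the branching; `normalize` is a stand-in assumption for the repo's formatApiHost helper (which also handles /v1 suffixes).

// Stand-in for formatApiHost (assumption: it normalizes the trailing slash).
const normalize = (host: string) => (host.endsWith('/') ? host : host + '/')

function previewEndpoint(providerType: string, apiHost: string): string {
  if (apiHost.endsWith('#')) {
    return apiHost.replace('#', '')
  }
  if (providerType === 'openai-compatible') {
    return normalize(apiHost) + 'chat/completions'
  }
  return normalize(apiHost) + 'responses'
}

previewEndpoint('openai-compatible', 'https://api.deepseek.com') // .../chat/completions
previewEndpoint('openai', 'https://api.openai.com')              // .../responses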
src/renderer/src/providers/AiProvider/OpenAICompatibleProvider.ts (new file, 1090 lines)
File diff suppressed because it is too large
@@ -3,17 +3,22 @@ import { Provider } from '@renderer/types'
 import AnthropicProvider from './AnthropicProvider'
 import BaseProvider from './BaseProvider'
 import GeminiProvider from './GeminiProvider'
+import OpenAICompatibleProvider from './OpenAICompatibleProvider'
 import OpenAIProvider from './OpenAIProvider'
 
 export default class ProviderFactory {
   static create(provider: Provider): BaseProvider {
     switch (provider.type) {
+      case 'openai':
+        return new OpenAIProvider(provider)
+      case 'openai-compatible':
+        return new OpenAICompatibleProvider(provider)
       case 'anthropic':
         return new AnthropicProvider(provider)
       case 'gemini':
         return new GeminiProvider(provider)
       default:
-        return new OpenAIProvider(provider)
+        return new OpenAICompatibleProvider(provider)
     }
   }
 }
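Usage sketch for the updated factory; the provider literals are trimmed assumptions (real entries also carry apiKey, apiHost, and models), and the import path is assumed.

import { Provider } from '@renderer/types'
import ProviderFactory from '@renderer/providers/AiProvider/ProviderFactory' // assumed path

const deepseek = { id: 'deepseek', type: 'openai-compatible' } as Provider
const official = { id: 'openai', type: 'openai' } as Provider

ProviderFactory.create(deepseek) // OpenAICompatibleProvider (Chat Completions)
ProviderFactory.create(official) // OpenAIProvider (Responses API)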
@@ -46,7 +46,7 @@ const persistedReducer = persistReducer(
   {
     key: 'cherry-studio',
     storage,
-    version: 97,
+    version: 98,
     blacklist: ['runtime', 'messages', 'messageBlocks'],
     migrate
   },
@@ -28,7 +28,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'silicon',
     name: 'Silicon',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.siliconflow.cn',
     models: SYSTEM_MODELS.silicon,
@@ -48,7 +48,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'ocoolai',
     name: 'ocoolAI',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.ocoolai.com',
     models: SYSTEM_MODELS.ocoolai,
@@ -58,7 +58,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'deepseek',
     name: 'deepseek',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.deepseek.com',
     models: SYSTEM_MODELS.deepseek,
@@ -78,7 +78,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'ppio',
     name: 'PPIO',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.ppinfra.com/v3/openai',
     models: SYSTEM_MODELS.ppio,
@@ -88,7 +88,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'alayanew',
     name: 'AlayaNew',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://deepseek.alayanew.com',
     models: SYSTEM_MODELS.alayanew,
@@ -98,7 +98,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'infini',
     name: 'Infini',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://cloud.infini-ai.com/maas',
     models: SYSTEM_MODELS.infini,
@@ -108,7 +108,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'qiniu',
     name: 'Qiniu',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.qnaigc.com',
     models: SYSTEM_MODELS.qiniu,
@@ -118,7 +118,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'dmxapi',
     name: 'DMXAPI',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://www.dmxapi.cn',
     models: SYSTEM_MODELS.dmxapi,
@@ -128,7 +128,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'o3',
     name: 'O3',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.o3.fan',
     models: SYSTEM_MODELS.o3,
@@ -138,7 +138,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'ollama',
     name: 'Ollama',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'http://localhost:11434',
     models: SYSTEM_MODELS.ollama,
@@ -148,7 +148,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'lmstudio',
     name: 'LM Studio',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'http://localhost:1234',
     models: SYSTEM_MODELS.lmstudio,
@@ -178,7 +178,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'azure-openai',
     name: 'Azure OpenAI',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: '',
     apiVersion: '',
@@ -199,7 +199,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'zhipu',
     name: 'ZhiPu',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://open.bigmodel.cn/api/paas/v4/',
     models: SYSTEM_MODELS.zhipu,
@@ -209,7 +209,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'github',
     name: 'Github Models',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://models.inference.ai.azure.com/',
     models: SYSTEM_MODELS.github,
@@ -219,7 +219,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'copilot',
     name: 'Github Copilot',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.githubcopilot.com/',
     models: SYSTEM_MODELS.copilot,
@@ -230,7 +230,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'yi',
     name: 'Yi',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.lingyiwanwu.com',
     models: SYSTEM_MODELS.yi,
@@ -240,7 +240,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'moonshot',
     name: 'Moonshot AI',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.moonshot.cn',
     models: SYSTEM_MODELS.moonshot,
@@ -250,7 +250,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'baichuan',
     name: 'BAICHUAN AI',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.baichuan-ai.com',
     models: SYSTEM_MODELS.baichuan,
@@ -260,7 +260,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'dashscope',
     name: 'Bailian',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://dashscope.aliyuncs.com/compatible-mode/v1/',
     models: SYSTEM_MODELS.bailian,
@@ -270,7 +270,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'stepfun',
     name: 'StepFun',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.stepfun.com',
     models: SYSTEM_MODELS.stepfun,
@@ -280,7 +280,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'doubao',
     name: 'doubao',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://ark.cn-beijing.volces.com/api/v3/',
     models: SYSTEM_MODELS.doubao,
@@ -290,7 +290,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'minimax',
     name: 'MiniMax',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.minimax.chat/v1/',
     models: SYSTEM_MODELS.minimax,
@@ -300,7 +300,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'groq',
     name: 'Groq',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.groq.com/openai',
     models: SYSTEM_MODELS.groq,
@@ -310,7 +310,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'together',
     name: 'Together',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.together.xyz',
     models: SYSTEM_MODELS.together,
@@ -320,7 +320,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'fireworks',
     name: 'Fireworks',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.fireworks.ai/inference',
     models: SYSTEM_MODELS.fireworks,
@@ -330,7 +330,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'zhinao',
     name: 'zhinao',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.360.cn',
     models: SYSTEM_MODELS.zhinao,
@@ -340,7 +340,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'hunyuan',
     name: 'hunyuan',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.hunyuan.cloud.tencent.com',
     models: SYSTEM_MODELS.hunyuan,
@@ -350,7 +350,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'nvidia',
     name: 'nvidia',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://integrate.api.nvidia.com',
     models: SYSTEM_MODELS.nvidia,
@@ -360,7 +360,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'grok',
     name: 'Grok',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.x.ai',
     models: SYSTEM_MODELS.grok,
@@ -370,7 +370,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'hyperbolic',
     name: 'Hyperbolic',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.hyperbolic.xyz',
     models: SYSTEM_MODELS.hyperbolic,
@@ -380,7 +380,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'mistral',
     name: 'Mistral',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.mistral.ai',
     models: SYSTEM_MODELS.mistral,
@@ -390,7 +390,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'jina',
     name: 'Jina',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.jina.ai',
     models: SYSTEM_MODELS.jina,
@@ -400,7 +400,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'gitee-ai',
     name: 'gitee ai',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://ai.gitee.com',
     models: SYSTEM_MODELS['gitee-ai'],
@@ -410,7 +410,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'perplexity',
     name: 'Perplexity',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.perplexity.ai/',
     models: SYSTEM_MODELS.perplexity,
@@ -420,7 +420,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'modelscope',
     name: 'ModelScope',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api-inference.modelscope.cn/v1/',
     models: SYSTEM_MODELS.modelscope,
@@ -430,7 +430,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'xirang',
     name: 'Xirang',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://wishub-x1.ctyun.cn',
     models: SYSTEM_MODELS.xirang,
@@ -440,7 +440,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'tencent-cloud-ti',
     name: 'Tencent Cloud TI',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.lkeap.cloud.tencent.com',
     models: SYSTEM_MODELS['tencent-cloud-ti'],
@@ -450,7 +450,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'baidu-cloud',
     name: 'Baidu Cloud',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://qianfan.baidubce.com/v2/',
     models: SYSTEM_MODELS['baidu-cloud'],
@@ -460,7 +460,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'gpustack',
     name: 'GPUStack',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: '',
     models: SYSTEM_MODELS.gpustack,
@@ -470,7 +470,7 @@ export const INITIAL_PROVIDERS: Provider[] = [
   {
     id: 'voyageai',
     name: 'VoyageAI',
-    type: 'openai',
+    type: 'openai-compatible',
     apiKey: '',
     apiHost: 'https://api.voyageai.com',
     models: SYSTEM_MODELS.voyageai,
@@ -98,6 +98,25 @@ const formatCitationsFromBlock = (block: CitationMessageBlock | undefined): Cita
         })) || []
         break
+      case WebSearchSource.OPENAI:
+        formattedCitations =
+          (block.response.results as OpenAI.Responses.ResponseOutputText.URLCitation[])?.map((result, index) => {
+            let hostname: string | undefined
+            try {
+              hostname = result.title ? undefined : new URL(result.url).hostname
+            } catch {
+              hostname = result.url
+            }
+            return {
+              number: index + 1,
+              url: result.url,
+              title: result.title,
+              hostname: hostname,
+              showFavicon: true,
+              type: 'websearch'
+            }
+          }) || []
+        break
       case WebSearchSource.OPENAI_COMPATIBLE:
         formattedCitations =
           (block.response.results as OpenAI.Chat.Completions.ChatCompletionMessage.Annotation[])?.map((url, index) => {
            const urlCitation = url.url_citation
@@ -1240,6 +1240,18 @@ const migrateConfig = {
     } catch (error) {
       return state
     }
   },
+  '98': (state: RootState) => {
+    try {
+      state.llm.providers.forEach((provider) => {
+        if (provider.type === 'openai' && provider.id !== 'openai') {
+          provider.type = 'openai-compatible'
+        }
+      })
+      return state
+    } catch (error) {
+      return state
+    }
+  }
 }
 
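The effect of migration '98' on persisted state, sketched with an illustrative (made-up) provider list: every stored provider typed 'openai' except the official one is rewritten.

// Before migrating to store version 98 (illustrative data only):
const before = [
  { id: 'openai', type: 'openai' },
  { id: 'deepseek', type: 'openai' },
  { id: 'anthropic', type: 'anthropic' }
]
// After migration '98' runs:
//   { id: 'openai', type: 'openai' }              // official entry is untouched
//   { id: 'deepseek', type: 'openai-compatible' } // converted
//   { id: 'anthropic', type: 'anthropic' }        // other types are untouched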
@@ -157,7 +157,7 @@ export type Provider = {
   notes?: string
 }
 
-export type ProviderType = 'openai' | 'anthropic' | 'gemini' | 'qwenlm' | 'azure-openai'
+export type ProviderType = 'openai' | 'openai-compatible' | 'anthropic' | 'gemini' | 'qwenlm' | 'azure-openai'
 
 export type ModelType = 'text' | 'vision' | 'embedding' | 'reasoning' | 'function_calling' | 'web_search'
@@ -448,11 +448,13 @@ export type WebSearchResults =
   | WebSearchProviderResponse
   | GroundingMetadata
   | OpenAI.Chat.Completions.ChatCompletionMessage.Annotation.URLCitation[]
+  | OpenAI.Responses.ResponseOutputText.URLCitation[]
   | any[]
 
 export enum WebSearchSource {
   WEBSEARCH = 'websearch',
   OPENAI = 'openai',
+  OPENAI_COMPATIBLE = 'openai-compatible',
   OPENROUTER = 'openrouter',
   GEMINI = 'gemini',
   PERPLEXITY = 'perplexity',
@@ -5,6 +5,7 @@ import store from '@renderer/store'
 import { MCPCallToolResponse, MCPServer, MCPTool, MCPToolResponse } from '@renderer/types'
 import type { MCPToolCompleteChunk, MCPToolInProgressChunk } from '@renderer/types/chunk'
 import { ChunkType } from '@renderer/types/chunk'
+import OpenAI from 'openai'
 import { ChatCompletionContentPart, ChatCompletionMessageParam, ChatCompletionMessageToolCall } from 'openai/resources'
 
 import { CompletionsParams } from '../providers/AiProvider'
@@ -401,11 +402,11 @@ export async function parseAndCallTools(
     toolCallId: string,
     resp: MCPCallToolResponse,
     isVisionModel: boolean
-  ) => ChatCompletionMessageParam | MessageParam | Content,
+  ) => ChatCompletionMessageParam | MessageParam | Content | OpenAI.Responses.EasyInputMessage,
   mcpTools?: MCPTool[],
   isVisionModel: boolean = false
-): Promise<(ChatCompletionMessageParam | MessageParam | Content)[]> {
-  const toolResults: (ChatCompletionMessageParam | MessageParam | Content)[] = []
+): Promise<(ChatCompletionMessageParam | MessageParam | Content | OpenAI.Responses.EasyInputMessage)[]> {
+  const toolResults: (ChatCompletionMessageParam | MessageParam | Content | OpenAI.Responses.EasyInputMessage)[] = []
   // process tool use
   const tools = parseToolUse(content, mcpTools || [])
   if (!tools || tools.length === 0) {
@@ -448,7 +449,7 @@ export async function parseAndCallTools(
   return toolResults
 }
 
-export function mcpToolCallResponseToOpenAIMessage(
+export function mcpToolCallResponseToOpenAICompatibleMessage(
   toolCallId: string,
   resp: MCPCallToolResponse,
   isVisionModel: boolean = false
@@ -515,6 +516,62 @@ export function mcpToolCallResponseToOpenAIMessage(
   return message
 }
 
+export function mcpToolCallResponseToOpenAIMessage(
+  toolCallId: string,
+  resp: MCPCallToolResponse,
+  isVisionModel: boolean = false
+): OpenAI.Responses.EasyInputMessage {
+  const message = {
+    role: 'user'
+  } as OpenAI.Responses.EasyInputMessage
+
+  if (resp.isError) {
+    message.content = JSON.stringify(resp.content)
+  } else {
+    const content: OpenAI.Responses.ResponseInputContent[] = [
+      {
+        type: 'input_text',
+        text: `Here is the result of tool call ${toolCallId}:`
+      }
+    ]
+
+    if (isVisionModel) {
+      for (const item of resp.content) {
+        switch (item.type) {
+          case 'text':
+            content.push({
+              type: 'input_text',
+              text: item.text || 'no content'
+            })
+            break
+          case 'image':
+            content.push({
+              type: 'input_image',
+              image_url: `data:${item.mimeType};base64,${item.data}`,
+              detail: 'auto'
+            })
+            break
+          default:
+            content.push({
+              type: 'input_text',
+              text: `Unsupported type: ${item.type}`
+            })
+            break
+        }
+      }
+    } else {
+      content.push({
+        type: 'input_text',
+        text: JSON.stringify(resp.content)
+      })
+    }
+
+    message.content = content
+  }
+
+  return message
+}
+
 export function mcpToolCallResponseToAnthropicMessage(
   toolCallId: string,
   resp: MCPCallToolResponse,
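Usage sketch for the new Responses-API converter; the MCPCallToolResponse literal is a made-up example, and the import path is an assumption.

import { MCPCallToolResponse } from '@renderer/types'
import { mcpToolCallResponseToOpenAIMessage } from '@renderer/utils/mcp-tools' // assumed path

const resp = {
  isError: false,
  content: [{ type: 'text', text: '42' }]
} as MCPCallToolResponse

const msg = mcpToolCallResponseToOpenAIMessage('call_123', resp, false)
// msg.role === 'user'; msg.content starts with the input_text preamble
// 'Here is the result of tool call call_123:' followed by the JSON-stringified
// tool content (the non-vision path above).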