fix(openrouter): support GPT-5.1/5.2 reasoning effort 'none' for OpenRouter and improve error handling (#12088)

parent 89a6d817f1
commit d9171e0596
@@ -14,7 +14,6 @@ import {
   isDoubaoSeedAfter251015,
   isDoubaoThinkingAutoModel,
   isGemini3ThinkingTokenModel,
-  isGPT51SeriesModel,
   isGrok4FastReasoningModel,
   isOpenAIDeepResearchModel,
   isOpenAIModel,
@@ -32,7 +31,8 @@ import {
   isSupportedThinkingTokenMiMoModel,
   isSupportedThinkingTokenModel,
   isSupportedThinkingTokenQwenModel,
-  isSupportedThinkingTokenZhipuModel
+  isSupportedThinkingTokenZhipuModel,
+  isSupportNoneReasoningEffortModel
 } from '@renderer/config/models'
 import { getStoreSetting } from '@renderer/hooks/useSettings'
 import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
@@ -74,9 +74,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
   if (reasoningEffort === 'none') {
     // openrouter: use reasoning
     if (model.provider === SystemProviderIds.openrouter) {
-      // 'none' is not an available value for effort for now.
-      // I think they should resolve this issue soon, so I'll just go ahead and use this value.
-      if (isGPT51SeriesModel(model) && reasoningEffort === 'none') {
+      if (isSupportNoneReasoningEffortModel(model) && reasoningEffort === 'none') {
        return { reasoning: { effort: 'none' } }
      }
      return { reasoning: { enabled: false, exclude: true } }
@@ -120,8 +118,8 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
     return { thinking: { type: 'disabled' } }
   }

-  // Specially for GPT-5.1. Suppose this is a OpenAI Compatible provider
-  if (isGPT51SeriesModel(model)) {
+  // GPT 5.1, GPT 5.2, or newer
+  if (isSupportNoneReasoningEffortModel(model)) {
     return {
       reasoningEffort: 'none'
     }
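A note on the OpenRouter branch above: when the user selects 'none', the request options now take one of two shapes. A minimal TypeScript sketch of the distinction (the type name is inferred from the truncated hunk header and is an assumption):

// Sketch only: the two shapes getReasoningEffort can return for OpenRouter + 'none'
type OpenRouterNoneParams =
  | { reasoning: { effort: 'none' } } // GPT-5.1/5.2 reasoning models: pass 'none' through
  | { reasoning: { enabled: false; exclude: true } } // all other models: disable and hide reasoning instead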
src/renderer/src/config/models/__tests__/openai.test.ts (new file, 139 lines)

@@ -0,0 +1,139 @@
+import type { Model } from '@renderer/types'
+import { describe, expect, it, vi } from 'vitest'
+
+import { isSupportNoneReasoningEffortModel } from '../openai'
+
+// Mock store and settings to avoid initialization issues
+vi.mock('@renderer/store', () => ({
+  __esModule: true,
+  default: {
+    getState: () => ({
+      llm: { providers: [] },
+      settings: {}
+    })
+  }
+}))
+
+vi.mock('@renderer/hooks/useStore', () => ({
+  getStoreProviders: vi.fn(() => [])
+}))
+
+const createModel = (overrides: Partial<Model> = {}): Model => ({
+  id: 'gpt-4o',
+  name: 'gpt-4o',
+  provider: 'openai',
+  group: 'OpenAI',
+  ...overrides
+})
+
+describe('OpenAI Model Detection', () => {
+  describe('isSupportNoneReasoningEffortModel', () => {
+    describe('should return true for GPT-5.1 and GPT-5.2 reasoning models', () => {
+      it('returns true for GPT-5.1 base model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1' }))).toBe(true)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.1' }))).toBe(true)
+      })
+
+      it('returns true for GPT-5.1 mini model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-mini' }))).toBe(true)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-mini-preview' }))).toBe(true)
+      })
+
+      it('returns true for GPT-5.1 preview model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-preview' }))).toBe(true)
+      })
+
+      it('returns true for GPT-5.2 base model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2' }))).toBe(true)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.2' }))).toBe(true)
+      })
+
+      it('returns true for GPT-5.2 mini model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-mini' }))).toBe(true)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-mini-preview' }))).toBe(true)
+      })
+
+      it('returns true for GPT-5.2 preview model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-preview' }))).toBe(true)
+      })
+    })
+
+    describe('should return false for pro variants', () => {
+      it('returns false for GPT-5.1-pro models', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-pro' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.1-Pro' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-pro-preview' }))).toBe(false)
+      })
+
+      it('returns false for GPT-5.2-pro models', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-pro' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.2-Pro' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-pro-preview' }))).toBe(false)
+      })
+    })
+
+    describe('should return false for chat variants', () => {
+      it('returns false for GPT-5.1-chat models', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-chat' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.1-Chat' }))).toBe(false)
+      })
+
+      it('returns false for GPT-5.2-chat models', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-chat' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.2-Chat' }))).toBe(false)
+      })
+    })
+
+    describe('should return false for GPT-5 series (non-5.1/5.2)', () => {
+      it('returns false for GPT-5 base model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5' }))).toBe(false)
+      })
+
+      it('returns false for GPT-5 pro model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5-pro' }))).toBe(false)
+      })
+
+      it('returns false for GPT-5 preview model', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5-preview' }))).toBe(false)
+      })
+    })
+
+    describe('should return false for other OpenAI models', () => {
+      it('returns false for GPT-4 models', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-4o' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-4-turbo' }))).toBe(false)
+      })
+
+      it('returns false for o1 models', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o1' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o1-mini' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o1-preview' }))).toBe(false)
+      })
+
+      it('returns false for o3 models', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o3' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'o3-mini' }))).toBe(false)
+      })
+    })
+
+    describe('edge cases', () => {
+      it('handles models with version suffixes', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-2025-01-01' }))).toBe(true)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.2-latest' }))).toBe(true)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'gpt-5.1-pro-2025-01-01' }))).toBe(false)
+      })
+
+      it('handles models with OpenRouter prefixes', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'openai/gpt-5.1' }))).toBe(true)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'openai/gpt-5.2-mini' }))).toBe(true)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'openai/gpt-5.1-pro' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'openai/gpt-5.1-chat' }))).toBe(false)
+      })
+
+      it('handles mixed case with chat and pro', () => {
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.1-CHAT' }))).toBe(false)
+        expect(isSupportNoneReasoningEffortModel(createModel({ id: 'GPT-5.2-PRO' }))).toBe(false)
+      })
+    })
+  })
+})
@@ -77,6 +77,34 @@ export function isSupportVerbosityModel(model: Model): boolean {
   )
 }

+/**
+ * Determines if a model supports the "none" reasoning effort parameter.
+ *
+ * This applies to GPT-5.1 and GPT-5.2 series reasoning models (non-chat, non-pro variants).
+ * These models allow setting reasoning_effort to "none" to skip reasoning steps.
+ *
+ * @param model - The model to check
+ * @returns true if the model supports "none" reasoning effort, false otherwise
+ *
+ * @example
+ * ```ts
+ * // Returns true
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5.1', provider: 'openai' })
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5.2-mini', provider: 'openai' })
+ *
+ * // Returns false
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5.1-pro', provider: 'openai' })
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5.1-chat', provider: 'openai' })
+ * isSupportNoneReasoningEffortModel({ id: 'gpt-5-pro', provider: 'openai' })
+ * ```
+ */
+export function isSupportNoneReasoningEffortModel(model: Model): boolean {
+  const modelId = getLowerBaseModelName(model.id)
+  return (
+    (isGPT51SeriesModel(model) || isGPT52SeriesModel(model)) && !modelId.includes('chat') && !modelId.includes('pro')
+  )
+}
+
 export function isOpenAIChatCompletionOnlyModel(model: Model): boolean {
   if (!model) {
     return false
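The helpers isGPT51SeriesModel, isGPT52SeriesModel, and getLowerBaseModelName are imported from elsewhere in @renderer/config/models and are not shown in this commit. A hypothetical sketch consistent with the tests above (lowercase, strip an 'openai/' style prefix, prefix-match the series); the bodies are assumptions, not the repository's code:

// Hypothetical sketches; the real implementations may differ.
const getLowerBaseModelName = (id: string): string => id.toLowerCase().split('/').pop() ?? id.toLowerCase()

const isGPT51SeriesModel = (model: { id: string }): boolean =>
  getLowerBaseModelName(model.id).startsWith('gpt-5.1') // 'openai/GPT-5.1-mini' -> true, 'gpt-5' -> false

const isGPT52SeriesModel = (model: { id: string }): boolean =>
  getLowerBaseModelName(model.id).startsWith('gpt-5.2')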
@@ -30,8 +30,7 @@ import {
 } from '@renderer/types'
 import { getFileExtension, isTextFile, runAsyncFunction, uuid } from '@renderer/utils'
 import { abortCompletion } from '@renderer/utils/abortController'
-import { isAbortError } from '@renderer/utils/error'
-import { formatErrorMessage } from '@renderer/utils/error'
+import { formatErrorMessageWithPrefix, isAbortError } from '@renderer/utils/error'
 import { getFilesFromDropEvent, getTextFromDropEvent } from '@renderer/utils/input'
 import {
   createInputScrollHandler,
@@ -181,7 +180,7 @@ const TranslatePage: FC = () => {
         window.toast.info(t('translate.info.aborted'))
       } else {
         logger.error('Failed to translate text', e as Error)
-        window.toast.error(t('translate.error.failed') + ': ' + formatErrorMessage(e))
+        window.toast.error(formatErrorMessageWithPrefix(e, t('translate.error.failed')))
       }
       setTranslating(false)
       return
@@ -202,11 +201,11 @@ const TranslatePage: FC = () => {
         await saveTranslateHistory(text, translated, actualSourceLanguage.langCode, actualTargetLanguage.langCode)
       } catch (e) {
         logger.error('Failed to save translate history', e as Error)
-        window.toast.error(t('translate.history.error.save') + ': ' + formatErrorMessage(e))
+        window.toast.error(formatErrorMessageWithPrefix(e, t('translate.history.error.save')))
       }
     } catch (e) {
       logger.error('Failed to translate', e as Error)
-      window.toast.error(t('translate.error.unknown') + ': ' + formatErrorMessage(e))
+      window.toast.error(formatErrorMessageWithPrefix(e, t('translate.error.unknown')))
     }
   },
   [autoCopy, copy, dispatch, setTimeoutTimer, setTranslatedContent, setTranslating, t, translating]
@@ -266,7 +265,7 @@ const TranslatePage: FC = () => {
       await translate(text, actualSourceLanguage, actualTargetLanguage)
     } catch (error) {
       logger.error('Translation error:', error as Error)
-      window.toast.error(t('translate.error.failed') + ': ' + formatErrorMessage(error))
+      window.toast.error(formatErrorMessageWithPrefix(error, t('translate.error.failed')))
       return
     } finally {
       setTranslating(false)
@@ -427,7 +426,7 @@ const TranslatePage: FC = () => {
       setAutoDetectionMethod(method)
     } catch (e) {
       logger.error('Failed to update auto detection method setting.', e as Error)
-      window.toast.error(t('translate.error.detect.update_setting') + formatErrorMessage(e))
+      window.toast.error(formatErrorMessageWithPrefix(e, t('translate.error.detect.update_setting')))
     }
   }

@@ -498,7 +497,7 @@ const TranslatePage: FC = () => {
           isText = await isTextFile(file.path)
         } catch (e) {
           logger.error('Failed to check file type.', e as Error)
-          window.toast.error(t('translate.files.error.check_type') + ': ' + formatErrorMessage(e))
+          window.toast.error(formatErrorMessageWithPrefix(e, t('translate.files.error.check_type')))
           return
         }
       } else {
@@ -530,11 +529,11 @@ const TranslatePage: FC = () => {
         setText(text + result)
       } catch (e) {
         logger.error('Failed to read file.', e as Error)
-        window.toast.error(t('translate.files.error.unknown') + ': ' + formatErrorMessage(e))
+        window.toast.error(formatErrorMessageWithPrefix(e, t('translate.files.error.unknown')))
       }
     } catch (e) {
       logger.error('Failed to read file.', e as Error)
-      window.toast.error(t('translate.files.error.unknown') + ': ' + formatErrorMessage(e))
+      window.toast.error(formatErrorMessageWithPrefix(e, t('translate.files.error.unknown')))
     }
   }
   const promise = _readFile()
@@ -578,7 +577,7 @@ const TranslatePage: FC = () => {
       await processFile(file)
     } catch (e) {
       logger.error('Unknown error when selecting file.', e as Error)
-      window.toast.error(t('translate.files.error.unknown') + ': ' + formatErrorMessage(e))
+      window.toast.error(formatErrorMessageWithPrefix(e, t('translate.files.error.unknown')))
     } finally {
       clearFiles()
       setIsProcessing(false)
@@ -42,7 +42,7 @@ export const translateText = async (
   abortKey?: string,
   options?: TranslateOptions
 ) => {
-  let abortError
+  let error
   const assistantSettings: Partial<AssistantSettings> | undefined = options
     ? { reasoning_effort: options?.reasoningEffort }
     : undefined
@@ -58,8 +58,8 @@ export const translateText = async (
     } else if (chunk.type === ChunkType.TEXT_COMPLETE) {
       completed = true
     } else if (chunk.type === ChunkType.ERROR) {
+      error = chunk.error
       if (isAbortError(chunk.error)) {
-        abortError = chunk.error
         completed = true
       }
     }
@@ -84,8 +84,8 @@ export const translateText = async (
     }
   }

-  if (abortError) {
-    throw abortError
+  if (error !== undefined && !isAbortError(error)) {
+    throw error
   }

   const trimmedText = translatedText.trim()
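Behavior note: translateText previously rethrew only the abort error; it now records any stream error and rethrows it unless it was an abort. A minimal caller sketch under that assumption (the argument list is abridged; only the names shown in the diff are taken from the source):

// Sketch: callers now see non-abort stream failures as exceptions
try {
  const translated = await translateText(text /* ...remaining args as in the real signature */)
  console.log(translated)
} catch (e) {
  // Aborts are swallowed inside translateText; anything caught here is a real failure
  window.toast.error(formatErrorMessageWithPrefix(e, t('translate.error.failed')))
}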
@@ -1,3 +1,4 @@
+import { loggerService } from '@logger'
 import type { McpError } from '@modelcontextprotocol/sdk/types.js'
 import type { AgentServerError } from '@renderer/types'
 import { AgentServerErrorSchema } from '@renderer/types'
@@ -20,7 +21,7 @@ import { ZodError } from 'zod'
 import { parseJSON } from './json'
 import { safeSerialize } from './serialize'

-// const logger = loggerService.withContext('Utils:error')
+const logger = loggerService.withContext('Utils:error')

 export function getErrorDetails(err: any, seen = new WeakSet()): any {
   // Handle circular references
@@ -65,11 +66,16 @@ export function formatErrorMessage(error: unknown): string {
   delete detailedError?.stack
   delete detailedError?.request_id

-  const formattedJson = JSON.stringify(detailedError, null, 2)
-    .split('\n')
-    .map((line) => ` ${line}`)
-    .join('\n')
-  return detailedError.message ? detailedError.message : `Error Details:\n${formattedJson}`
+  if (detailedError) {
+    const formattedJson = JSON.stringify(detailedError, null, 2)
+      .split('\n')
+      .map((line) => ` ${line}`)
+      .join('\n')
+    return detailedError.message ? detailedError.message : `Error Details:\n${formattedJson}`
+  } else {
+    logger.warn('Get detailed error failed.')
+    return ''
+  }
 }

 export function getErrorMessage(error: unknown): string {
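formatErrorMessageWithPrefix itself is not shown in this commit view. A sketch consistent with the call sites it replaces (previously prefix + ': ' + formatErrorMessage(e)) and with formatErrorMessage now returning '' when no details are recoverable; this is an assumption, not the repository's implementation:

// Hypothetical implementation inferred from call sites; the real one lives in @renderer/utils/error
export function formatErrorMessageWithPrefix(error: unknown, prefix: string): string {
  const message = formatErrorMessage(error)
  // Skip the ': ' separator when formatErrorMessage returns an empty string
  return message ? `${prefix}: ${message}` : prefix
}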