Merge remote-tracking branch 'origin/main' into feat/proxy-api-server
commit f4a1eeed0e
@@ -1,5 +1,5 @@
diff --git a/dist/index.js b/dist/index.js
index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867bbff5e1f 100644
index d004b415c5841a1969705823614f395265ea5a8a..6b1e0dad4610b0424393ecc12e9114723bbe316b 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -474,7 +474,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -12,7 +12,7 @@ index 51ce7e423934fb717cb90245cdfcdb3dae6780e6..0f7f7009e2f41a79a8669d38c8a44867

// src/google-generative-ai-options.ts
diff --git a/dist/index.mjs b/dist/index.mjs
index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b6036d4014b6 100644
index 1780dd2391b7f42224a0b8048c723d2f81222c44..1f12ed14399d6902107ce9b435d7d8e6cc61e06b 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -480,7 +480,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
@@ -24,3 +24,14 @@ index f4b77e35c0cbfece85a3ef0d4f4e67aa6dde6271..8d2fecf8155a226006a0bde72b00b603
}

// src/google-generative-ai-options.ts
@@ -1909,8 +1909,7 @@ function createGoogleGenerativeAI(options = {}) {
}
var google = createGoogleGenerativeAI();
export {
- VERSION,
createGoogleGenerativeAI,
- google
+ google, VERSION
};
//# sourceMappingURL=index.mjs.map
\ No newline at end of file

@@ -114,8 +114,8 @@
"@ai-sdk/anthropic": "^2.0.49",
"@ai-sdk/cerebras": "^1.0.31",
"@ai-sdk/gateway": "^2.0.15",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch",
"@ai-sdk/google-vertex": "^3.0.79",
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch",
"@ai-sdk/google-vertex": "^3.0.94",
"@ai-sdk/huggingface": "^0.0.10",
"@ai-sdk/mistral": "^2.0.24",
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
@@ -416,7 +416,8 @@
"@langchain/openai@npm:>=0.2.0 <0.7.0": "patch:@langchain/openai@npm%3A1.0.0#~/.yarn/patches/@langchain-openai-npm-1.0.0-474d0ad9d4.patch",
"@ai-sdk/openai@npm:^2.0.42": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch",
"@ai-sdk/google@npm:^2.0.40": "patch:@ai-sdk/google@npm%3A2.0.40#~/.yarn/patches/@ai-sdk-google-npm-2.0.40-47e0eeee83.patch",
"@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch"
"@ai-sdk/openai-compatible@npm:^1.0.27": "patch:@ai-sdk/openai-compatible@npm%3A1.0.27#~/.yarn/patches/@ai-sdk-openai-compatible-npm-1.0.27-06f74278cf.patch",
"@ai-sdk/google@npm:2.0.49": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch"
},
"packageManager": "yarn@4.9.1",
"lint-staged": {

@@ -69,7 +69,7 @@ export abstract class OpenAIBaseClient<
const sdk = await this.getSdkInstance()
const response = (await sdk.request({
method: 'post',
path: '/images/generations',
path: '/v1/images/generations',
signal,
body: {
model,

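The hunk above adjusts the renderer's image generation request path to the '/v1/images/generations' form. As a hedged illustration of what that path means against a plain OpenAI-compatible HTTP server (the client above goes through the SDK's request helper instead), here is a minimal sketch; the base URL, API key variable and the 'gpt-image-1' model name are placeholders, not values taken from this commit:

// Hedged sketch, not the client code above: a direct POST to an
// OpenAI-compatible endpoint using the '/v1/images/generations' path.
// BASE_URL, OPENAI_API_KEY and 'gpt-image-1' are placeholders.
const BASE_URL = process.env.OPENAI_BASE_URL ?? 'https://api.openai.com'
const API_KEY = process.env.OPENAI_API_KEY ?? ''

async function generateImage(prompt: string): Promise<unknown> {
  const res = await fetch(`${BASE_URL}/v1/images/generations`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      Authorization: `Bearer ${API_KEY}`
    },
    body: JSON.stringify({ model: 'gpt-image-1', prompt })
  })
  if (!res.ok) throw new Error(`images/generations failed with HTTP ${res.status}`)
  return res.json()
}

generateImage('a watercolor fox').then((data) => console.log(data))
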
@@ -36,7 +36,7 @@ import {
} from '@renderer/config/models'
import { getStoreSetting } from '@renderer/hooks/useSettings'
import { getAssistantSettings, getProviderByModel } from '@renderer/services/AssistantService'
import type { Assistant, Model } from '@renderer/types'
import type { Assistant, Model, ReasoningEffortOption } from '@renderer/types'
import { EFFORT_RATIO, isSystemProvider, SystemProviderIds } from '@renderer/types'
import type { OpenAIReasoningSummary } from '@renderer/types/aiCoreTypes'
import type { ReasoningEffortOptionalParams } from '@renderer/types/sdk'
@@ -539,20 +539,25 @@ export function getAnthropicReasoningParams(
return {}
}

// type GoogleThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']
type GoogleThinkingLevel = NonNullable<GoogleGenerativeAIProviderOptions['thinkingConfig']>['thinkingLevel']

// function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogelThinkingLevel {
// switch (reasoningEffort) {
// case 'low':
// return 'low'
// case 'medium':
// return 'medium'
// case 'high':
// return 'high'
// default:
// return 'medium'
// }
// }
function mapToGeminiThinkingLevel(reasoningEffort: ReasoningEffortOption): GoogleThinkingLevel {
switch (reasoningEffort) {
case 'default':
return undefined
case 'minimal':
return 'minimal'
case 'low':
return 'low'
case 'medium':
return 'medium'
case 'high':
return 'high'
default:
logger.warn('Unknown thinking level for Gemini. Fallback to medium instead.', { reasoningEffort })
return 'medium'
}
}

/**
* 获取 Gemini 推理参数
@@ -585,15 +590,15 @@ export function getGeminiReasoningParams(
}
}

// TODO: 很多中转还不支持
// https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#new_api_features_in_gemini_3
// if (isGemini3ThinkingTokenModel(model)) {
// return {
// thinkingConfig: {
// thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
// }
// }
// }
if (isGemini3ThinkingTokenModel(model)) {
return {
thinkingConfig: {
includeThoughts: true,
thinkingLevel: mapToGeminiThinkingLevel(reasoningEffort)
}
}
}

const effortRatio = EFFORT_RATIO[reasoningEffort]

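For context on where the thinkingConfig built above ends up: getGeminiReasoningParams feeds the Google provider options that the renderer hands to the AI SDK. A minimal sketch under two assumptions: the patched @ai-sdk/google pinned by this commit (which accepts thinkingLevel), and a placeholder model id and prompt.

import { google } from '@ai-sdk/google'
import { streamText } from 'ai'

// Sketch only: 'gemini-3-flash-preview' and the prompt are placeholders, and
// `thinkingLevel` relies on the patched @ai-sdk/google pinned in package.json.
async function main() {
  const result = streamText({
    model: google('gemini-3-flash-preview'),
    prompt: 'Summarize the Doppler effect in two sentences.',
    providerOptions: {
      google: {
        thinkingConfig: {
          includeThoughts: true,
          thinkingLevel: 'high'
        }
      }
    }
  })

  for await (const textPart of result.textStream) {
    process.stdout.write(textPart)
  }
}

main()
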
@@ -695,15 +695,20 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
})

describe('Gemini models', () => {
it('should return gemini for Flash models', () => {
expect(getThinkModelType(createModel({ id: 'gemini-2.5-flash-latest' }))).toBe('gemini')
expect(getThinkModelType(createModel({ id: 'gemini-flash-latest' }))).toBe('gemini')
expect(getThinkModelType(createModel({ id: 'gemini-flash-lite-latest' }))).toBe('gemini')
it('should return gemini2_flash for Flash models', () => {
expect(getThinkModelType(createModel({ id: 'gemini-2.5-flash-latest' }))).toBe('gemini2_flash')
})
it('should return gemini3_flash for Gemini 3 Flash models', () => {
expect(getThinkModelType(createModel({ id: 'gemini-3-flash-preview' }))).toBe('gemini3_flash')
expect(getThinkModelType(createModel({ id: 'gemini-flash-latest' }))).toBe('gemini3_flash')
})

it('should return gemini_pro for Pro models', () => {
expect(getThinkModelType(createModel({ id: 'gemini-2.5-pro-latest' }))).toBe('gemini_pro')
expect(getThinkModelType(createModel({ id: 'gemini-pro-latest' }))).toBe('gemini_pro')
it('should return gemini2_pro for Gemini 2.5 Pro models', () => {
expect(getThinkModelType(createModel({ id: 'gemini-2.5-pro-latest' }))).toBe('gemini2_pro')
})
it('should return gemini3_pro for Gemini 3 Pro models', () => {
expect(getThinkModelType(createModel({ id: 'gemini-3-pro-preview' }))).toBe('gemini3_pro')
expect(getThinkModelType(createModel({ id: 'gemini-pro-latest' }))).toBe('gemini3_pro')
})
})

@@ -810,7 +815,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
name: 'gemini-2.5-flash-latest'
})
)
).toBe('gemini')
).toBe('gemini2_flash')
})

it('should use id result when id matches', () => {
@@ -835,7 +840,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {

it('should handle case insensitivity correctly', () => {
expect(getThinkModelType(createModel({ id: 'GPT-5.1' }))).toBe('gpt5_1')
expect(getThinkModelType(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toBe('gemini')
expect(getThinkModelType(createModel({ id: 'Gemini-2.5-Flash-Latest' }))).toBe('gemini2_flash')
expect(getThinkModelType(createModel({ id: 'DeepSeek-V3.1' }))).toBe('deepseek_hybrid')
})

@@ -855,7 +860,7 @@ describe('getThinkModelType - Comprehensive Coverage', () => {
it('should handle models with version suffixes', () => {
expect(getThinkModelType(createModel({ id: 'gpt-5-preview-2024' }))).toBe('gpt5')
expect(getThinkModelType(createModel({ id: 'o3-mini-2024' }))).toBe('o')
expect(getThinkModelType(createModel({ id: 'gemini-2.5-flash-latest-001' }))).toBe('gemini')
expect(getThinkModelType(createModel({ id: 'gemini-2.5-flash-latest-001' }))).toBe('gemini2_flash')
})

it('should prioritize GPT-5.1 over GPT-5 detection', () => {
@@ -955,6 +960,14 @@ describe('Gemini Models', () => {
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-flash-preview',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'google/gemini-3-pro-preview',
@@ -996,6 +1009,31 @@ describe('Gemini Models', () => {
group: ''
})
).toBe(true)
// Version with date suffixes
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-flash-preview-09-2025',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-pro-preview-09-2025',
name: '',
provider: '',
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-flash-exp-1234',
name: '',
provider: '',
group: ''
})
).toBe(true)
// Version with decimals
expect(
isSupportedThinkingTokenGeminiModel({
@@ -1015,7 +1053,8 @@ describe('Gemini Models', () => {
).toBe(true)
})

it('should return true for gemini-3 image models', () => {
it('should return true for gemini-3-pro-image models only', () => {
// Only gemini-3-pro-image models should return true
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-pro-image-preview',
@@ -1024,6 +1063,17 @@ describe('Gemini Models', () => {
group: ''
})
).toBe(true)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-pro-image',
name: '',
provider: '',
group: ''
})
).toBe(true)
})

it('should return false for other gemini-3 image models', () => {
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3.0-flash-image-preview',
@@ -1086,6 +1136,22 @@ describe('Gemini Models', () => {
group: ''
})
).toBe(false)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-flash-preview-tts',
name: '',
provider: '',
group: ''
})
).toBe(false)
expect(
isSupportedThinkingTokenGeminiModel({
id: 'gemini-3-pro-tts',
name: '',
provider: '',
group: ''
})
).toBe(false)
})

it('should return false for older gemini models', () => {
@@ -1811,7 +1877,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {

describe('Gemini models', () => {
it('should return correct options for Gemini Flash models', () => {
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-flash-latest' }))).toEqual([
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-flash' }))).toEqual([
'default',
'none',
'low',
@@ -1819,36 +1885,46 @@ describe('getModelSupportedReasoningEffortOptions', () => {
'high',
'auto'
])
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-flash-latest' }))).toEqual([
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-flash-preview' }))).toEqual([
'default',
'none',
'minimal',
'low',
'medium',
'high',
'auto'
'high'
])
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-flash-latest' }))).toEqual([
'default',
'minimal',
'low',
'medium',
'high'
])
})

it('should return correct options for Gemini Pro models', () => {
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-pro-latest' }))).toEqual([
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-2.5-pro' }))).toEqual([
'default',
'low',
'medium',
'high',
'auto'
])
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-pro-preview' }))).toEqual([
'default',
'low',
'high'
])
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-pro-latest' }))).toEqual([
'default',
'low',
'medium',
'high',
'auto'
'high'
])
})

it('should return correct options for Gemini 3 models', () => {
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-flash' }))).toEqual([
'default',
'minimal',
'low',
'medium',
'high'
@@ -1856,7 +1932,6 @@ describe('getModelSupportedReasoningEffortOptions', () => {
expect(getModelSupportedReasoningEffortOptions(createModel({ id: 'gemini-3-pro-preview' }))).toEqual([
'default',
'low',
'medium',
'high'
])
})
@@ -2078,7 +2153,7 @@ describe('getModelSupportedReasoningEffortOptions', () => {

const geminiModel = createModel({ id: 'gemini-2.5-flash-latest' })
const geminiResult = getModelSupportedReasoningEffortOptions(geminiModel)
expect(geminiResult).toEqual(MODEL_SUPPORTED_OPTIONS.gemini)
expect(geminiResult).toEqual(MODEL_SUPPORTED_OPTIONS.gemini2_flash)
})
})
})

@@ -21,6 +21,8 @@ import {
getModelSupportedVerbosity,
groupQwenModels,
isAnthropicModel,
isGemini3FlashModel,
isGemini3ProModel,
isGeminiModel,
isGemmaModel,
isGenerateImageModels,
@@ -462,6 +464,101 @@ describe('model utils', () => {
})
})

describe('isGemini3FlashModel', () => {
it('detects gemini-3-flash model', () => {
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash' }))).toBe(true)
})

it('detects gemini-3-flash-preview model', () => {
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-preview' }))).toBe(true)
})

it('detects gemini-3-flash with version suffixes', () => {
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-latest' }))).toBe(true)
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-preview-09-2025' }))).toBe(true)
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-exp-1234' }))).toBe(true)
})

it('detects gemini-flash-latest alias', () => {
expect(isGemini3FlashModel(createModel({ id: 'gemini-flash-latest' }))).toBe(true)
expect(isGemini3FlashModel(createModel({ id: 'Gemini-Flash-Latest' }))).toBe(true)
})

it('detects gemini-3-flash with uppercase', () => {
expect(isGemini3FlashModel(createModel({ id: 'Gemini-3-Flash' }))).toBe(true)
expect(isGemini3FlashModel(createModel({ id: 'GEMINI-3-FLASH-PREVIEW' }))).toBe(true)
})

it('excludes gemini-3-flash-image models', () => {
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-image-preview' }))).toBe(false)
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-flash-image' }))).toBe(false)
})

it('returns false for non-flash gemini-3 models', () => {
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-pro' }))).toBe(false)
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-pro-preview' }))).toBe(false)
expect(isGemini3FlashModel(createModel({ id: 'gemini-3-pro-image-preview' }))).toBe(false)
})

it('returns false for other gemini models', () => {
expect(isGemini3FlashModel(createModel({ id: 'gemini-2-flash' }))).toBe(false)
expect(isGemini3FlashModel(createModel({ id: 'gemini-2-flash-preview' }))).toBe(false)
expect(isGemini3FlashModel(createModel({ id: 'gemini-2.5-flash-preview-09-2025' }))).toBe(false)
})

it('returns false for null/undefined models', () => {
expect(isGemini3FlashModel(null)).toBe(false)
expect(isGemini3FlashModel(undefined)).toBe(false)
})
})

describe('isGemini3ProModel', () => {
it('detects gemini-3-pro model', () => {
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro' }))).toBe(true)
})

it('detects gemini-3-pro-preview model', () => {
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-preview' }))).toBe(true)
})

it('detects gemini-3-pro with version suffixes', () => {
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-latest' }))).toBe(true)
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-preview-09-2025' }))).toBe(true)
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-exp-1234' }))).toBe(true)
})

it('detects gemini-pro-latest alias', () => {
expect(isGemini3ProModel(createModel({ id: 'gemini-pro-latest' }))).toBe(true)
expect(isGemini3ProModel(createModel({ id: 'Gemini-Pro-Latest' }))).toBe(true)
})

it('detects gemini-3-pro with uppercase', () => {
expect(isGemini3ProModel(createModel({ id: 'Gemini-3-Pro' }))).toBe(true)
expect(isGemini3ProModel(createModel({ id: 'GEMINI-3-PRO-PREVIEW' }))).toBe(true)
})

it('excludes gemini-3-pro-image models', () => {
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-image-preview' }))).toBe(false)
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-image' }))).toBe(false)
expect(isGemini3ProModel(createModel({ id: 'gemini-3-pro-image-latest' }))).toBe(false)
})

it('returns false for non-pro gemini-3 models', () => {
expect(isGemini3ProModel(createModel({ id: 'gemini-3-flash' }))).toBe(false)
expect(isGemini3ProModel(createModel({ id: 'gemini-3-flash-preview' }))).toBe(false)
})

it('returns false for other gemini models', () => {
expect(isGemini3ProModel(createModel({ id: 'gemini-2-pro' }))).toBe(false)
expect(isGemini3ProModel(createModel({ id: 'gemini-2.5-pro-preview-09-2025' }))).toBe(false)
})

it('returns false for null/undefined models', () => {
expect(isGemini3ProModel(null)).toBe(false)
expect(isGemini3ProModel(undefined)).toBe(false)
})
})

describe('isZhipuModel', () => {
it('detects Zhipu models by provider', () => {
expect(isZhipuModel(createModel({ provider: 'zhipu' }))).toBe(true)

@@ -20,7 +20,7 @@ import {
isOpenAIReasoningModel,
isSupportedReasoningEffortOpenAIModel
} from './openai'
import { GEMINI_FLASH_MODEL_REGEX, isGemini3ThinkingTokenModel } from './utils'
import { GEMINI_FLASH_MODEL_REGEX, isGemini3FlashModel, isGemini3ProModel } from './utils'
import { isTextToImageModel } from './vision'

// Reasoning models
@@ -43,9 +43,10 @@ export const MODEL_SUPPORTED_REASONING_EFFORT = {
gpt52pro: ['medium', 'high', 'xhigh'] as const,
grok: ['low', 'high'] as const,
grok4_fast: ['auto'] as const,
gemini: ['low', 'medium', 'high', 'auto'] as const,
gemini3: ['low', 'medium', 'high'] as const,
gemini_pro: ['low', 'medium', 'high', 'auto'] as const,
gemini2_flash: ['low', 'medium', 'high', 'auto'] as const,
gemini2_pro: ['low', 'medium', 'high', 'auto'] as const,
gemini3_flash: ['minimal', 'low', 'medium', 'high'] as const,
gemini3_pro: ['low', 'high'] as const,
qwen: ['low', 'medium', 'high'] as const,
qwen_thinking: ['low', 'medium', 'high'] as const,
doubao: ['auto', 'high'] as const,
@@ -53,6 +54,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT = {
doubao_after_251015: ['minimal', 'low', 'medium', 'high'] as const,
hunyuan: ['auto'] as const,
mimo: ['auto'] as const,
mimo: ['auto'] as const,
zhipu: ['auto'] as const,
perplexity: ['low', 'medium', 'high'] as const,
deepseek_hybrid: ['auto'] as const
@@ -73,9 +75,10 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
gpt52pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gpt52pro] as const,
grok: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.grok] as const,
grok4_fast: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.grok4_fast] as const,
gemini: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
gemini_pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro] as const,
gemini3: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini3] as const,
gemini2_flash: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini2_flash] as const,
gemini2_pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini2_pro] as const,
gemini3_flash: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini3_flash] as const,
gemini3_pro: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini3_pro] as const,
qwen: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen] as const,
qwen_thinking: ['default', ...MODEL_SUPPORTED_REASONING_EFFORT.qwen_thinking] as const,
doubao: ['default', 'none', ...MODEL_SUPPORTED_REASONING_EFFORT.doubao] as const,
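The options table above is derived mechanically from the effort table: every family gains a leading 'default', and families that can disable thinking entirely also gain 'none'. A trimmed-down sketch of that pattern, limited to the Gemini entries from this hunk (the real maps carry many more families):

// Sketch of the derivation pattern, using only the Gemini rows shown above.
const EFFORT = {
  gemini2_flash: ['low', 'medium', 'high', 'auto'],
  gemini3_flash: ['minimal', 'low', 'medium', 'high'],
  gemini3_pro: ['low', 'high']
} as const

const OPTIONS = {
  // 'none' is only offered where the model can switch thinking off entirely
  gemini2_flash: ['default', 'none', ...EFFORT.gemini2_flash],
  gemini3_flash: ['default', ...EFFORT.gemini3_flash],
  gemini3_pro: ['default', ...EFFORT.gemini3_pro]
}

console.log(OPTIONS.gemini3_flash)
// -> ['default', 'minimal', 'low', 'medium', 'high']
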
@@ -102,8 +105,7 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
const modelId = getLowerBaseModelName(model.id)
if (isOpenAIDeepResearchModel(model)) {
return 'openai_deep_research'
}
if (isGPT51SeriesModel(model)) {
} else if (isGPT51SeriesModel(model)) {
if (modelId.includes('codex')) {
thinkingModelType = 'gpt5_1_codex'
if (isGPT51CodexMaxModel(model)) {
@@ -131,16 +133,18 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
} else if (isGrok4FastReasoningModel(model)) {
thinkingModelType = 'grok4_fast'
} else if (isSupportedThinkingTokenGeminiModel(model)) {
if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
thinkingModelType = 'gemini'
if (isGemini3FlashModel(model)) {
thinkingModelType = 'gemini3_flash'
} else if (isGemini3ProModel(model)) {
thinkingModelType = 'gemini3_pro'
} else if (GEMINI_FLASH_MODEL_REGEX.test(model.id)) {
thinkingModelType = 'gemini2_flash'
} else {
thinkingModelType = 'gemini_pro'
thinkingModelType = 'gemini2_pro'
}
if (isGemini3ThinkingTokenModel(model)) {
thinkingModelType = 'gemini3'
}
} else if (isSupportedReasoningEffortGrokModel(model)) thinkingModelType = 'grok'
else if (isSupportedThinkingTokenQwenModel(model)) {
} else if (isSupportedReasoningEffortGrokModel(model)) {
thinkingModelType = 'grok'
} else if (isSupportedThinkingTokenQwenModel(model)) {
if (isQwenAlwaysThinkModel(model)) {
thinkingModelType = 'qwen_thinking'
}
@@ -153,11 +157,17 @@ const _getThinkModelType = (model: Model): ThinkingModelType => {
} else {
thinkingModelType = 'doubao_no_auto'
}
} else if (isSupportedThinkingTokenHunyuanModel(model)) thinkingModelType = 'hunyuan'
else if (isSupportedReasoningEffortPerplexityModel(model)) thinkingModelType = 'perplexity'
else if (isSupportedThinkingTokenZhipuModel(model)) thinkingModelType = 'zhipu'
else if (isDeepSeekHybridInferenceModel(model)) thinkingModelType = 'deepseek_hybrid'
else if (isSupportedThinkingTokenMiMoModel(model)) thinkingModelType = 'mimo'
} else if (isSupportedThinkingTokenHunyuanModel(model)) {
thinkingModelType = 'hunyuan'
} else if (isSupportedReasoningEffortPerplexityModel(model)) {
thinkingModelType = 'perplexity'
} else if (isSupportedThinkingTokenZhipuModel(model)) {
thinkingModelType = 'zhipu'
} else if (isDeepSeekHybridInferenceModel(model)) {
thinkingModelType = 'deepseek_hybrid'
} else if (isSupportedThinkingTokenMiMoModel(model)) {
thinkingModelType = 'mimo'
}
return thinkingModelType
}

@@ -282,3 +282,43 @@ export const isGemini3ThinkingTokenModel = (model: Model) => {
const modelId = getLowerBaseModelName(model.id)
return isGemini3Model(model) && !modelId.includes('image')
}

/**
* Check if the model is a Gemini 3 Flash model
* Matches: gemini-3-flash, gemini-3-flash-preview, gemini-3-flash-preview-09-2025, gemini-flash-latest (alias)
* Excludes: gemini-3-flash-image-preview
* @param model - The model to check
* @returns true if the model is a Gemini 3 Flash model
*/
export const isGemini3FlashModel = (model: Model | undefined | null): boolean => {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
// Check for gemini-flash-latest alias (currently points to gemini-3-flash, may change in future)
if (modelId === 'gemini-flash-latest') {
return true
}
// Check for gemini-3-flash with optional suffixes, excluding image variants
return /gemini-3-flash(?!-image)(?:-[\w-]+)*$/i.test(modelId)
}

/**
* Check if the model is a Gemini 3 Pro model
* Matches: gemini-3-pro, gemini-3-pro-preview, gemini-3-pro-preview-09-2025, gemini-pro-latest (alias)
* Excludes: gemini-3-pro-image-preview
* @param model - The model to check
* @returns true if the model is a Gemini 3 Pro model
*/
export const isGemini3ProModel = (model: Model | undefined | null): boolean => {
if (!model) {
return false
}
const modelId = getLowerBaseModelName(model.id)
// Check for gemini-pro-latest alias (currently points to gemini-3-pro, may change in future)
if (modelId === 'gemini-pro-latest') {
return true
}
// Check for gemini-3-pro with optional suffixes, excluding image variants
return /gemini-3-pro(?!-image)(?:-[\w-]+)*$/i.test(modelId)
}

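Both helpers above combine an exact alias check with a negative lookahead so that image variants never count as Gemini 3 thinking models. A small standalone sketch of the Flash rule; ids are lower-cased directly here, whereas the real helpers normalize via getLowerBaseModelName first:

// Sketch of the matching rule only, not the exported helper.
const GEMINI_3_FLASH_RE = /gemini-3-flash(?!-image)(?:-[\w-]+)*$/i

const samples = [
  'gemini-3-flash', // true
  'gemini-3-flash-preview-09-2025', // true
  'gemini-3-flash-image-preview', // false: the lookahead rejects image variants
  'gemini-flash-latest' // false here; the exported helper special-cases this alias
]

for (const id of samples) {
  console.log(id, GEMINI_3_FLASH_RE.test(id.toLowerCase()))
}
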
@@ -560,7 +560,7 @@
"medium": "斟酌",
"medium_description": "中强度推理",
"minimal": "微念",
"minimal_description": "最小程度的思考",
"minimal_description": "最小程度的推理",
"off": "关闭",
"off_description": "禁用推理",
"xhigh": "穷究",

@@ -95,9 +95,10 @@ const ThinkModelTypes = [
'gpt52pro',
'grok',
'grok4_fast',
'gemini',
'gemini_pro',
'gemini3',
'gemini2_flash',
'gemini2_pro',
'gemini3_flash',
'gemini3_pro',
'qwen',
'qwen_thinking',
'doubao',

yarn.lock
@@ -102,6 +102,18 @@ __metadata:
languageName: node
linkType: hard

"@ai-sdk/anthropic@npm:2.0.56":
version: 2.0.56
resolution: "@ai-sdk/anthropic@npm:2.0.56"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.19"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/f2b6029c92443f831a2d124420e805d057668003067b1f677a4292d02f27aa3ad533374ea996d77ede7746a42c46fb94a8f2d8c0e7758a4555ea18c8b532052c
languageName: node
linkType: hard

"@ai-sdk/azure@npm:^2.0.87":
version: 2.0.87
resolution: "@ai-sdk/azure@npm:2.0.87"
@@ -166,42 +178,42 @@ __metadata:
languageName: node
linkType: hard

"@ai-sdk/google-vertex@npm:^3.0.79":
version: 3.0.79
resolution: "@ai-sdk/google-vertex@npm:3.0.79"
"@ai-sdk/google-vertex@npm:^3.0.94":
version: 3.0.94
resolution: "@ai-sdk/google-vertex@npm:3.0.94"
dependencies:
"@ai-sdk/anthropic": "npm:2.0.49"
"@ai-sdk/google": "npm:2.0.43"
"@ai-sdk/anthropic": "npm:2.0.56"
"@ai-sdk/google": "npm:2.0.49"
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
google-auth-library: "npm:^9.15.0"
"@ai-sdk/provider-utils": "npm:3.0.19"
google-auth-library: "npm:^10.5.0"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/a86949b8d4a855409acdf7dc8d93ad9ea8ccf2bc3849acbe1ecbe4d6d66f06bcb5242f0df8eea24214e78732618b71ec8a019cbbeab16366f9ad3c860c5d8d30
checksum: 10c0/68e2ee9e6525a5e43f90304980e64bf2a4227fd3ce74a7bf17e5ace094ea1bca8f8f18a8cc332a492fee4b912568a768f7479a4eed8148b84e7de1adf4104ad0
languageName: node
linkType: hard

"@ai-sdk/google@npm:2.0.43":
version: 2.0.43
resolution: "@ai-sdk/google@npm:2.0.43"
"@ai-sdk/google@npm:2.0.49":
version: 2.0.49
resolution: "@ai-sdk/google@npm:2.0.49"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@ai-sdk/provider-utils": "npm:3.0.19"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/5a421a9746cf8cbdf3bb7fb49426453a4fe0e354ea55a0123e628afb7acf9bb19959d512c0f8e6d7dbefbfa7e1cef4502fc146149007258a8eeb57743ac5e9e5
checksum: 10c0/f3f8acfcd956edc7d807d22963d5eff0f765418f1f2c7d18615955ccdfcebb4d43cc26ce1f712c6a53572f1d8becc0773311b77b1f1bf1af87d675c5f017d5a4
languageName: node
linkType: hard

"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch":
version: 2.0.43
resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch::version=2.0.43&hash=4dde1e"
"@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch":
version: 2.0.49
resolution: "@ai-sdk/google@patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch::version=2.0.49&hash=406c25"
dependencies:
"@ai-sdk/provider": "npm:2.0.0"
"@ai-sdk/provider-utils": "npm:3.0.17"
"@ai-sdk/provider-utils": "npm:3.0.19"
peerDependencies:
zod: ^3.25.76 || ^4.1.8
checksum: 10c0/4cfd17e9c47f2b742d8a0b1ca3532b4dc48753088363b74b01a042f63652174fa9a3fbf655a23f823974c673121dffbd2d192bb0c1bf158da4e2bf498fc76527
checksum: 10c0/8d4d881583c2301dce8a4e3066af2ba7d99b30520b6219811f90271c93bf8a07dc23e752fa25ffd0e72c6ec56e97d40d32e04072a362accf7d01a745a2d2a352
languageName: node
linkType: hard

@@ -10051,8 +10063,8 @@ __metadata:
"@ai-sdk/anthropic": "npm:^2.0.49"
"@ai-sdk/cerebras": "npm:^1.0.31"
"@ai-sdk/gateway": "npm:^2.0.15"
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.43#~/.yarn/patches/@ai-sdk-google-npm-2.0.43-689ed559b3.patch"
"@ai-sdk/google-vertex": "npm:^3.0.79"
"@ai-sdk/google": "patch:@ai-sdk/google@npm%3A2.0.49#~/.yarn/patches/@ai-sdk-google-npm-2.0.49-84720f41bd.patch"
"@ai-sdk/google-vertex": "npm:^3.0.94"
"@ai-sdk/huggingface": "npm:^0.0.10"
"@ai-sdk/mistral": "npm:^2.0.24"
"@ai-sdk/openai": "patch:@ai-sdk/openai@npm%3A2.0.85#~/.yarn/patches/@ai-sdk-openai-npm-2.0.85-27483d1d6a.patch"
@@ -15499,6 +15511,18 @@ __metadata:
languageName: node
linkType: hard

"gaxios@npm:^7.0.0":
version: 7.1.3
resolution: "gaxios@npm:7.1.3"
dependencies:
extend: "npm:^3.0.2"
https-proxy-agent: "npm:^7.0.1"
node-fetch: "npm:^3.3.2"
rimraf: "npm:^5.0.1"
checksum: 10c0/a4a1cdf9a392c0c22e9734a40dca5a77a2903f505b939a50f1e68e312458b1289b7993d2f72d011426e89657cae77a3aa9fc62fb140e8ba90a1faa31fdbde4d2
languageName: node
linkType: hard

"gcp-metadata@npm:^6.1.0":
version: 6.1.1
resolution: "gcp-metadata@npm:6.1.1"
@@ -15510,6 +15534,17 @@ __metadata:
languageName: node
linkType: hard

"gcp-metadata@npm:^8.0.0":
version: 8.1.2
resolution: "gcp-metadata@npm:8.1.2"
dependencies:
gaxios: "npm:^7.0.0"
google-logging-utils: "npm:^1.0.0"
json-bigint: "npm:^1.0.0"
checksum: 10c0/15a61231a9410dc11c2828d2c9fdc8b0a939f1af746195c44edc6f2ffea0acab52cef3a7b9828069a36fd5d68bda730f7328a415fe42a01258f6e249dfba6908
languageName: node
linkType: hard

"gensync@npm:^1.0.0-beta.2":
version: 1.0.0-beta.2
resolution: "gensync@npm:1.0.0-beta.2"
@@ -15733,7 +15768,22 @@ __metadata:
languageName: node
linkType: hard

"google-auth-library@npm:^9.14.2, google-auth-library@npm:^9.15.0, google-auth-library@npm:^9.15.1, google-auth-library@npm:^9.4.2":
"google-auth-library@npm:^10.5.0":
version: 10.5.0
resolution: "google-auth-library@npm:10.5.0"
dependencies:
base64-js: "npm:^1.3.0"
ecdsa-sig-formatter: "npm:^1.0.11"
gaxios: "npm:^7.0.0"
gcp-metadata: "npm:^8.0.0"
google-logging-utils: "npm:^1.0.0"
gtoken: "npm:^8.0.0"
jws: "npm:^4.0.0"
checksum: 10c0/49d3931d20b1f4a4d075216bf5518e2b3396dcf441a8f1952611cf3b6080afb1261c3d32009609047ee4a1cc545269a74b4957e6bba9cce840581df309c4b145
languageName: node
linkType: hard

"google-auth-library@npm:^9.14.2, google-auth-library@npm:^9.15.1, google-auth-library@npm:^9.4.2":
version: 9.15.1
resolution: "google-auth-library@npm:9.15.1"
dependencies:
@@ -15754,6 +15804,13 @@ __metadata:
languageName: node
linkType: hard

"google-logging-utils@npm:^1.0.0":
version: 1.1.3
resolution: "google-logging-utils@npm:1.1.3"
checksum: 10c0/e65201c7e96543bd1423b9324013736646b9eed60941e0bfa47b9bfd146d2f09cf3df1c99ca60b7d80a726075263ead049ee72de53372cb8458c3bc55c2c1e59
languageName: node
linkType: hard

"gopd@npm:^1.0.1, gopd@npm:^1.2.0":
version: 1.2.0
resolution: "gopd@npm:1.2.0"
@@ -15842,6 +15899,16 @@ __metadata:
languageName: node
linkType: hard

"gtoken@npm:^8.0.0":
version: 8.0.0
resolution: "gtoken@npm:8.0.0"
dependencies:
gaxios: "npm:^7.0.0"
jws: "npm:^4.0.0"
checksum: 10c0/058538e5bbe081d30ada5f1fd34d3a8194357c2e6ecbf7c8a98daeefbf13f7e06c15649c7dace6a1d4cc3bc6dc5483bd484d6d7adc5852021896d7c05c439f37
languageName: node
linkType: hard

"hachure-fill@npm:^0.5.2":
version: 0.5.2
resolution: "hachure-fill@npm:0.5.2"
@@ -22778,7 +22845,7 @@ __metadata:
languageName: node
linkType: hard

"rimraf@npm:^5.0.10":
"rimraf@npm:^5.0.1, rimraf@npm:^5.0.10":
version: 5.0.10
resolution: "rimraf@npm:5.0.10"
dependencies: