Mirror of https://github.com/CherryHQ/cherry-studio.git, synced 2025-12-28 05:11:24 +08:00
chore: update dependencies and remove unused patches

- Updated various package versions in yarn.lock for improved compatibility and performance.
- Removed obsolete patches for antd and openai, streamlining dependency management.
- Adjusted icon imports in Dropdown and useIcons to use Lucide icons for better visual consistency.
parent d05ed94702
commit 628919b562
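Both files removed below are Yarn patch artifacts (created with `yarn patch`). For orientation, a sketch of how such files are typically wired into package.json via the `patch:` protocol; the exact entries in this repo may differ:

```json
{
  "resolutions": {
    "antd": "patch:antd@npm%3A5.24.7#~/.yarn/patches/antd-npm-5.24.7-356a553ae5.patch",
    "openai": "patch:openai@npm%3A5.1.0#~/.yarn/patches/openai-npm-5.1.0-0e7b3ccb07.patch"
  }
}
```

Deleting the patch files goes hand in hand with dropping entries like these, which is what drives the yarn.lock churn in this commit.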
.yarn/patches/antd-npm-5.24.7-356a553ae5.patch (vendored, 69 deletions)
@@ -1,69 +0,0 @@
diff --git a/es/dropdown/dropdown.js b/es/dropdown/dropdown.js
index 986877a762b9ad0aca596a8552732cd12d2eaabb..1f18aa2ea745e68950e4cee16d4d655f5c835fd5 100644
--- a/es/dropdown/dropdown.js
+++ b/es/dropdown/dropdown.js
@@ -2,7 +2,7 @@
 
 import * as React from 'react';
 import LeftOutlined from "@ant-design/icons/es/icons/LeftOutlined";
-import RightOutlined from "@ant-design/icons/es/icons/RightOutlined";
+import { ChevronRight } from 'lucide-react';
 import classNames from 'classnames';
 import RcDropdown from 'rc-dropdown';
 import useEvent from "rc-util/es/hooks/useEvent";
@@ -158,8 +158,10 @@ const Dropdown = props => {
     className: `${prefixCls}-menu-submenu-arrow`
   }, direction === 'rtl' ? (/*#__PURE__*/React.createElement(LeftOutlined, {
     className: `${prefixCls}-menu-submenu-arrow-icon`
-  })) : (/*#__PURE__*/React.createElement(RightOutlined, {
-    className: `${prefixCls}-menu-submenu-arrow-icon`
+  })) : (/*#__PURE__*/React.createElement(ChevronRight, {
+    size: 16,
+    strokeWidth: 1.8,
+    className: `${prefixCls}-menu-submenu-arrow-icon lucide-custom`
   }))),
   mode: "vertical",
   selectable: false,
diff --git a/es/dropdown/style/index.js b/es/dropdown/style/index.js
index 768c01783002c6901c85a73061ff6b3e776a60ce..39b1b95a56cdc9fb586a193c3adad5141f5cf213 100644
--- a/es/dropdown/style/index.js
+++ b/es/dropdown/style/index.js
@@ -240,7 +240,8 @@ const genBaseStyle = token => {
       marginInlineEnd: '0 !important',
       color: token.colorTextDescription,
       fontSize: fontSizeIcon,
-      fontStyle: 'normal'
+      fontStyle: 'normal',
+      marginTop: 3,
     }
   }
 }),
diff --git a/es/select/useIcons.js b/es/select/useIcons.js
index 959115be936ef8901548af2658c5dcfdc5852723..c812edd52123eb0faf4638b1154fcfa1b05b513b 100644
--- a/es/select/useIcons.js
+++ b/es/select/useIcons.js
@@ -4,10 +4,10 @@ import * as React from 'react';
 import CheckOutlined from "@ant-design/icons/es/icons/CheckOutlined";
 import CloseCircleFilled from "@ant-design/icons/es/icons/CloseCircleFilled";
 import CloseOutlined from "@ant-design/icons/es/icons/CloseOutlined";
-import DownOutlined from "@ant-design/icons/es/icons/DownOutlined";
 import LoadingOutlined from "@ant-design/icons/es/icons/LoadingOutlined";
 import SearchOutlined from "@ant-design/icons/es/icons/SearchOutlined";
 import { devUseWarning } from '../_util/warning';
+import { ChevronDown } from 'lucide-react';
 export default function useIcons(_ref) {
   let {
     suffixIcon,
@@ -56,8 +56,10 @@ export default function useIcons(_ref) {
       className: iconCls
     }));
   }
-  return getSuffixIconNode(/*#__PURE__*/React.createElement(DownOutlined, {
-    className: iconCls
+  return getSuffixIconNode(/*#__PURE__*/React.createElement(ChevronDown, {
+    size: 16,
+    strokeWidth: 1.8,
+    className: `${iconCls} lucide-custom`
   }));
  };
 }
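Aside: the pattern the deleted antd patch applied, reduced to a standalone sketch. The icon names, props, and class names are taken from the hunks above; `SubmenuArrow` itself is hypothetical and not part of antd or this repo. lucide-react icons default to 24px, which is why the patch pins `size` and `strokeWidth` to approximate antd's icon metrics:

```tsx
import { ChevronRight } from "lucide-react";

// Hypothetical wrapper: render Lucide's ChevronRight where antd's
// RightOutlined submenu arrow used to be.
export const SubmenuArrow = ({ prefixCls }: { prefixCls: string }) => (
  <ChevronRight
    size={16}          // match antd's roughly 14-16px font-sized icons
    strokeWidth={1.8}  // slightly thinner stroke to match antd's visual weight
    className={`${prefixCls}-menu-submenu-arrow-icon lucide-custom`}
  />
);
```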
.yarn/patches/openai-npm-5.1.0-0e7b3ccb07.patch (vendored, 279 deletions)
@@ -1,279 +0,0 @@
diff --git a/client.js b/client.js
index 33b4ff6309d5f29187dab4e285d07dac20340bab..8f568637ee9e4677585931fb0284c8165a933f69 100644
--- a/client.js
+++ b/client.js
@@ -433,7 +433,7 @@ class OpenAI {
             'User-Agent': this.getUserAgent(),
             'X-Stainless-Retry-Count': String(retryCount),
             ...(options.timeout ? { 'X-Stainless-Timeout': String(Math.trunc(options.timeout / 1000)) } : {}),
-            ...(0, detect_platform_1.getPlatformHeaders)(),
+            // ...(0, detect_platform_1.getPlatformHeaders)(),
             'OpenAI-Organization': this.organization,
             'OpenAI-Project': this.project,
         },
diff --git a/client.mjs b/client.mjs
index c34c18213073540ebb296ea540b1d1ad39527906..1ce1a98256d7e90e26ca963582f235b23e996e73 100644
--- a/client.mjs
+++ b/client.mjs
@@ -430,7 +430,7 @@ export class OpenAI {
             'User-Agent': this.getUserAgent(),
             'X-Stainless-Retry-Count': String(retryCount),
             ...(options.timeout ? { 'X-Stainless-Timeout': String(Math.trunc(options.timeout / 1000)) } : {}),
-            ...getPlatformHeaders(),
+            // ...getPlatformHeaders(),
             'OpenAI-Organization': this.organization,
             'OpenAI-Project': this.project,
         },
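The two hunks above comment out `getPlatformHeaders()`, which injects `X-Stainless-*` platform fields (OS, arch, runtime) into every request; presumably some OpenAI-compatible endpoints or proxies reject the extra headers. A minimal unpatched alternative, assuming openai-node's custom `fetch` option (a sketch, not the repo's actual code):

```ts
import OpenAI from "openai";

// Strip X-Stainless-* platform headers in a custom fetch instead of
// patching the SDK source.
const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  fetch: async (url: RequestInfo, init: RequestInit = {}) => {
    const headers = new Headers(init.headers);
    for (const key of [...headers.keys()]) {
      if (key.toLowerCase().startsWith("x-stainless-")) headers.delete(key);
    }
    return fetch(url, { ...init, headers });
  },
});
```

Note this also drops `X-Stainless-Retry-Count`, which the patch kept, so it is only approximately equivalent.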
diff --git a/core/error.js b/core/error.js
index a12d9d9ccd242050161adeb0f82e1b98d9e78e20..fe3a5462480558bc426deea147f864f12b36f9bd 100644
--- a/core/error.js
+++ b/core/error.js
@@ -40,7 +40,7 @@ class APIError extends OpenAIError {
         if (!status || !headers) {
             return new APIConnectionError({ message, cause: (0, errors_1.castToError)(errorResponse) });
         }
-        const error = errorResponse?.['error'];
+        const error = errorResponse?.['error'] || errorResponse;
         if (status === 400) {
             return new BadRequestError(status, error, message, headers);
         }
diff --git a/core/error.mjs b/core/error.mjs
index 83cefbaffeb8c657536347322d8de9516af479a2..63334b7972ec04882aa4a0800c1ead5982345045 100644
--- a/core/error.mjs
+++ b/core/error.mjs
@@ -36,7 +36,7 @@ export class APIError extends OpenAIError {
         if (!status || !headers) {
             return new APIConnectionError({ message, cause: castToError(errorResponse) });
         }
-        const error = errorResponse?.['error'];
+        const error = errorResponse?.['error'] || errorResponse;
         if (status === 400) {
             return new BadRequestError(status, error, message, headers);
         }
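The `|| errorResponse` fallback exists because strictly OpenAI-shaped error bodies nest the payload under an `error` key, while many OpenAI-compatible providers return it at the top level. A self-contained illustration of the patched lookup:

```ts
// Two error-body shapes seen from OpenAI-compatible providers.
const openaiShape = { error: { message: "model not found", code: "model_not_found" } };
const bareShape = { message: "model not found", code: "model_not_found" };

// The patched lookup tolerates both shapes.
const pickError = (body: any) => body?.["error"] || body;

console.log(pickError(openaiShape).message); // "model not found"
console.log(pickError(bareShape).message);   // "model not found"
```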
diff --git a/resources/embeddings.js b/resources/embeddings.js
index 2404264d4ba0204322548945ebb7eab3bea82173..8f1bc45cc45e0797d50989d96b51147b90ae6790 100644
--- a/resources/embeddings.js
+++ b/resources/embeddings.js
@@ -5,52 +5,64 @@ exports.Embeddings = void 0;
 const resource_1 = require("../core/resource.js");
 const utils_1 = require("../internal/utils.js");
 class Embeddings extends resource_1.APIResource {
-    /**
-     * Creates an embedding vector representing the input text.
-     *
-     * @example
-     * ```ts
-     * const createEmbeddingResponse =
-     *   await client.embeddings.create({
-     *     input: 'The quick brown fox jumped over the lazy dog',
-     *     model: 'text-embedding-3-small',
-     *   });
-     * ```
-     */
-    create(body, options) {
-        const hasUserProvidedEncodingFormat = !!body.encoding_format;
-        // No encoding_format specified, defaulting to base64 for performance reasons
-        // See https://github.com/openai/openai-node/pull/1312
-        let encoding_format = hasUserProvidedEncodingFormat ? body.encoding_format : 'base64';
-        if (hasUserProvidedEncodingFormat) {
-            (0, utils_1.loggerFor)(this._client).debug('embeddings/user defined encoding_format:', body.encoding_format);
-        }
-        const response = this._client.post('/embeddings', {
-            body: {
-                ...body,
-                encoding_format: encoding_format,
-            },
-            ...options,
-        });
-        // if the user specified an encoding_format, return the response as-is
-        if (hasUserProvidedEncodingFormat) {
-            return response;
-        }
-        // in this stage, we are sure the user did not specify an encoding_format
-        // and we defaulted to base64 for performance reasons
-        // we are sure then that the response is base64 encoded, let's decode it
-        // the returned result will be a float32 array since this is OpenAI API's default encoding
-        (0, utils_1.loggerFor)(this._client).debug('embeddings/decoding base64 embeddings from base64');
-        return response._thenUnwrap((response) => {
-            if (response && response.data) {
-                response.data.forEach((embeddingBase64Obj) => {
-                    const embeddingBase64Str = embeddingBase64Obj.embedding;
-                    embeddingBase64Obj.embedding = (0, utils_1.toFloat32Array)(embeddingBase64Str);
-                });
-            }
-            return response;
-        });
-    }
+    /**
+     * Creates an embedding vector representing the input text.
+     *
+     * @example
+     * ```ts
+     * const createEmbeddingResponse =
+     *   await client.embeddings.create({
+     *     input: 'The quick brown fox jumped over the lazy dog',
+     *     model: 'text-embedding-3-small',
+     *   });
+     * ```
+     */
+    create(body, options) {
+        const hasUserProvidedEncodingFormat = !!body.encoding_format;
+        // No encoding_format specified, defaulting to base64 for performance reasons
+        // See https://github.com/openai/openai-node/pull/1312
+        let encoding_format = hasUserProvidedEncodingFormat
+            ? body.encoding_format
+            : "base64";
+        if (body.model.includes("jina")) {
+            encoding_format = undefined;
+        }
+        if (hasUserProvidedEncodingFormat) {
+            (0, utils_1.loggerFor)(this._client).debug(
+                "embeddings/user defined encoding_format:",
+                body.encoding_format
+            );
+        }
+        const response = this._client.post("/embeddings", {
+            body: {
+                ...body,
+                encoding_format: encoding_format,
+            },
+            ...options,
+        });
+        // if the user specified an encoding_format, return the response as-is
+        if (hasUserProvidedEncodingFormat || body.model.includes("jina")) {
+            return response;
+        }
+        // in this stage, we are sure the user did not specify an encoding_format
+        // and we defaulted to base64 for performance reasons
+        // we are sure then that the response is base64 encoded, let's decode it
+        // the returned result will be a float32 array since this is OpenAI API's default encoding
+        (0, utils_1.loggerFor)(this._client).debug(
+            "embeddings/decoding base64 embeddings from base64"
+        );
+        return response._thenUnwrap((response) => {
+            if (response && response.data && typeof response.data[0]?.embedding === 'string') {
+                response.data.forEach((embeddingBase64Obj) => {
+                    const embeddingBase64Str = embeddingBase64Obj.embedding;
+                    embeddingBase64Obj.embedding = (0, utils_1.toFloat32Array)(
+                        embeddingBase64Str
+                    );
+                });
+            }
+            return response;
+        });
+    }
 }
 exports.Embeddings = Embeddings;
 //# sourceMappingURL=embeddings.js.map
diff --git a/resources/embeddings.mjs b/resources/embeddings.mjs
index 19dcaef578c194a89759c4360073cfd4f7dd2cbf..0284e9cc615c900eff508eb595f7360a74bd9200 100644
--- a/resources/embeddings.mjs
+++ b/resources/embeddings.mjs
@@ -2,51 +2,61 @@
 import { APIResource } from "../core/resource.mjs";
 import { loggerFor, toFloat32Array } from "../internal/utils.mjs";
 export class Embeddings extends APIResource {
-    /**
-     * Creates an embedding vector representing the input text.
-     *
-     * @example
-     * ```ts
-     * const createEmbeddingResponse =
-     *   await client.embeddings.create({
-     *     input: 'The quick brown fox jumped over the lazy dog',
-     *     model: 'text-embedding-3-small',
-     *   });
-     * ```
-     */
-    create(body, options) {
-        const hasUserProvidedEncodingFormat = !!body.encoding_format;
-        // No encoding_format specified, defaulting to base64 for performance reasons
-        // See https://github.com/openai/openai-node/pull/1312
-        let encoding_format = hasUserProvidedEncodingFormat ? body.encoding_format : 'base64';
-        if (hasUserProvidedEncodingFormat) {
-            loggerFor(this._client).debug('embeddings/user defined encoding_format:', body.encoding_format);
-        }
-        const response = this._client.post('/embeddings', {
-            body: {
-                ...body,
-                encoding_format: encoding_format,
-            },
-            ...options,
-        });
-        // if the user specified an encoding_format, return the response as-is
-        if (hasUserProvidedEncodingFormat) {
-            return response;
-        }
-        // in this stage, we are sure the user did not specify an encoding_format
-        // and we defaulted to base64 for performance reasons
-        // we are sure then that the response is base64 encoded, let's decode it
-        // the returned result will be a float32 array since this is OpenAI API's default encoding
-        loggerFor(this._client).debug('embeddings/decoding base64 embeddings from base64');
-        return response._thenUnwrap((response) => {
-            if (response && response.data) {
-                response.data.forEach((embeddingBase64Obj) => {
-                    const embeddingBase64Str = embeddingBase64Obj.embedding;
-                    embeddingBase64Obj.embedding = toFloat32Array(embeddingBase64Str);
-                });
-            }
-            return response;
-        });
-    }
+    /**
+     * Creates an embedding vector representing the input text.
+     *
+     * @example
+     * ```ts
+     * const createEmbeddingResponse =
+     *   await client.embeddings.create({
+     *     input: 'The quick brown fox jumped over the lazy dog',
+     *     model: 'text-embedding-3-small',
+     *   });
+     * ```
+     */
+    create(body, options) {
+        const hasUserProvidedEncodingFormat = !!body.encoding_format;
+        // No encoding_format specified, defaulting to base64 for performance reasons
+        // See https://github.com/openai/openai-node/pull/1312
+        let encoding_format = hasUserProvidedEncodingFormat
+            ? body.encoding_format
+            : "base64";
+        if (body.model.includes("jina")) {
+            encoding_format = undefined;
+        }
+        if (hasUserProvidedEncodingFormat) {
+            loggerFor(this._client).debug(
+                "embeddings/user defined encoding_format:",
+                body.encoding_format
+            );
+        }
+        const response = this._client.post("/embeddings", {
+            body: {
+                ...body,
+                encoding_format: encoding_format,
+            },
+            ...options,
+        });
+        // if the user specified an encoding_format, return the response as-is
+        if (hasUserProvidedEncodingFormat || body.model.includes("jina")) {
+            return response;
+        }
+        // in this stage, we are sure the user did not specify an encoding_format
+        // and we defaulted to base64 for performance reasons
+        // we are sure then that the response is base64 encoded, let's decode it
+        // the returned result will be a float32 array since this is OpenAI API's default encoding
+        loggerFor(this._client).debug(
+            "embeddings/decoding base64 embeddings from base64"
+        );
+        return response._thenUnwrap((response) => {
+            if (response && response.data && typeof response.data[0]?.embedding === 'string') {
+                response.data.forEach((embeddingBase64Obj) => {
+                    const embeddingBase64Str = embeddingBase64Obj.embedding;
+                    embeddingBase64Obj.embedding = toFloat32Array(embeddingBase64Str);
+                });
+            }
+            return response;
+        });
+    }
 }
 //# sourceMappingURL=embeddings.mjs.map
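The embeddings hunks (identical in the CJS and ESM builds) make two behavioral changes: models whose name contains "jina" are sent no `encoding_format` and skip the base64 path entirely, and base64 decoding now runs only when the embedding actually came back as a string. A condensed standalone sketch of that logic; `base64ToFloat32` is a hypothetical stand-in for the SDK's internal `toFloat32Array`:

```ts
type EmbeddingBody = { model: string; encoding_format?: "float" | "base64" };

// Patched default: the user's explicit choice wins; Jina-style models get no
// encoding_format; everyone else defaults to base64 for smaller payloads.
function pickEncodingFormat(body: EmbeddingBody): "float" | "base64" | undefined {
  if (body.encoding_format) return body.encoding_format;
  if (body.model.includes("jina")) return undefined;
  return "base64";
}

// Hypothetical stand-in for the SDK's toFloat32Array (Node.js Buffer).
function base64ToFloat32(b64: string): Float32Array {
  const buf = Buffer.from(b64, "base64");
  return new Float32Array(buf.buffer, buf.byteOffset, buf.byteLength / Float32Array.BYTES_PER_ELEMENT);
}

// Patched guard: decode only if the provider really returned base64 strings;
// providers that ignore encoding_format may already return number[].
function maybeDecode(data: Array<{ embedding: string | number[] }>) {
  if (typeof data[0]?.embedding !== "string") return data;
  return data.map((d) => ({
    ...d,
    embedding: Array.from(base64ToFloat32(d.embedding as string)),
  }));
}
```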
@@ -12,7 +12,7 @@ import { getStoreSetting } from '@renderer/hooks/useSettings'
 import i18n from '@renderer/i18n'
 import store from '@renderer/store'
 import { Assistant, MCPServer, MCPTool, Model, Provider } from '@renderer/types'
-import { type Chunk } from '@renderer/types/chunk'
+import { type Chunk, ChunkType } from '@renderer/types/chunk'
 import { Message } from '@renderer/types/newMessage'
 import { SdkModel } from '@renderer/types/sdk'
 import { removeSpecialCharactersForTopicName } from '@renderer/utils'
@@ -157,6 +157,7 @@ export async function fetchChatCompletion({
   //   onChunkReceived({ type: ChunkType.LLM_WEB_SEARCH_IN_PROGRESS })
   // }
   // --- Call AI Completions ---
+  onChunkReceived({ type: ChunkType.LLM_RESPONSE_CREATED })
 
   // Set the correct OpenTelemetry context when the AI SDK is invoked
   if (topicId) {
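For context on the one-line addition above: the renderer consumes model output as typed chunks, and emitting `LLM_RESPONSE_CREATED` before the SDK call lets the UI create the message placeholder before the first token arrives. A hedged sketch of the pattern; the chunk type names come from the diff, while the surrounding function and the `TEXT_DELTA` shape are invented for illustration:

```ts
// Assumed minimal shape of the chunk union consumed by onChunkReceived.
type Chunk =
  | { type: "LLM_RESPONSE_CREATED" }
  | { type: "TEXT_DELTA"; text: string };

async function fetchChatCompletionSketch(onChunkReceived: (chunk: Chunk) => void) {
  // Announce the response object up front so the UI can render a placeholder.
  onChunkReceived({ type: "LLM_RESPONSE_CREATED" });

  // ...then forward streamed tokens as they arrive.
  for (const token of ["Hello", ", ", "world"]) {
    onChunkReceived({ type: "TEXT_DELTA", text: token });
  }
}
```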