Merge remote-tracking branch 'origin/main' into feat/agents-new

Vaayne 2025-09-30 18:03:35 +08:00
commit d9d8bae2d6
31 changed files with 1972 additions and 9 deletions

View File

@ -34,6 +34,7 @@ export enum IpcChannel {
App_GetBinaryPath = 'app:get-binary-path',
App_InstallUvBinary = 'app:install-uv-binary',
App_InstallBunBinary = 'app:install-bun-binary',
App_InstallOvmsBinary = 'app:install-ovms-binary',
App_LogToMain = 'app:log-to-main',
App_SaveData = 'app:save-data',
App_GetDiskInfo = 'app:get-disk-info',
@ -225,6 +226,7 @@ export enum IpcChannel {
// system
System_GetDeviceType = 'system:getDeviceType',
System_GetHostname = 'system:getHostname',
System_GetCpuName = 'system:getCpuName',
// DevTools
System_ToggleDevTools = 'system:toggleDevTools',
@ -335,6 +337,15 @@ export enum IpcChannel {
// OCR
OCR_ocr = 'ocr:ocr',
// OVMS
Ovms_AddModel = 'ovms:add-model',
Ovms_StopAddModel = 'ovms:stop-addmodel',
Ovms_GetModels = 'ovms:get-models',
Ovms_IsRunning = 'ovms:is-running',
Ovms_GetStatus = 'ovms:get-status',
Ovms_RunOVMS = 'ovms:run-ovms',
Ovms_StopOVMS = 'ovms:stop-ovms',
// CherryAI
Cherryai_GetSignature = 'cherryai:get-signature'
}

View File

@ -1,5 +1,7 @@
const https = require('https')
const fs = require('fs')
const path = require('path')
const { execSync } = require('child_process')
/**
* Downloads a file from a URL with redirect handling
@ -32,4 +34,39 @@ async function downloadWithRedirects(url, destinationPath) {
})
}
/**
* Downloads a file using PowerShell Invoke-WebRequest command
* @param {string} url The URL to download from
* @param {string} destinationPath The path to save the file to
* @returns {Promise<boolean>} Promise that resolves to true if download succeeds
*/
async function downloadWithPowerShell(url, destinationPath) {
return new Promise((resolve, reject) => {
try {
// PowerShell download is only supported on Windows
if (process.platform !== 'win32') {
return reject(new Error('PowerShell download is only supported on Windows'))
}
const outputDir = path.dirname(destinationPath)
fs.mkdirSync(outputDir, { recursive: true })
// PowerShell command to download the file with progress disabled for faster download
const psCommand = `powershell -Command "$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest '${url}' -OutFile '${destinationPath}'"`
console.log(`Downloading with PowerShell: ${url}`)
execSync(psCommand, { stdio: 'inherit' })
if (fs.existsSync(destinationPath)) {
console.log(`Download completed: ${destinationPath}`)
resolve(true)
} else {
reject(new Error('Download failed: File not found after download'))
}
} catch (error) {
reject(new Error(`PowerShell download failed: ${error.message}`))
}
})
}
module.exports = { downloadWithRedirects, downloadWithPowerShell }
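
The module now exports both download paths; `downloadWithRedirects` remains the portable one. A hedged sketch of composing the two so non-Windows platforms (or a PowerShell failure) fall back to the pure-Node HTTPS download — the `downloadWithFallback` helper is hypothetical, not part of this commit:

import { downloadWithPowerShell, downloadWithRedirects } from './download'

// Hypothetical helper: prefer the PowerShell path on Windows and fall
// back to the pure-Node HTTPS download elsewhere or on failure.
async function downloadWithFallback(url: string, destinationPath: string): Promise<void> {
  if (process.platform === 'win32') {
    try {
      await downloadWithPowerShell(url, destinationPath)
      return
    } catch (error) {
      console.warn(`PowerShell download failed, falling back: ${(error as Error).message}`)
    }
  }
  await downloadWithRedirects(url, destinationPath)
}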

View File

@ -0,0 +1,177 @@
const fs = require('fs')
const path = require('path')
const os = require('os')
const { execSync } = require('child_process')
const { downloadWithPowerShell } = require('./download')
// Candidate URLs for downloading the OVMS binary package
const OVMS_PKG_NAME = 'ovms250911.zip'
const OVMS_RELEASE_URLS = [`https://gitcode.com/gcw_ggDjjkY3/kjfile/releases/download/download/${OVMS_PKG_NAME}`]
/**
* Downloads and extracts the OVMS binary for the specified platform
*/
async function downloadOvmsBinary() {
// Create output directory structure - OVMS goes into its own subdirectory
const csDir = path.join(os.homedir(), '.cherrystudio')
// Ensure directories exist
fs.mkdirSync(csDir, { recursive: true })
const csOvmsDir = path.join(csDir, 'ovms')
// Delete existing OVMS directory if it exists
if (fs.existsSync(csOvmsDir)) {
fs.rmSync(csOvmsDir, { recursive: true })
}
const tempdir = os.tmpdir()
const tempFilename = path.join(tempdir, 'ovms.zip')
// Try each candidate URL until one succeeds
let downloadSuccess = false
let lastError = null
for (let i = 0; i < OVMS_RELEASE_URLS.length; i++) {
const downloadUrl = OVMS_RELEASE_URLS[i]
console.log(`Attempting download from URL ${i + 1}/${OVMS_RELEASE_URLS.length}: ${downloadUrl}`)
try {
console.log(`Downloading OVMS from ${downloadUrl} to ${tempFilename}...`)
// Download via PowerShell (Windows-only; see downloadWithPowerShell)
await downloadWithPowerShell(downloadUrl, tempFilename)
// Reaching this point means the download succeeded
downloadSuccess = true
console.log(`Successfully downloaded from: ${downloadUrl}`)
break
} catch (error) {
console.warn(`Download failed from ${downloadUrl}: ${error.message}`)
lastError = error
// Clean up failed download file if it exists
if (fs.existsSync(tempFilename)) {
try {
fs.unlinkSync(tempFilename)
} catch (cleanupError) {
console.warn(`Failed to clean up temporary file: ${cleanupError.message}`)
}
}
// Continue to next URL if this one failed
if (i < OVMS_RELEASE_URLS.length - 1) {
console.log(`Trying next URL...`)
}
}
}
// Check if any download succeeded
if (!downloadSuccess) {
console.error(`All download URLs failed. Last error: ${lastError?.message || 'Unknown error'}`)
return 103
}
try {
console.log(`Extracting OVMS to ${csDir}...`)
// Use the system tar to extract the ZIP file (quote paths in case of spaces)
execSync(`tar -xf "${tempFilename}" -C "${csDir}"`, { stdio: 'inherit' })
console.log(`OVMS extracted to ${csDir}`)
// Clean up temporary file
fs.unlinkSync(tempFilename)
console.log(`Installation directory: ${csDir}`)
} catch (error) {
console.error(`Error installing OVMS: ${error.message}`)
if (fs.existsSync(tempFilename)) {
fs.unlinkSync(tempFilename)
}
// Remove the extracted ovms directory if it exists but is empty
try {
const ovmsDir = path.join(csDir, 'ovms')
if (fs.existsSync(ovmsDir) && fs.readdirSync(ovmsDir).length === 0) {
fs.rmSync(ovmsDir, { recursive: true })
console.log(`Removed empty directory: ${ovmsDir}`)
}
} catch (cleanupError) {
console.warn(`Warning: Failed to clean up directory: ${cleanupError.message}`)
return 105
}
return 104
}
return 0
}
/**
* Get the CPU Name and ID
*/
function getCpuInfo() {
const cpuInfo = {
name: '',
id: ''
}
// Use PowerShell to get CPU information
try {
const psCommand = `powershell -Command "Get-CimInstance -ClassName Win32_Processor | Select-Object Name, DeviceID | ConvertTo-Json"`
const psOutput = execSync(psCommand).toString()
const cpuData = JSON.parse(psOutput)
if (Array.isArray(cpuData)) {
cpuInfo.name = cpuData[0].Name || ''
cpuInfo.id = cpuData[0].DeviceID || ''
} else {
cpuInfo.name = cpuData.Name || ''
cpuInfo.id = cpuData.DeviceID || ''
}
} catch (error) {
console.error(`Failed to get CPU info: ${error.message}`)
}
return cpuInfo
}
/**
* Main function to install OVMS
*/
async function installOvms() {
const platform = os.platform()
console.log(`Detected platform: ${platform}`)
// Only Windows is supported (getCpuInfo also relies on PowerShell)
if (platform !== 'win32') {
console.error('OVMS installation is only supported on Windows.')
return 102
}
const cpuName = getCpuInfo().name
console.log(`CPU Name: ${cpuName}`)
// Require an Intel Core Ultra CPU (the name must contain both "Intel" and "Ultra")
if (!cpuName.toLowerCase().includes('intel') || !cpuName.toLowerCase().includes('ultra')) {
console.error('OVMS installation requires an Intel(R) Core(TM) Ultra CPU.')
return 101
}
return await downloadOvmsBinary()
}
// Run the installation
installOvms()
.then((retcode) => {
if (retcode === 0) {
console.log('OVMS installation successful')
} else {
console.error('OVMS installation failed')
}
process.exit(retcode)
})
.catch((error) => {
console.error('OVMS installation failed:', error)
process.exit(100)
})
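
The installer reports failure through its numeric exit code (0 for success, 100-105 for errors); the locale files later in this commit add matching `install_code_*` strings. A hedged sketch of mapping a code to its translation key — the function is hypothetical, and the full key prefix depends on the locale file's nesting, which the hunks below do not show:

// Hypothetical mapping from install-ovms.js exit codes to the
// install_code_* i18n keys added later in this commit.
function installErrorKey(retcode: number): string {
  const known = new Set([100, 101, 102, 103, 104, 105])
  const code = known.has(retcode) ? retcode : 100 // 100 = unknown error
  return `ovms.failed.install_code_${code}`
}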

View File

@ -45,6 +45,7 @@ import NotificationService from './services/NotificationService'
import * as NutstoreService from './services/NutstoreService'
import ObsidianVaultService from './services/ObsidianVaultService'
import { ocrService } from './services/ocr/OcrService'
import OvmsManager from './services/OvmsManager'
import { proxyManager } from './services/ProxyManager'
import { pythonService } from './services/PythonService'
import { FileServiceManager } from './services/remotefile/FileServiceManager'
@ -91,6 +92,7 @@ const obsidianVaultService = new ObsidianVaultService()
const vertexAIService = VertexAIService.getInstance()
const memoryService = MemoryService.getInstance()
const dxtService = new DxtService()
const ovmsManager = new OvmsManager()
export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
const appUpdater = new AppUpdater()
@ -463,6 +465,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
// system
ipcMain.handle(IpcChannel.System_GetDeviceType, () => (isMac ? 'mac' : isWin ? 'windows' : 'linux'))
ipcMain.handle(IpcChannel.System_GetHostname, () => require('os').hostname())
ipcMain.handle(IpcChannel.System_GetCpuName, () => require('os').cpus()[0].model)
ipcMain.handle(IpcChannel.System_ToggleDevTools, (e) => {
const win = BrowserWindow.fromWebContents(e.sender)
win && win.webContents.toggleDevTools()
@ -742,6 +745,7 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
ipcMain.handle(IpcChannel.App_GetBinaryPath, (_, name: string) => getBinaryPath(name))
ipcMain.handle(IpcChannel.App_InstallUvBinary, () => runInstallScript('install-uv.js'))
ipcMain.handle(IpcChannel.App_InstallBunBinary, () => runInstallScript('install-bun.js'))
ipcMain.handle(IpcChannel.App_InstallOvmsBinary, () => runInstallScript('install-ovms.js'))
// copilot
ipcMain.handle(IpcChannel.Copilot_GetAuthMessage, CopilotService.getAuthMessage.bind(CopilotService))
@ -873,6 +877,17 @@ export function registerIpc(mainWindow: BrowserWindow, app: Electron.App) {
ocrService.ocr(file, provider)
)
// OVMS
ipcMain.handle(IpcChannel.Ovms_AddModel, (_, modelName: string, modelId: string, modelSource: string, task: string) =>
ovmsManager.addModel(modelName, modelId, modelSource, task)
)
ipcMain.handle(IpcChannel.Ovms_StopAddModel, () => ovmsManager.stopAddModel())
ipcMain.handle(IpcChannel.Ovms_GetModels, () => ovmsManager.getModels())
ipcMain.handle(IpcChannel.Ovms_IsRunning, () => ovmsManager.initializeOvms())
ipcMain.handle(IpcChannel.Ovms_GetStatus, () => ovmsManager.getOvmsStatus())
ipcMain.handle(IpcChannel.Ovms_RunOVMS, () => ovmsManager.runOvms())
ipcMain.handle(IpcChannel.Ovms_StopOVMS, () => ovmsManager.stopOvms())
// CherryAI
ipcMain.handle(IpcChannel.Cherryai_GetSignature, (_, params) => generateSignature(params))
}
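
The preload hunk below exposes these handlers to the renderer as an `ovms` API on the preload bridge. A hedged sketch of the install-then-run flow a settings page might implement, assuming `installOvmsBinary` resolves with the installer's exit code (as the `install_code_*` locale strings suggest):

// window.api is the preload bridge extended in this commit; typed loosely here.
const { ovms, installOvmsBinary } = (window as any).api

async function ensureOvmsRunning(): Promise<void> {
  let status: 'not-installed' | 'not-running' | 'running' = await ovms.getStatus()
  if (status === 'not-installed') {
    // Assumption: the invoke resolves with install-ovms.js's exit code.
    const retcode = await installOvmsBinary()
    if (retcode !== 0) throw new Error(`OVMS install failed with code ${retcode}`)
    status = 'not-running'
  }
  if (status !== 'running') {
    await ovms.runOvms()
  }
}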

View File

@ -0,0 +1,586 @@
import { exec } from 'node:child_process'
import { homedir } from 'node:os'
import { promisify } from 'node:util'
import { loggerService } from '@logger'
import * as fs from 'fs-extra'
import * as path from 'path'
const logger = loggerService.withContext('OvmsManager')
const execAsync = promisify(exec)
interface OvmsProcess {
pid: number
path: string
workingDirectory: string
}
interface ModelConfig {
name: string
base_path: string
}
interface OvmsConfig {
mediapipe_config_list: ModelConfig[]
}
class OvmsManager {
private ovms: OvmsProcess | null = null
/**
* Recursively terminate a process and all of its child processes
* @param pid Process ID to terminate
* @returns Promise<{ success: boolean; message?: string }>
*/
private async terminateProcess(pid: number): Promise<{ success: boolean; message?: string }> {
try {
// Check if the process is running
const processCheckCommand = `Get-Process -Id ${pid} -ErrorAction SilentlyContinue | Select-Object Id | ConvertTo-Json`
const { stdout: processStdout } = await execAsync(`powershell -Command "${processCheckCommand}"`)
if (!processStdout.trim()) {
logger.info(`Process with PID ${pid} is not running`)
return { success: true, message: `Process with PID ${pid} is not running` }
}
// Find child processes
const childProcessCommand = `Get-WmiObject -Class Win32_Process | Where-Object { $_.ParentProcessId -eq ${pid} } | Select-Object ProcessId | ConvertTo-Json`
const { stdout: childStdout } = await execAsync(`powershell -Command "${childProcessCommand}"`)
// If there are child processes, terminate them first
if (childStdout.trim()) {
const childProcesses = JSON.parse(childStdout)
const childList = Array.isArray(childProcesses) ? childProcesses : [childProcesses]
logger.info(`Found ${childList.length} child processes for PID ${pid}`)
// Recursively terminate each child process
for (const childProcess of childList) {
const childPid = childProcess.ProcessId
logger.info(`Terminating child process PID: ${childPid}`)
await this.terminateProcess(childPid)
}
} else {
logger.info(`No child processes found for PID ${pid}`)
}
// Finally, terminate the parent process
const killCommand = `Stop-Process -Id ${pid} -Force -ErrorAction SilentlyContinue`
await execAsync(`powershell -Command "${killCommand}"`)
logger.info(`Terminated process with PID: ${pid}`)
// Wait for the process to disappear with 5-second timeout
const timeout = 5000 // 5 seconds
const startTime = Date.now()
while (Date.now() - startTime < timeout) {
const checkCommand = `Get-Process -Id ${pid} -ErrorAction SilentlyContinue | Select-Object Id | ConvertTo-Json`
const { stdout: checkStdout } = await execAsync(`powershell -Command "${checkCommand}"`)
if (!checkStdout.trim()) {
logger.info(`Process with PID ${pid} has disappeared`)
return { success: true, message: `Process ${pid} and all child processes terminated successfully` }
}
// Wait 300ms before checking again
await new Promise((resolve) => setTimeout(resolve, 300))
}
logger.warn(`Process with PID ${pid} did not disappear within timeout`)
return { success: false, message: `Process ${pid} did not disappear within 5 seconds` }
} catch (error) {
logger.error(`Failed to terminate process ${pid}:`, error as Error)
return { success: false, message: `Failed to terminate process ${pid}` }
}
}
/**
* Stop OVMS process if it's running
* @returns Promise<{ success: boolean; message?: string }>
*/
public async stopOvms(): Promise<{ success: boolean; message?: string }> {
try {
// Check if OVMS process is running
const psCommand = `Get-Process -Name "ovms" -ErrorAction SilentlyContinue | Select-Object Id, Path | ConvertTo-Json`
const { stdout } = await execAsync(`powershell -Command "${psCommand}"`)
if (!stdout.trim()) {
logger.info('OVMS process is not running')
return { success: true, message: 'OVMS process is not running' }
}
const processes = JSON.parse(stdout)
const processList = Array.isArray(processes) ? processes : [processes]
if (processList.length === 0) {
logger.info('OVMS process is not running')
return { success: true, message: 'OVMS process is not running' }
}
// Terminate every OVMS process via terminateProcess
for (const proc of processList) {
const result = await this.terminateProcess(proc.Id)
if (!result.success) {
logger.error(`Failed to terminate OVMS process with PID: ${proc.Id}, ${result.message}`)
return { success: false, message: `Failed to terminate OVMS process: ${result.message}` }
}
logger.info(`Terminated OVMS process with PID: ${proc.Id}`)
}
// Reset the ovms instance
this.ovms = null
logger.info('OVMS process stopped successfully')
return { success: true, message: 'OVMS process stopped successfully' }
} catch (error) {
logger.error(`Failed to stop OVMS process: ${error}`)
return { success: false, message: 'Failed to stop OVMS process' }
}
}
/**
* Run OVMS by ensuring config.json exists and executing run.bat
* @returns Promise<{ success: boolean; message?: string }>
*/
public async runOvms(): Promise<{ success: boolean; message?: string }> {
const homeDir = homedir()
const ovmsDir = path.join(homeDir, '.cherrystudio', 'ovms', 'ovms')
const configPath = path.join(ovmsDir, 'models', 'config.json')
const runBatPath = path.join(ovmsDir, 'run.bat')
try {
// Check if config.json exists, if not create it with default content
if (!(await fs.pathExists(configPath))) {
logger.info(`Config file does not exist, creating: ${configPath}`)
// Ensure the models directory exists
await fs.ensureDir(path.dirname(configPath))
// Create config.json with default content
const defaultConfig = {
mediapipe_config_list: [],
model_config_list: []
}
await fs.writeJson(configPath, defaultConfig, { spaces: 2 })
logger.info(`Config file created: ${configPath}`)
}
// Check if run.bat exists
if (!(await fs.pathExists(runBatPath))) {
logger.error(`run.bat not found at: ${runBatPath}`)
return { success: false, message: 'run.bat not found' }
}
// Run run.bat without waiting for it to complete
logger.info(`Starting OVMS with run.bat: ${runBatPath}`)
exec(`"${runBatPath}"`, { cwd: ovmsDir }, (error) => {
if (error) {
logger.error(`Error running run.bat: ${error}`)
}
})
logger.info('OVMS started successfully')
return { success: true }
} catch (error) {
logger.error(`Failed to run OVMS: ${error}`)
return { success: false, message: 'Failed to run OVMS' }
}
}
/**
* Get OVMS status - checks installation and running status
* @returns 'not-installed' | 'not-running' | 'running'
*/
public async getOvmsStatus(): Promise<'not-installed' | 'not-running' | 'running'> {
const homeDir = homedir()
const ovmsPath = path.join(homeDir, '.cherrystudio', 'ovms', 'ovms', 'ovms.exe')
try {
// Check if OVMS executable exists
if (!(await fs.pathExists(ovmsPath))) {
logger.info(`OVMS executable not found at: ${ovmsPath}`)
return 'not-installed'
}
// Check if OVMS process is running
const psCommand = `Get-Process -Name "ovms" -ErrorAction SilentlyContinue | Select-Object Id, Path | ConvertTo-Json`
const { stdout } = await execAsync(`powershell -Command "${psCommand}"`)
if (!stdout.trim()) {
logger.info('OVMS process not running')
return 'not-running'
}
const processes = JSON.parse(stdout)
const processList = Array.isArray(processes) ? processes : [processes]
if (processList.length > 0) {
logger.info('OVMS process is running')
return 'running'
} else {
logger.info('OVMS process not running')
return 'not-running'
}
} catch (error) {
logger.info(`Failed to check OVMS status: ${error}`)
return 'not-running'
}
}
/**
* Initialize OVMS by finding the executable path and working directory
*/
public async initializeOvms(): Promise<boolean> {
// Use PowerShell to find ovms.exe processes with their paths
const psCommand = `Get-Process -Name "ovms" -ErrorAction SilentlyContinue | Select-Object Id, Path | ConvertTo-Json`
const { stdout } = await execAsync(`powershell -Command "${psCommand}"`)
if (!stdout.trim()) {
logger.error('Command to find OVMS process returned no output')
return false
}
logger.debug(`OVMS process output: ${stdout}`)
const processes = JSON.parse(stdout)
const processList = Array.isArray(processes) ? processes : [processes]
// Use the first process that reports a valid executable path
for (const proc of processList) {
if (!proc.Path) continue
this.ovms = {
pid: proc.Id,
path: proc.Path,
workingDirectory: path.dirname(proc.Path)
}
return true
}
return this.ovms !== null
}
/**
* Check whether a model name and ID are available, i.e. not already used in config.json
* @param modelName Name of the model to check
* @param modelId ID of the model to check
*/
public async isNameAndIdValid(modelName: string, modelId: string): Promise<boolean> {
if (!modelName || !modelId) {
logger.error('Model name and ID cannot be empty')
return false
}
const homeDir = homedir()
const configPath = path.join(homeDir, '.cherrystudio', 'ovms', 'ovms', 'models', 'config.json')
try {
// If the config does not exist yet, no name or ID is in use
if (!(await fs.pathExists(configPath))) {
logger.warn(`Config file does not exist: ${configPath}`)
return true
}
const config: OvmsConfig = await fs.readJson(configPath)
if (!config.mediapipe_config_list) {
logger.warn(`No mediapipe_config_list found in config: ${configPath}`)
return true
}
// Check if the model name or ID already exists in the config
const exists = config.mediapipe_config_list.some(
(model) => model.name === modelName || model.base_path === modelId
)
if (exists) {
logger.warn(`Model with name "${modelName}" or ID "${modelId}" already exists in the config`)
return false
}
} catch (error) {
logger.error(`Failed to check model existence: ${error}`)
return false
}
return true
}
/**
* Apply bundled patch files to a downloaded model directory; patch folders under
* ~/.cherrystudio/ovms/patch are matched against the model ID by substring
*/
private async applyModelPath(modelDirPath: string): Promise<boolean> {
const homeDir = homedir()
const patchDir = path.join(homeDir, '.cherrystudio', 'ovms', 'patch')
if (!(await fs.pathExists(patchDir))) {
return true
}
const modelId = path.basename(modelDirPath)
// Iterate over all subdirectories in patchDir
const patches = await fs.readdir(patchDir)
for (const patch of patches) {
const fullPatchPath = path.join(patchDir, patch)
if ((await fs.lstat(fullPatchPath)).isDirectory()) {
if (modelId.toLowerCase().includes(patch.toLowerCase())) {
// Copy all files from fullPatchPath to modelDirPath
try {
const files = await fs.readdir(fullPatchPath)
for (const file of files) {
const srcFile = path.join(fullPatchPath, file)
const destFile = path.join(modelDirPath, file)
await fs.copyFile(srcFile, destFile)
}
} catch (error) {
logger.error(`Failed to copy files from ${fullPatchPath} to ${modelDirPath}: ${error}`)
return false
}
logger.info(`Applied patches for model ${modelId}`)
return true
}
}
}
return true
}
/**
* Add a model to OVMS by downloading it
* @param modelName Name of the model to add
* @param modelId ID of the model to download
* @param modelSource Model Source: huggingface, hf-mirror and modelscope, default is huggingface
* @param task Task type: text_generation, embedding, rerank, image_generation
*/
public async addModel(
modelName: string,
modelId: string,
modelSource: string,
task: string = 'text_generation'
): Promise<{ success: boolean; message?: string }> {
logger.info(`Adding model: ${modelName} with ID: ${modelId}, Source: ${modelSource}, Task: ${task}`)
const homeDir = homedir()
const ovdndDir = path.join(homeDir, '.cherrystudio', 'ovms', 'ovms')
const pathModel = path.join(ovdndDir, 'models', modelId)
try {
// Fail if the model directory already exists under <ovmsDir>/models
if (await fs.pathExists(pathModel)) {
logger.error(`Model with ID ${modelId} already exists`)
return { success: false, message: 'Model ID already exists!' }
}
// Use ovdnd.exe for downloading instead of ovms.exe
const ovdndPath = path.join(ovdndDir, 'ovdnd.exe')
const command =
`"${ovdndPath}" --pull ` +
`--model_repository_path "${ovdndDir}/models" ` +
`--source_model "${modelId}" ` +
`--model_name "${modelName}" ` +
`--target_device GPU ` +
`--task ${task} ` +
`--overwrite_models`
const env: Record<string, string | undefined> = {
...process.env,
OVMS_DIR: ovdndDir,
PYTHONHOME: path.join(ovdndDir, 'python'),
PATH: `${process.env.PATH};${ovdndDir};${path.join(ovdndDir, 'python')}`
}
if (modelSource) {
env.HF_ENDPOINT = modelSource
}
logger.info(`Running command: ${command} (model source: ${modelSource})`)
const { stdout } = await execAsync(command, { env: env, cwd: ovdndDir })
logger.info('Model download completed')
logger.debug(`Command output: ${stdout}`)
} catch (error) {
// remove ovdnDir+'models'+modelId if it exists
if (await fs.pathExists(pathModel)) {
logger.info(`Removing failed model directory: ${pathModel}`)
await fs.remove(pathModel)
}
logger.error(`Failed to add model: ${error}`)
return {
success: false,
message: `Download model ${modelId} failed, please check following items and try it again:<p>- the model id</p><p>- network connection and proxy</p>`
}
}
// Update config file
if (!(await this.updateModelConfig(modelName, modelId))) {
logger.error('Failed to update model config')
return { success: false, message: 'Failed to update model config' }
}
if (!(await this.applyModelPath(pathModel))) {
logger.error('Failed to apply model patches')
return { success: false, message: 'Failed to apply model patches' }
}
logger.info(`Model ${modelName} added successfully with ID ${modelId}`)
return { success: true }
}
/**
* Stop the model download process if it's running
* @returns Promise<{ success: boolean; message?: string }>
*/
public async stopAddModel(): Promise<{ success: boolean; message?: string }> {
try {
// Check if ovdnd.exe process is running
const psCommand = `Get-Process -Name "ovdnd" -ErrorAction SilentlyContinue | Select-Object Id, Path | ConvertTo-Json`
const { stdout } = await execAsync(`powershell -Command "${psCommand}"`)
if (!stdout.trim()) {
logger.info('ovdnd process is not running')
return { success: true, message: 'Model download process is not running' }
}
const processes = JSON.parse(stdout)
const processList = Array.isArray(processes) ? processes : [processes]
if (processList.length === 0) {
logger.info('ovdnd process is not running')
return { success: true, message: 'Model download process is not running' }
}
// Terminate all ovdnd processes and wait for each to exit
for (const proc of processList) {
await this.terminateProcess(proc.Id)
}
logger.info('Model download process stopped successfully')
return { success: true, message: 'Model download process stopped successfully' }
} catch (error) {
logger.error(`Failed to stop model download process: ${error}`)
return { success: false, message: 'Failed to stop model download process' }
}
}
/**
* Check whether a model ID exists in the OVMS configuration
* @param modelId ID of the model to check
*/
public async checkModelExists(modelId: string): Promise<boolean> {
const homeDir = homedir()
const ovmsDir = path.join(homeDir, '.cherrystudio', 'ovms', 'ovms')
const configPath = path.join(ovmsDir, 'models', 'config.json')
try {
if (!(await fs.pathExists(configPath))) {
logger.warn(`Config file does not exist: ${configPath}`)
return false
}
const config: OvmsConfig = await fs.readJson(configPath)
if (!config.mediapipe_config_list) {
logger.warn('No mediapipe_config_list found in config')
return false
}
return config.mediapipe_config_list.some((model) => model.base_path === modelId)
} catch (error) {
logger.error(`Failed to check model existence: ${error}`)
return false
}
}
/**
* Update the model configuration file
*/
public async updateModelConfig(modelName: string, modelId: string): Promise<boolean> {
const homeDir = homedir()
const ovmsDir = path.join(homeDir, '.cherrystudio', 'ovms', 'ovms')
const configPath = path.join(ovmsDir, 'models', 'config.json')
try {
// Ensure the models directory exists
await fs.ensureDir(path.dirname(configPath))
let config: OvmsConfig
// Read existing config or create new one
if (await fs.pathExists(configPath)) {
config = await fs.readJson(configPath)
} else {
config = { mediapipe_config_list: [] }
}
// Ensure mediapipe_config_list exists
if (!config.mediapipe_config_list) {
config.mediapipe_config_list = []
}
// Add new model config
const newModelConfig: ModelConfig = {
name: modelName,
base_path: modelId
}
// Check if model already exists, if so, update it
const existingIndex = config.mediapipe_config_list.findIndex((model) => model.base_path === modelId)
if (existingIndex >= 0) {
config.mediapipe_config_list[existingIndex] = newModelConfig
logger.info(`Updated existing model config: ${modelName}`)
} else {
config.mediapipe_config_list.push(newModelConfig)
logger.info(`Added new model config: ${modelName}`)
}
// Write config back to file
await fs.writeJson(configPath, config, { spaces: 2 })
logger.info(`Config file updated: ${configPath}`)
} catch (error) {
logger.error(`Failed to update model config: ${error}`)
return false
}
return true
}
/**
* Get all models from OVMS config, filtered for image generation models
* @returns Array of model configurations
*/
public async getModels(): Promise<ModelConfig[]> {
const homeDir = homedir()
const ovmsDir = path.join(homeDir, '.cherrystudio', 'ovms', 'ovms')
const configPath = path.join(ovmsDir, 'models', 'config.json')
try {
if (!(await fs.pathExists(configPath))) {
logger.warn(`Config file does not exist: ${configPath}`)
return []
}
const config: OvmsConfig = await fs.readJson(configPath)
if (!config.mediapipe_config_list) {
logger.warn('No mediapipe_config_list found in config')
return []
}
// Filter models for image generation (SD, Stable-Diffusion, Stable Diffusion, FLUX)
const imageGenerationModels = config.mediapipe_config_list.filter((model) => {
const modelName = model.name.toLowerCase()
return (
modelName.startsWith('sd') ||
modelName.startsWith('stable-diffusion') ||
modelName.startsWith('stable diffusion') ||
modelName.startsWith('flux')
)
})
logger.info(`Found ${imageGenerationModels.length} image generation models`)
return imageGenerationModels
} catch (error) {
logger.error(`Failed to get models: ${error}`)
return []
}
}
}
export default OvmsManager
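
For reference, OvmsManager reads and writes a single models/config.json under '%USERPROFILE%\.cherrystudio\ovms\ovms'. From the interfaces above and the default that runOvms creates, the file looks roughly like this — the model entry is illustrative, reusing the example ID from the settings UI strings below:

// Illustrative content of models/config.json
const exampleConfig = {
  mediapipe_config_list: [
    // name is user-chosen; base_path holds the model ID (see updateModelConfig)
    { name: 'Qwen3-8B-int4-ov', base_path: 'OpenVINO/Qwen3-8B-int4-ov' }
  ],
  model_config_list: [] // written by runOvms when it creates the default file
}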

View File

@ -95,7 +95,8 @@ const api = {
},
system: {
getDeviceType: () => ipcRenderer.invoke(IpcChannel.System_GetDeviceType),
getHostname: () => ipcRenderer.invoke(IpcChannel.System_GetHostname),
getCpuName: () => ipcRenderer.invoke(IpcChannel.System_GetCpuName)
},
devTools: {
toggle: () => ipcRenderer.invoke(IpcChannel.System_ToggleDevTools)
@ -287,6 +288,16 @@ const api = {
clearAuthCache: (projectId: string, clientEmail?: string) =>
ipcRenderer.invoke(IpcChannel.VertexAI_ClearAuthCache, projectId, clientEmail)
},
ovms: {
addModel: (modelName: string, modelId: string, modelSource: string, task: string) =>
ipcRenderer.invoke(IpcChannel.Ovms_AddModel, modelName, modelId, modelSource, task),
stopAddModel: () => ipcRenderer.invoke(IpcChannel.Ovms_StopAddModel),
getModels: () => ipcRenderer.invoke(IpcChannel.Ovms_GetModels),
isRunning: () => ipcRenderer.invoke(IpcChannel.Ovms_IsRunning),
getStatus: () => ipcRenderer.invoke(IpcChannel.Ovms_GetStatus),
runOvms: () => ipcRenderer.invoke(IpcChannel.Ovms_RunOVMS),
stopOvms: () => ipcRenderer.invoke(IpcChannel.Ovms_StopOVMS)
},
config: {
set: (key: string, value: any, isNotify: boolean = false) =>
ipcRenderer.invoke(IpcChannel.Config_Set, key, value, isNotify),
@ -352,6 +363,7 @@ const api = {
getBinaryPath: (name: string) => ipcRenderer.invoke(IpcChannel.App_GetBinaryPath, name),
installUVBinary: () => ipcRenderer.invoke(IpcChannel.App_InstallUvBinary),
installBunBinary: () => ipcRenderer.invoke(IpcChannel.App_InstallBunBinary),
installOvmsBinary: () => ipcRenderer.invoke(IpcChannel.App_InstallOvmsBinary),
protocol: {
onReceiveData: (callback: (data: { url: string; params: any }) => void) => {
const listener = (_event: Electron.IpcRendererEvent, data: { url: string; params: any }) => {

View File

@ -12,6 +12,7 @@ import { VertexAPIClient } from './gemini/VertexAPIClient'
import { NewAPIClient } from './newapi/NewAPIClient'
import { OpenAIAPIClient } from './openai/OpenAIApiClient'
import { OpenAIResponseAPIClient } from './openai/OpenAIResponseAPIClient'
import { OVMSClient } from './ovms/OVMSClient'
import { PPIOAPIClient } from './ppio/PPIOAPIClient'
import { ZhipuAPIClient } from './zhipu/ZhipuAPIClient'
@ -63,6 +64,12 @@ export class ApiClientFactory {
return instance
}
if (provider.id === 'ovms') {
logger.debug(`Creating OVMSClient for provider: ${provider.id}`)
instance = new OVMSClient(provider) as BaseApiClient
return instance
}
// Then check the standard provider type
switch (provider.type) {
case 'openai':

View File

@ -0,0 +1,56 @@
import { loggerService } from '@logger'
import { isSupportedModel } from '@renderer/config/models'
import { objectKeys, Provider } from '@renderer/types'
import OpenAI from 'openai'
import { OpenAIAPIClient } from '../openai/OpenAIApiClient'
const logger = loggerService.withContext('OVMSClient')
export class OVMSClient extends OpenAIAPIClient {
constructor(provider: Provider) {
super(provider)
}
override async listModels(): Promise<OpenAI.Models.Model[]> {
try {
const sdk = await this.getSdkInstance()
const chatModelsResponse = await sdk.request({
method: 'get',
path: '../v1/config'
})
logger.debug(`Chat models response: ${JSON.stringify(chatModelsResponse)}`)
// Parse the config response to extract model information
const config = chatModelsResponse as Record<string, any>
const models = objectKeys(config)
.map((modelName) => {
const modelInfo = config[modelName]
// Check if model has at least one version with "AVAILABLE" state
const hasAvailableVersion = modelInfo?.model_version_status?.some(
(versionStatus: any) => versionStatus?.state === 'AVAILABLE'
)
if (hasAvailableVersion) {
return {
id: modelName,
object: 'model' as const,
owned_by: 'ovms',
created: Math.floor(Date.now() / 1000) // OpenAI reports `created` in Unix seconds
}
}
return null // Skip models without available versions
})
.filter(Boolean) // Remove null entries
logger.debug(`Processed models: ${JSON.stringify(models)}`)
// Filter out unsupported models
return models.filter((model): model is OpenAI.Models.Model => model !== null && isSupportedModel(model))
} catch (error) {
logger.error(`Error listing OVMS models: ${error}`)
return []
}
}
}
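
listModels() parses OVMS's config-status payload: an object keyed by model name, each entry carrying a model_version_status array with a state field. A hedged sketch of a response the parser above would accept (field layout per OVMS's /v1/config endpoint; the model name and version values are illustrative):

// Illustrative /v1/config payload: only models with at least one
// version in the AVAILABLE state become OpenAI model entries.
const exampleConfigResponse = {
  'Qwen3-8B-int4-ov': {
    model_version_status: [
      { version: '1', state: 'AVAILABLE', status: { error_code: 'OK', error_message: 'OK' } }
    ]
  }
}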

Binary file not shown (new image, 25 KiB)

Binary file not shown (new image, 3.6 KiB)

View File

@ -260,6 +260,7 @@ export const SYSTEM_MODELS: Record<SystemProviderId | 'defaultModel', Model[]> =
{ id: 'deepseek-r1', name: 'DeepSeek-R1', provider: 'burncloud', group: 'deepseek-ai' },
{ id: 'deepseek-v3', name: 'DeepSeek-V3', provider: 'burncloud', group: 'deepseek-ai' }
],
ovms: [],
ollama: [],
lmstudio: [],
silicon: [

View File

@ -61,6 +61,7 @@ import ChatGPTImageModelLogo from '@renderer/assets/images/models/gpt_image_1.pn
import ChatGPTo1ModelLogo from '@renderer/assets/images/models/gpt_o1.png'
import GPT5ModelLogo from '@renderer/assets/images/models/gpt-5.png'
import GPT5ChatModelLogo from '@renderer/assets/images/models/gpt-5-chat.png'
import GPT5CodexModelLogo from '@renderer/assets/images/models/gpt-5-codex.png'
import GPT5MiniModelLogo from '@renderer/assets/images/models/gpt-5-mini.png'
import GPT5NanoModelLogo from '@renderer/assets/images/models/gpt-5-nano.png'
import GrokModelLogo from '@renderer/assets/images/models/grok.png'
@ -162,6 +163,7 @@ export function getModelLogo(modelId: string) {
return undefined
}
// Each key is a regular-expression pattern
const logoMap = {
pixtral: isLight ? PixtralModelLogo : PixtralModelLogoDark,
jina: isLight ? JinaModelLogo : JinaModelLogoDark,
@ -177,6 +179,7 @@ export function getModelLogo(modelId: string) {
'gpt-5-mini': GPT5MiniModelLogo,
'gpt-5-nano': GPT5NanoModelLogo,
'gpt-5-chat': GPT5ChatModelLogo,
'gpt-5-codex': GPT5CodexModelLogo,
'gpt-5': GPT5ModelLogo,
gpts: isLight ? ChatGPT4ModelLogo : ChatGPT4ModelLogoDark,
'gpt-oss(?:-[\\w-]+)': isLight ? ChatGptModelLogo : ChatGptModelLogoDark,
@ -286,7 +289,7 @@ export function getModelLogo(modelId: string) {
longcat: LongCatAppLogo,
bytedance: BytedanceModelLogo,
'(V_1|V_1_TURBO|V_2|V_2A|V_2_TURBO|DESCRIBE|UPSCALE)': IdeogramModelLogo
} as const
for (const key in logoMap) {
const regex = new RegExp(key, 'i')

View File

@ -22,6 +22,7 @@ export const MODEL_SUPPORTED_REASONING_EFFORT: ReasoningEffortConfig = {
default: ['low', 'medium', 'high'] as const,
o: ['low', 'medium', 'high'] as const,
gpt5: ['minimal', 'low', 'medium', 'high'] as const,
gpt5_codex: ['low', 'medium', 'high'] as const,
grok: ['low', 'high'] as const,
gemini: ['low', 'medium', 'high', 'auto'] as const,
gemini_pro: ['low', 'medium', 'high', 'auto'] as const,
@ -40,6 +41,7 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
default: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.default] as const,
o: MODEL_SUPPORTED_REASONING_EFFORT.o,
gpt5: [...MODEL_SUPPORTED_REASONING_EFFORT.gpt5] as const,
gpt5_codex: MODEL_SUPPORTED_REASONING_EFFORT.gpt5_codex,
grok: MODEL_SUPPORTED_REASONING_EFFORT.grok,
gemini: ['off', ...MODEL_SUPPORTED_REASONING_EFFORT.gemini] as const,
gemini_pro: MODEL_SUPPORTED_REASONING_EFFORT.gemini_pro,
@ -55,8 +57,13 @@ export const MODEL_SUPPORTED_OPTIONS: ThinkingOptionConfig = {
export const getThinkModelType = (model: Model): ThinkingModelType => {
let thinkingModelType: ThinkingModelType = 'default'
const modelId = getLowerBaseModelName(model.id)
if (isGPT5SeriesModel(model)) {
if (modelId.includes('codex')) {
thinkingModelType = 'gpt5_codex'
} else {
thinkingModelType = 'gpt5'
}
} else if (isSupportedReasoningEffortOpenAIModel(model)) {
thinkingModelType = 'o'
} else if (isSupportedThinkingTokenGeminiModel(model)) {

View File

@ -24,6 +24,7 @@ import GrokProviderLogo from '@renderer/assets/images/providers/grok.png'
import GroqProviderLogo from '@renderer/assets/images/providers/groq.png'
import HyperbolicProviderLogo from '@renderer/assets/images/providers/hyperbolic.png'
import InfiniProviderLogo from '@renderer/assets/images/providers/infini.png'
import IntelOvmsLogo from '@renderer/assets/images/providers/intel.png'
import JinaProviderLogo from '@renderer/assets/images/providers/jina.png'
import LanyunProviderLogo from '@renderer/assets/images/providers/lanyun.png'
import LMStudioProviderLogo from '@renderer/assets/images/providers/lmstudio.png'
@ -112,6 +113,16 @@ export const SYSTEM_PROVIDERS_CONFIG: Record<SystemProviderId, SystemProvider> =
isSystem: true,
enabled: false
},
ovms: {
id: 'ovms',
name: 'OpenVINO Model Server',
type: 'openai',
apiKey: '',
apiHost: 'http://localhost:8000/v3/',
models: SYSTEM_MODELS.ovms,
isSystem: true,
enabled: false
},
ocoolai: {
id: 'ocoolai',
name: 'ocoolAI',
@ -657,6 +668,7 @@ export const PROVIDER_LOGO_MAP: AtLeast<SystemProviderId, string> = {
yi: ZeroOneProviderLogo,
groq: GroqProviderLogo,
zhipu: ZhipuProviderLogo,
ovms: IntelOvmsLogo,
ollama: OllamaProviderLogo,
lmstudio: LMStudioProviderLogo,
moonshot: MoonshotProviderLogo,
@ -1042,6 +1054,16 @@ export const PROVIDER_URLS: Record<SystemProviderId, ProviderUrls> = {
models: 'https://console.groq.com/docs/models'
}
},
ovms: {
api: {
url: 'http://localhost:8000/v3/'
},
websites: {
official: 'https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/overview.html',
docs: 'https://docs.openvino.ai/2025/model-server/ovms_what_is_openvino_model_server.html',
models: 'https://www.modelscope.cn/organization/OpenVINO'
}
},
ollama: {
api: {
url: 'http://localhost:11434'

View File

@ -67,6 +67,7 @@ const providerKeyMap = {
nvidia: 'provider.nvidia',
o3: 'provider.o3',
ocoolai: 'provider.ocoolai',
ovms: 'provider.ovms',
ollama: 'provider.ollama',
openai: 'provider.openai',
openrouter: 'provider.openrouter',

View File

@ -449,6 +449,7 @@
"added": "Added",
"case_sensitive": "Case Sensitive",
"collapse": "Collapse",
"download": "Download",
"includes_user_questions": "Include Your Questions",
"manage": "Manage",
"select_model": "Select Model",
@ -2049,6 +2050,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "Install",
"installing": "Installing",
"reinstall": "Re-Install",
"run": "Run OVMS",
"starting": "Starting",
"stop": "Stop OVMS",
"stopping": "Stopping"
},
"description": "<div><p>1. Download OV Models.</p><p>2. Add Models in 'Manager'.</p><p>Support Windows Only!</p><p>OVMS Install Path: '%USERPROFILE%\\.cherrystudio\\ovms' .</p><p>Please refer to <a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>Intel OVMS Guide</a></p></dev>",
"download": {
"button": "Download",
"error": "Download Error",
"model_id": {
"label": "Model ID:",
"model_id_pattern": "Model ID must start with OpenVINO/",
"placeholder": "Required e.g. OpenVINO/Qwen3-8B-int4-ov",
"required": "Please enter the model ID"
},
"model_name": {
"label": "Model Name:",
"placeholder": "Required e.g. Qwen3-8B-int4-ov",
"required": "Please enter the model name"
},
"model_source": "Model Source:",
"model_task": "Model Task:",
"success": "Download successful",
"success_desc": "Model \"{{modelName}}\"-\"{{modelId}}\" downloaded successfully, please go to the OVMS management interface to add the model",
"tip": "The model is downloading, sometimes it takes hours. Please be patient...",
"title": "Download Intel OpenVINO Model"
},
"failed": {
"install": "Install OVMS failed:",
"install_code_100": "Unknown Error",
"install_code_101": "Only supports Intel(R) Core(TM) Ultra CPU",
"install_code_102": "Only supports Windows",
"install_code_103": "Download OVMS runtime failed",
"install_code_104": "Uncompress OVMS runtime failed",
"install_code_105": "Clean OVMS runtime failed",
"run": "Run OVMS failed:",
"stop": "Stop OVMS failed:"
},
"status": {
"not_installed": "OVMS is not installed",
"not_running": "OVMS is not running",
"running": "OVMS is running",
"unknown": "OVMS status unknown"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "Aspect Ratio",
"aspect_ratios": {
@ -2280,6 +2332,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexity",
"ph8": "PH8",
"poe": "Poe",

View File

@ -449,6 +449,7 @@
"added": "已添加",
"case_sensitive": "区分大小写",
"collapse": "收起",
"download": "下载",
"includes_user_questions": "包含用户提问",
"manage": "管理",
"select_model": "选择模型",
@ -2049,6 +2050,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "安装",
"installing": "正在安装",
"reinstall": "重装",
"run": "运行 OVMS",
"starting": "启动中",
"stop": "停止 OVMS",
"stopping": "停止中"
},
"description": "<div><p>1. 下载 OV 模型.</p><p>2. 在 'Manager' 中添加模型.</p><p>仅支持 Windows!</p><p>OVMS 安装路径: '%USERPROFILE%\\.cherrystudio\\ovms' .</p><p>请参考 <a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>Intel OVMS 指南</a></p></dev>",
"download": {
"button": "下载",
"error": "选择失败",
"model_id": {
"label": "模型 ID",
"model_id_pattern": "模型 ID 必须以 OpenVINO/ 开头",
"placeholder": "必填,例如 OpenVINO/Qwen3-8B-int4-ov",
"required": "请输入模型 ID"
},
"model_name": {
"label": "模型名称",
"placeholder": "必填,例如 Qwen3-8B-int4-ov",
"required": "请输入模型名称"
},
"model_source": "模型来源:",
"model_task": "模型任务:",
"success": "下载成功",
"success_desc": "模型\"{{modelName}}\"-\"{{modelId}}\"下载成功,请前往 OVMS 管理界面添加模型",
"tip": "模型正在下载,有时需要几个小时。请耐心等待...",
"title": "下载 Intel OpenVINO 模型"
},
"failed": {
"install": "安装 OVMS 失败:",
"install_code_100": "未知错误",
"install_code_101": "仅支持 Intel(R) Core(TM) Ultra CPU",
"install_code_102": "仅支持 Windows",
"install_code_103": "下载 OVMS runtime 失败",
"install_code_104": "解压 OVMS runtime 失败",
"install_code_105": "清理 OVMS runtime 失败",
"run": "运行 OVMS 失败:",
"stop": "停止 OVMS 失败:"
},
"status": {
"not_installed": "OVMS 未安装",
"not_running": "OVMS 未运行",
"running": "OVMS 正在运行",
"unknown": "OVMS 状态未知"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "画幅比例",
"aspect_ratios": {
@ -2280,6 +2332,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexity",
"ph8": "PH8 大模型开放平台",
"poe": "Poe",

View File

@ -449,6 +449,7 @@
"added": "已新增",
"case_sensitive": "區分大小寫",
"collapse": "折疊",
"download": "下載",
"includes_user_questions": "包含使用者提問",
"manage": "管理",
"select_model": "選擇模型",
@ -2046,6 +2047,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "安裝",
"installing": "正在安裝",
"reinstall": "重新安裝",
"run": "執行 OVMS",
"starting": "啟動中",
"stop": "停止 OVMS",
"stopping": "停止中"
},
"description": "<div><p>1. 下載 OV 模型。</p><p>2. 在 'Manager' 中新增模型。</p><p>僅支援 Windows</p><p>OVMS 安裝路徑: '%USERPROFILE%\\.cherrystudio\\ovms' 。</p><p>請參考 <a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>Intel OVMS 指南</a></p></dev>",
"download": {
"button": "下載",
"error": "選擇失敗",
"model_id": {
"label": "模型 ID",
"model_id_pattern": "模型 ID 必須以 OpenVINO/ 開頭",
"placeholder": "必填,例如 OpenVINO/Qwen3-8B-int4-ov",
"required": "請輸入模型 ID"
},
"model_name": {
"label": "模型名稱",
"placeholder": "必填,例如 Qwen3-8B-int4-ov",
"required": "請輸入模型名稱"
},
"model_source": "模型來源:",
"model_task": "模型任務:",
"success": "下載成功",
"success_desc": "模型\"{{modelName}}\"-\"{{modelId}}\"下載成功,請前往 OVMS 管理界面添加模型",
"tip": "模型正在下載,有時需要幾個小時。請耐心等候...",
"title": "下載 Intel OpenVINO 模型"
},
"failed": {
"install": "安裝 OVMS 失敗:",
"install_code_100": "未知錯誤",
"install_code_101": "僅支援 Intel(R) Core(TM) Ultra CPU",
"install_code_102": "僅支援 Windows",
"install_code_103": "下載 OVMS runtime 失敗",
"install_code_104": "解壓 OVMS runtime 失敗",
"install_code_105": "清理 OVMS runtime 失敗",
"run": "執行 OVMS 失敗:",
"stop": "停止 OVMS 失敗:"
},
"status": {
"not_installed": "OVMS 未安裝",
"not_running": "OVMS 未執行",
"running": "OVMS 正在執行",
"unknown": "OVMS 狀態未知"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "畫幅比例",
"aspect_ratios": {
@ -2277,6 +2329,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexity",
"ph8": "PH8 大模型開放平台",
"poe": "Poe",

View File

@ -449,6 +449,7 @@
"added": "προστέθηκε",
"case_sensitive": "Διάκριση πεζών/κεφαλαίων",
"collapse": "συμπεριλάβετε",
"download": "Λήψη",
"includes_user_questions": "Περιλαμβάνει ερωτήσεις χρήστη",
"manage": "χειριστείτε",
"select_model": "επιλογή μοντέλου",
@ -2046,6 +2047,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "Εγκατάσταση",
"installing": "Εγκατάσταση σε εξέλιξη",
"reinstall": "Επανεγκατάσταση",
"run": "Εκτέλεση OVMS",
"starting": "Εκκίνηση σε εξέλιξη",
"stop": "Διακοπή OVMS",
"stopping": "Διακοπή σε εξέλιξη"
},
"description": "<div><p>1. Λήψη μοντέλου OV.</p><p>2. Προσθήκη μοντέλου στο 'Manager'.</p><p>Υποστηρίζεται μόνο στα Windows!</p><p>Διαδρομή εγκατάστασης OVMS: '%USERPROFILE%\\.cherrystudio\\ovms' .</p><p>Ανατρέξτε στον <a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>Οδηγό Intel OVMS</a></p></div>",
"download": {
"button": "Λήψη",
"error": "Η επιλογή απέτυχε",
"model_id": {
"label": "Αναγνωριστικό μοντέλου:",
"model_id_pattern": "Το αναγνωριστικό μοντέλου πρέπει να ξεκινά με OpenVINO/",
"placeholder": "Απαιτείται, π.χ. OpenVINO/Qwen3-8B-int4-ov",
"required": "Παρακαλώ εισάγετε το αναγνωριστικό μοντέλου"
},
"model_name": {
"label": "Όνομα μοντέλου:",
"placeholder": "Απαιτείται, π.χ. Qwen3-8B-int4-ov",
"required": "Παρακαλώ εισάγετε το όνομα του μοντέλου"
},
"model_source": "Πηγή μοντέλου:",
"model_task": "Εργασία μοντέλου:",
"success": "Η λήψη ολοκληρώθηκε με επιτυχία",
"success_desc": "Το μοντέλο \"{{modelName}}\"-\"{{modelId}}\" λήφθηκε επιτυχώς, παρακαλώ μεταβείτε στη διεπαφή διαχείρισης OVMS για να προσθέσετε το μοντέλο",
"tip": "Το μοντέλο κατεβαίνει, μερικές φορές χρειάζονται αρκετές ώρες. Παρακαλώ περιμένετε υπομονετικά...",
"title": "Λήψη μοντέλου Intel OpenVINO"
},
"failed": {
"install": "Η εγκατάσταση του OVMS απέτυχε:",
"install_code_100": "Άγνωστο σφάλμα",
"install_code_101": "Υποστηρίζεται μόνο σε Intel(R) Core(TM) Ultra CPU",
"install_code_102": "Υποστηρίζεται μόνο στα Windows",
"install_code_103": "Η λήψη του OVMS runtime απέτυχε",
"install_code_104": "Η αποσυμπίεση του OVMS runtime απέτυχε",
"install_code_105": "Ο καθαρισμός του OVMS runtime απέτυχε",
"run": "Η εκτέλεση του OVMS απέτυχε:",
"stop": "Η διακοπή του OVMS απέτυχε:"
},
"status": {
"not_installed": "Το OVMS δεν έχει εγκατασταθεί",
"not_running": "Το OVMS δεν εκτελείται",
"running": "Το OVMS εκτελείται",
"unknown": "Άγνωστη κατάσταση OVMS"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "Λόγος διαστάσεων",
"aspect_ratios": {
@ -2277,6 +2329,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexity",
"ph8": "Πλατφόρμα Ανοιχτής Μεγάλης Μοντέλου PH8",
"poe": "Poe",

View File

@ -449,6 +449,7 @@
"added": "Agregado",
"case_sensitive": "Distingue mayúsculas y minúsculas",
"collapse": "Colapsar",
"download": "Descargar",
"includes_user_questions": "Incluye preguntas del usuario",
"manage": "Administrar",
"select_model": "Seleccionar Modelo",
@ -2046,6 +2047,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "Instalar",
"installing": "Instalando",
"reinstall": "Reinstalar",
"run": "Ejecutar OVMS",
"starting": "Iniciando",
"stop": "Detener OVMS",
"stopping": "Deteniendo"
},
"description": "<div><p>1. Descargar modelo OV.</p><p>2. Agregar modelo en 'Administrador'.</p><p>¡Solo compatible con Windows!</p><p>Ruta de instalación de OVMS: '%USERPROFILE%\\.cherrystudio\\ovms' .</p><p>Consulte la <a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>Guía de Intel OVMS</a></p></dev>",
"download": {
"button": "Descargar",
"error": "Selección fallida",
"model_id": {
"label": "ID del modelo:",
"model_id_pattern": "El ID del modelo debe comenzar con OpenVINO/",
"placeholder": "Requerido, por ejemplo, OpenVINO/Qwen3-8B-int4-ov",
"required": "Por favor, ingrese el ID del modelo"
},
"model_name": {
"label": "Nombre del modelo:",
"placeholder": "Requerido, por ejemplo, Qwen3-8B-int4-ov",
"required": "Por favor, ingrese el nombre del modelo"
},
"model_source": "Fuente del modelo:",
"model_task": "Tarea del modelo:",
"success": "Descarga exitosa",
"success_desc": "El modelo \"{{modelName}}\"-\"{{modelId}}\" se descargó exitosamente, por favor vaya a la interfaz de administración de OVMS para agregar el modelo",
"tip": "El modelo se está descargando, a veces toma varias horas. Por favor espere pacientemente...",
"title": "Descargar modelo Intel OpenVINO"
},
"failed": {
"install": "Error al instalar OVMS:",
"install_code_100": "Error desconocido",
"install_code_101": "Solo compatible con CPU Intel(R) Core(TM) Ultra",
"install_code_102": "Solo compatible con Windows",
"install_code_103": "Error al descargar el tiempo de ejecución de OVMS",
"install_code_104": "Error al descomprimir el tiempo de ejecución de OVMS",
"install_code_105": "Error al limpiar el tiempo de ejecución de OVMS",
"run": "Error al ejecutar OVMS:",
"stop": "Error al detener OVMS:"
},
"status": {
"not_installed": "OVMS no instalado",
"not_running": "OVMS no está en ejecución",
"running": "OVMS en ejecución",
"unknown": "Estado de OVMS desconocido"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "Relación de aspecto",
"aspect_ratios": {
@ -2277,6 +2329,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplejidad",
"ph8": "Plataforma Abierta de Grandes Modelos PH8",
"poe": "Poe",

View File

@ -449,6 +449,7 @@
"added": "Ajouté",
"case_sensitive": "Respecter la casse",
"collapse": "Réduire",
"download": "Télécharger",
"includes_user_questions": "Inclure les questions de l'utilisateur",
"manage": "Gérer",
"select_model": "Sélectionner le Modèle",
@ -2046,6 +2047,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "Installer",
"installing": "Installation en cours",
"reinstall": "Réinstaller",
"run": "Exécuter OVMS",
"starting": "Démarrage en cours",
"stop": "Arrêter OVMS",
"stopping": "Arrêt en cours"
},
"description": "<div><p>1. Télécharger le modèle OV.</p><p>2. Ajouter le modèle dans 'Manager'.</p><p>Uniquement compatible avec Windows !</p><p>Chemin d'installation d'OVMS : '%USERPROFILE%\\.cherrystudio\\ovms' .</p><p>Veuillez vous référer au <a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>Guide Intel OVMS</a></p></dev>",
"download": {
"button": "Télécharger",
"error": "Échec de la sélection",
"model_id": {
"label": "ID du modèle :",
"model_id_pattern": "L'ID du modèle doit commencer par OpenVINO/",
"placeholder": "Requis, par exemple OpenVINO/Qwen3-8B-int4-ov",
"required": "Veuillez saisir l'ID du modèle"
},
"model_name": {
"label": "Nom du modèle :",
"placeholder": "Requis, par exemple Qwen3-8B-int4-ov",
"required": "Veuillez saisir le nom du modèle"
},
"model_source": "Source du modèle :",
"model_task": "Tâche du modèle :",
"success": "Téléchargement réussi",
"success_desc": "Le modèle \"{{modelName}}\"-\"{{modelId}}\" a été téléchargé avec succès, veuillez vous rendre à l'interface de gestion OVMS pour ajouter le modèle",
"tip": "Le modèle est en cours de téléchargement, cela peut parfois prendre plusieurs heures. Veuillez patienter...",
"title": "Télécharger le modèle Intel OpenVINO"
},
"failed": {
"install": "Échec de l'installation d'OVMS :",
"install_code_100": "Erreur inconnue",
"install_code_101": "Uniquement compatible avec les processeurs Intel(R) Core(TM) Ultra",
"install_code_102": "Uniquement compatible avec Windows",
"install_code_103": "Échec du téléchargement du runtime OVMS",
"install_code_104": "Échec de la décompression du runtime OVMS",
"install_code_105": "Échec du nettoyage du runtime OVMS",
"run": "Échec de l'exécution d'OVMS :",
"stop": "Échec de l'arrêt d'OVMS :"
},
"status": {
"not_installed": "OVMS non installé",
"not_running": "OVMS n'est pas en cours d'exécution",
"running": "OVMS en cours d'exécution",
"unknown": "État d'OVMS inconnu"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "Format d'image",
"aspect_ratios": {
@ -2277,6 +2329,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexité",
"ph8": "Plateforme ouverte de grands modèles PH8",
"poe": "Poe",

View File

@ -449,6 +449,7 @@
"added": "追加済み",
"case_sensitive": "大文字と小文字の区別",
"collapse": "折りたたむ",
"download": "ダウンロード",
"includes_user_questions": "ユーザーからの質問を含む",
"manage": "管理",
"select_model": "モデルを選択",
@ -2046,6 +2047,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "インストール",
"installing": "インストール中",
"reinstall": "再インストール",
"run": "OVMSを実行",
"starting": "起動中",
"stop": "OVMSを停止",
"stopping": "停止中"
},
"description": "<div><p>1. OVモデルをダウンロードします。</p><p>2. 'マネージャー'でモデルを追加します。</p><p>Windowsのみサポート</p><p>OVMSインストールパス: '%USERPROFILE%\\.cherrystudio\\ovms' 。</p><p>詳細は<a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>Intel OVMSガイド</a>をご参照ください。</p></dev>",
"download": {
"button": "ダウンロード",
"error": "ダウンロードエラー",
"model_id": {
"label": "モデルID",
"model_id_pattern": "モデルIDはOpenVINO/で始まる必要があります",
"placeholder": "必須 例: OpenVINO/Qwen3-8B-int4-ov",
"required": "モデルIDを入力してください"
},
"model_name": {
"label": "モデル名",
"placeholder": "必須 例: Qwen3-8B-int4-ov",
"required": "モデル名を入力してください"
},
"model_source": "モデルソース:",
"model_task": "モデルタスク:",
"success": "ダウンロード成功",
"success_desc": "モデル\"{{modelName}}\"-\"{{modelId}}\"ダウンロード成功、OVMS管理インターフェースに移動してモデルを追加してください",
"tip": "モデルはダウンロードされていますが、時には数時間かかります。我慢してください...",
"title": "Intel OpenVINOモデルをダウンロード"
},
"failed": {
"install": "OVMSのインストールに失敗しました:",
"install_code_100": "不明なエラー",
"install_code_101": "Intel(R) Core(TM) Ultra CPUのみサポート",
"install_code_102": "Windowsのみサポート",
"install_code_103": "OVMSランタイムのダウンロードに失敗しました",
"install_code_104": "OVMSランタイムの解凍に失敗しました",
"install_code_105": "OVMSランタイムのクリーンアップに失敗しました",
"run": "OVMSの実行に失敗しました:",
"stop": "OVMSの停止に失敗しました:"
},
"status": {
"not_installed": "OVMSはインストールされていません",
"not_running": "OVMSは実行されていません",
"running": "OVMSは実行中です",
"unknown": "OVMSのステータスが不明です"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "画幅比例",
"aspect_ratios": {
@ -2277,6 +2329,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexity",
"ph8": "PH8",
"poe": "Poe",

View File

@ -449,6 +449,7 @@
"added": "Adicionado",
"case_sensitive": "Diferenciar maiúsculas e minúsculas",
"collapse": "Recolher",
"download": "Baixar",
"includes_user_questions": "Incluir perguntas do usuário",
"manage": "Gerenciar",
"select_model": "Selecionar Modelo",
@ -2046,6 +2047,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "Instalar",
"installing": "Instalando",
"reinstall": "Reinstalar",
"run": "Executar OVMS",
"starting": "Iniciando",
"stop": "Parar OVMS",
"stopping": "Parando"
},
"description": "<div><p>1. Baixe o modelo OV.</p><p>2. Adicione o modelo no 'Gerenciador'.</p><p>Compatível apenas com Windows!</p><p>Caminho de instalação do OVMS: '%USERPROFILE%\\.cherrystudio\\ovms' .</p><p>Consulte o <a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>Guia do Intel OVMS</a></p></dev>",
"download": {
"button": "Baixar",
"error": "Falha na seleção",
"model_id": {
"label": "ID do modelo:",
"model_id_pattern": "O ID do modelo deve começar com OpenVINO/",
"placeholder": "Obrigatório, por exemplo, OpenVINO/Qwen3-8B-int4-ov",
"required": "Por favor, insira o ID do modelo"
},
"model_name": {
"label": "Nome do modelo:",
"placeholder": "Obrigatório, por exemplo, Qwen3-8B-int4-ov",
"required": "Por favor, insira o nome do modelo"
},
"model_source": "Fonte do modelo:",
"model_task": "Tarefa do modelo:",
"success": "Download concluído com sucesso",
"success_desc": "O modelo \"{{modelName}}\"-\"{{modelId}}\" foi baixado com sucesso, por favor vá para a interface de gerenciamento OVMS para adicionar o modelo",
"tip": "O modelo está sendo baixado, às vezes leva várias horas. Por favor aguarde pacientemente...",
"title": "Baixar modelo Intel OpenVINO"
},
"failed": {
"install": "Falha na instalação do OVMS:",
"install_code_100": "Erro desconhecido",
"install_code_101": "Compatível apenas com CPU Intel(R) Core(TM) Ultra",
"install_code_102": "Compatível apenas com Windows",
"install_code_103": "Falha ao baixar o tempo de execução do OVMS",
"install_code_104": "Falha ao descompactar o tempo de execução do OVMS",
"install_code_105": "Falha ao limpar o tempo de execução do OVMS",
"run": "Falha ao executar o OVMS:",
"stop": "Falha ao parar o OVMS:"
},
"status": {
"not_installed": "OVMS não instalado",
"not_running": "OVMS não está em execução",
"running": "OVMS em execução",
"unknown": "Status do OVMS desconhecido"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "Proporção da Imagem",
"aspect_ratios": {
@ -2277,6 +2329,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexidade",
"ph8": "Plataforma Aberta de Grandes Modelos PH8",
"poe": "Poe",

View File

@ -449,6 +449,7 @@
"added": "Добавлено",
"case_sensitive": "Чувствительность к регистру",
"collapse": "Свернуть",
"download": "Скачать",
"includes_user_questions": "Включает вопросы пользователей",
"manage": "Редактировать",
"select_model": "Выбрать модель",
@ -2046,6 +2047,57 @@
},
"title": "Ollama"
},
"ovms": {
"action": {
"install": "Установить",
"installing": "Установка",
"reinstall": "Переустановить",
"run": "Запустить OVMS",
"starting": "Запуск",
"stop": "Остановить OVMS",
"stopping": "Остановка"
},
"description": "<div><p>1. Загрузите модели OV.</p><p>2. Добавьте модели в 'Менеджер'.</p><p>Поддерживается только Windows!</p><p>Путь установки OVMS: '%USERPROFILE%\\.cherrystudio\\ovms'.</p><p>Пожалуйста, ознакомьтесь с <a href=https://github.com/openvinotoolkit/model_server/blob/c55551763d02825829337b62c2dcef9339706f79/docs/deploying_server_baremetal.md>руководством Intel OVMS</a></p></dev>",
"download": {
"button": "Скачать",
"error": "Ошибка загрузки",
"model_id": {
"label": "ID модели",
"model_id_pattern": "ID модели должен начинаться с OpenVINO/",
"placeholder": "Обязательно, например: OpenVINO/Qwen3-8B-int4-ov",
"required": "Пожалуйста, введите ID модели"
},
"model_name": {
"label": "Название модели:",
"placeholder": "Обязательно, например: Qwen3-8B-int4-ov",
"required": "Пожалуйста, введите название модели"
},
"model_source": "Источник модели:",
"model_task": "Задача модели:",
"success": "Скачивание успешно",
"success_desc": "Модель \"{{modelName}}\"-\"{{modelId}}\" успешно скачана, пожалуйста, перейдите в интерфейс управления OVMS, чтобы добавить модель",
"tip": "Модель загружается, иногда это занимает часы. Пожалуйста, будьте терпеливы...",
"title": "Скачать модель Intel OpenVINO"
},
"failed": {
"install": "Ошибка установки OVMS:",
"install_code_100": "Неизвестная ошибка",
"install_code_101": "Поддерживаются только процессоры Intel(R) Core(TM) Ultra CPU",
"install_code_102": "Поддерживается только Windows",
"install_code_103": "Ошибка загрузки среды выполнения OVMS",
"install_code_104": "Ошибка распаковки среды выполнения OVMS",
"install_code_105": "Ошибка очистки среды выполнения OVMS",
"run": "Ошибка запуска OVMS:",
"stop": "Ошибка остановки OVMS:"
},
"status": {
"not_installed": "OVMS не установлен",
"not_running": "OVMS не запущен",
"running": "OVMS запущен",
"unknown": "Статус OVMS неизвестен"
},
"title": "Intel OVMS"
},
"paintings": {
"aspect_ratio": "Пропорции изображения",
"aspect_ratios": {
@ -2277,6 +2329,7 @@
"ollama": "Ollama",
"openai": "OpenAI",
"openrouter": "OpenRouter",
"ovms": "Intel OVMS",
"perplexity": "Perplexity",
"ph8": "PH8",
"poe": "Poe",

View File

@ -0,0 +1,353 @@
import { loggerService } from '@logger'
import { TopView } from '@renderer/components/TopView'
import { Provider } from '@renderer/types'
import { AutoComplete, Button, Flex, Form, FormProps, Input, Modal, Progress, Select } from 'antd'
import { useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { useTimer } from '../../../../hooks/useTimer'
const logger = loggerService.withContext('OVMSClient')
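// This popup drives the OVMS model downloader through the preload bridge.
// Assumed surface (mirroring the ovms:* IPC channels): window.api.ovms.addModel(modelName, modelId, modelSource, task)
// resolves to { success, message }, and window.api.ovms.stopAddModel() aborts an in-flight download.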
interface ShowParams {
title: string
provider: Provider
}
interface Props extends ShowParams {
resolve: (data: any) => unknown
}
type FieldType = {
modelName: string
modelId: string
modelSource: string
task: string
}
interface PresetModel {
modelId: string
modelName: string
modelSource: string
task: string
label: string
}
const PRESET_MODELS: PresetModel[] = [
{
modelId: 'OpenVINO/Qwen3-8B-int4-ov',
modelName: 'Qwen3-8B-int4-ov',
modelSource: 'https://www.modelscope.cn/models',
task: 'text_generation',
label: 'Qwen3-8B-int4-ov (Text Generation)'
},
{
modelId: 'OpenVINO/bge-base-en-v1.5-fp16-ov',
modelName: 'bge-base-en-v1.5-fp16-ov',
modelSource: 'https://www.modelscope.cn/models',
task: 'embeddings',
label: 'bge-base-en-v1.5-fp16-ov (Embeddings)'
},
{
modelId: 'OpenVINO/bge-reranker-base-fp16-ov',
modelName: 'bge-reranker-base-fp16-ov',
modelSource: 'https://www.modelscope.cn/models',
task: 'rerank',
label: 'bge-reranker-base-fp16-ov (Rerank)'
},
{
modelId: 'OpenVINO/DeepSeek-R1-Distill-Qwen-7B-int4-ov',
modelName: 'DeepSeek-R1-Distill-Qwen-7B-int4-ov',
modelSource: 'https://www.modelscope.cn/models',
task: 'text_generation',
label: 'DeepSeek-R1-Distill-Qwen-7B-int4-ov (Text Generation)'
},
{
modelId: 'OpenVINO/stable-diffusion-v1-5-int8-ov',
modelName: 'stable-diffusion-v1-5-int8-ov',
modelSource: 'https://www.modelscope.cn/models',
task: 'image_generation',
label: 'stable-diffusion-v1-5-int8-ov (Image Generation)'
},
{
modelId: 'OpenVINO/FLUX.1-schnell-int4-ov',
modelName: 'FLUX.1-schnell-int4-ov',
modelSource: 'https://www.modelscope.cn/models',
task: 'image_generation',
label: 'FLUX.1-schnell-int4-ov (Image Generation)'
}
]
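// Presets prefill the form via the AutoComplete below; any other OpenVINO/* model ID can still be typed manually.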
const PopupContainer: React.FC<Props> = ({ title, resolve }) => {
const [open, setOpen] = useState(true)
const [loading, setLoading] = useState(false)
const [progress, setProgress] = useState(0)
// A ref rather than state: the in-flight async submit handler must observe cancellation
// immediately, which a captured state value (stale closure) would not provide.
const cancelledRef = useRef(false)
const [form] = Form.useForm()
const { t } = useTranslation()
const { setIntervalTimer, clearIntervalTimer, setTimeoutTimer } = useTimer()
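// The add-model API exposes no progress events, so we simulate a decelerating progress bar:
// it ticks every 500 ms, slows as it climbs, and is capped at 95% until the download actually finishes.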
const startFakeProgress = () => {
setProgress(0)
setIntervalTimer(
'progress',
() => {
setProgress((prev) => {
if (prev >= 95) {
return prev // Stop at 95% until actual completion
}
// Simulate realistic download progress with slowing speed
const increment =
prev < 30
? Math.random() * 1 + 0.25
: prev < 60
? Math.random() * 0.5 + 0.125
: Math.random() * 0.25 + 0.03125
return Math.min(prev + increment, 95)
})
},
500
)
}
const stopFakeProgress = (complete = false) => {
clearIntervalTimer('progress')
if (complete) {
setProgress(100)
// Reset progress after a short delay
setTimeoutTimer('progress-reset', () => setProgress(0), 1500)
} else {
setProgress(0)
}
}
const handlePresetSelect = (value: string) => {
const selectedPreset = PRESET_MODELS.find((model) => model.modelId === value)
if (selectedPreset) {
form.setFieldsValue({
modelId: selectedPreset.modelId,
modelName: selectedPreset.modelName,
modelSource: selectedPreset.modelSource,
task: selectedPreset.task
})
}
}
const handleModelIdChange = (value: string) => {
if (value) {
// Extract model name from model ID (part after last '/')
const lastSlashIndex = value.lastIndexOf('/')
if (lastSlashIndex !== -1 && lastSlashIndex < value.length - 1) {
const modelName = value.substring(lastSlashIndex + 1)
form.setFieldValue('modelName', modelName)
}
}
}
const onCancel = async () => {
if (loading) {
// Stop the download
try {
cancelledRef.current = true // Mark as cancelled by user
logger.info('Stopping download...')
await window.api.ovms.stopAddModel()
stopFakeProgress(false)
setLoading(false)
} catch (error) {
logger.error(`Failed to stop download: ${error}`)
}
return
}
setOpen(false)
}
const onClose = () => {
resolve({})
}
const onFinish: FormProps<FieldType>['onFinish'] = async (values) => {
setLoading(true)
cancelledRef.current = false // Reset cancelled flag
startFakeProgress()
try {
const { modelName, modelId, modelSource, task } = values
logger.info(`🔄 Downloading model: ${modelName} with ID: ${modelId}, source: ${modelSource}, task: ${task}`)
const result = await window.api.ovms.addModel(modelName, modelId, modelSource, task)
if (result.success) {
stopFakeProgress(true) // Complete the progress bar
Modal.success({
title: t('ovms.download.success'),
content: t('ovms.download.success_desc', { modelName: modelName, modelId: modelId }),
onOk: () => {
setOpen(false)
}
})
} else {
stopFakeProgress(false) // Reset progress on error
logger.error(`Download failed, is it cancelled? ${cancelledRef.current}`)
// Only show error if not cancelled by user
if (!cancelledRef.current) {
Modal.error({
title: t('ovms.download.error'),
content: <div dangerouslySetInnerHTML={{ __html: result.message }}></div>,
onOk: () => {
// Keep the form open for retry
}
})
}
}
} catch (error: any) {
stopFakeProgress(false) // Reset progress on error
logger.error(`Download crashed, is it cancelled? ${cancelledRef.current}`)
// Only show error if not cancelled by user
if (!cancelledRef.current) {
Modal.error({
title: t('ovms.download.error'),
content: error.message,
onOk: () => {
// Keep the form open for retry
}
})
}
} finally {
setLoading(false)
}
}
return (
<Modal
title={title}
open={open}
onCancel={onCancel}
maskClosable={false}
afterClose={onClose}
footer={null}
transitionName="animation-move-down"
centered
closeIcon={!loading}>
<Form
form={form}
labelCol={{ flex: '110px' }}
labelAlign="left"
colon={false}
style={{ marginTop: 25 }}
onFinish={onFinish}
disabled={false}>
<Form.Item
name="modelId"
label={t('ovms.download.model_id.label')}
rules={[
{ required: true, message: t('ovms.download.model_id.required') },
{
pattern: /^OpenVINO\/.+/,
message: t('ovms.download.model_id.model_id_pattern')
}
]}>
<AutoComplete
placeholder={t('ovms.download.model_id.placeholder')}
options={PRESET_MODELS.map((model) => ({
value: model.modelId,
label: model.label
}))}
onSelect={handlePresetSelect}
onChange={handleModelIdChange}
disabled={loading}
allowClear
/>
</Form.Item>
<Form.Item
name="modelName"
label={t('ovms.download.model_name.label')}
rules={[{ required: true, message: t('ovms.download.model_name.required') }]}>
<Input
placeholder={t('ovms.download.model_name.placeholder')}
spellCheck={false}
maxLength={200}
disabled={loading}
/>
</Form.Item>
<Form.Item
name="modelSource"
label={t('ovms.download.model_source')}
initialValue="https://www.modelscope.cn/models"
rules={[{ required: false }]}>
<Select
options={[
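// An empty value means no explicit mirror; the downloader presumably falls back to the
// default Hugging Face hub, while the other entries pass a mirror base URL through.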
{ value: '', label: 'HuggingFace' },
{ value: 'https://hf-mirror.com', label: 'HF-Mirror' },
{ value: 'https://www.modelscope.cn/models', label: 'ModelScope' }
]}
disabled={loading}
/>
</Form.Item>
<Form.Item
name="task"
label={t('ovms.download.model_task')}
initialValue="text_generation"
rules={[{ required: false }]}>
<Select
options={[
{ value: 'text_generation', label: 'Text Generation' },
{ value: 'embeddings', label: 'Embeddings' },
{ value: 'rerank', label: 'Rerank' },
{ value: 'image_generation', label: 'Image Generation' }
]}
disabled={loading}
/>
</Form.Item>
{loading && (
<Form.Item style={{ marginBottom: 16 }}>
<Progress
percent={Math.round(progress)}
status={progress === 100 ? 'success' : 'active'}
strokeColor={{
'0%': '#108ee9',
'100%': '#87d068'
}}
showInfo={true}
format={(percent) => `${percent}%`}
/>
<div style={{ textAlign: 'center', marginTop: 8, color: '#666', fontSize: '14px' }}>
{t('ovms.download.tip')}
</div>
</Form.Item>
)}
<Form.Item style={{ marginBottom: 8, textAlign: 'center' }}>
<Flex justify="end" align="center" style={{ position: 'relative' }}>
<Button
type="primary"
htmlType={loading ? 'button' : 'submit'}
size="middle"
loading={false}
onClick={loading ? onCancel : undefined}>
{loading ? t('common.cancel') : t('ovms.download.button')}
</Button>
</Flex>
</Form.Item>
</Form>
</Modal>
)
}
export default class DownloadOVMSModelPopup {
static topviewId = 0
static hide() {
TopView.hide('DownloadOVMSModelPopup')
}
static show(props: ShowParams) {
return new Promise<any>((resolve) => {
TopView.show(
<PopupContainer
{...props}
resolve={(v) => {
resolve(v)
this.hide()
}}
/>,
'DownloadOVMSModelPopup'
)
})
}
}

View File

@ -8,6 +8,7 @@ import { getProviderLabel } from '@renderer/i18n/label'
import { SettingHelpLink, SettingHelpText, SettingHelpTextRow, SettingSubtitle } from '@renderer/pages/settings'
import EditModelPopup from '@renderer/pages/settings/ProviderSettings/EditModelPopup/EditModelPopup'
import AddModelPopup from '@renderer/pages/settings/ProviderSettings/ModelList/AddModelPopup'
import DownloadOVMSModelPopup from '@renderer/pages/settings/ProviderSettings/ModelList/DownloadOVMSModelPopup'
import ManageModelsPopup from '@renderer/pages/settings/ProviderSettings/ModelList/ManageModelsPopup'
import NewApiAddModelPopup from '@renderer/pages/settings/ProviderSettings/ModelList/NewApiAddModelPopup'
import { Model } from '@renderer/types'
@ -93,6 +94,11 @@ const ModelList: React.FC<ModelListProps> = ({ providerId }) => {
}
}, [provider, t])
const onDownloadModel = useCallback(
() => DownloadOVMSModelPopup.show({ title: t('ovms.download.title'), provider }),
[provider, t]
)
const isLoading = useMemo(() => displayedModelGroups === null, [displayedModelGroups])
return (
@ -167,9 +173,15 @@ const ModelList: React.FC<ModelListProps> = ({ providerId }) => {
<Button type="primary" onClick={onManageModel} icon={<ListCheck size={16} />} disabled={isHealthChecking}>
{t('button.manage')}
</Button>
<Button type="default" onClick={onAddModel} icon={<Plus size={16} />} disabled={isHealthChecking}>
{t('button.add')}
</Button>
{provider.id !== 'ovms' ? (
<Button type="default" onClick={onAddModel} icon={<Plus size={16} />} disabled={isHealthChecking}>
{t('button.add')}
</Button>
) : (
<Button type="default" onClick={onDownloadModel} icon={<Plus size={16} />}>
{t('button.download')}
</Button>
)}
</Flex>
</>
)

View File

@ -0,0 +1,170 @@
import { VStack } from '@renderer/components/Layout'
import { Alert, Button } from 'antd'
import { FC, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
import { SettingRow, SettingSubtitle } from '..'
const OVMSSettings: FC = () => {
const { t } = useTranslation()
const [ovmsStatus, setOvmsStatus] = useState<'not-installed' | 'not-running' | 'running'>('not-running')
const [isInstallingOvms, setIsInstallingOvms] = useState(false)
const [isRunningOvms, setIsRunningOvms] = useState(false)
const [isStoppingOvms, setIsStoppingOvms] = useState(false)
useEffect(() => {
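// Query the OVMS install/run status once when the settings panel mounts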
const checkStatus = async () => {
const status = await window.api.ovms.getStatus()
setOvmsStatus(status)
}
checkStatus()
}, [])
const installOvms = async () => {
try {
setIsInstallingOvms(true)
await window.api.installOvmsBinary()
// Re-check status after a successful install
const status = await window.api.ovms.getStatus()
setOvmsStatus(status)
setIsInstallingOvms(false)
} catch (error: any) {
const errCodeMsg = {
'100': t('ovms.failed.install_code_100'),
'101': t('ovms.failed.install_code_101'),
'102': t('ovms.failed.install_code_102'),
'103': t('ovms.failed.install_code_103'),
'104': t('ovms.failed.install_code_104'),
'105': t('ovms.failed.install_code_105')
}
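// Pull the numeric install error code out of the error message (format assumed to contain "code <n>")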
const match = error.message.match(/code (\d+)/)
const code = match ? match[1] : 'unknown'
const errorMsg = errCodeMsg[code as keyof typeof errCodeMsg] || error.message
window.toast.error(t('ovms.failed.install') + errorMsg)
setIsInstallingOvms(false)
}
}
const runOvms = async () => {
try {
setIsRunningOvms(true)
await window.api.ovms.runOvms()
// Re-check status after OVMS starts
const status = await window.api.ovms.getStatus()
setOvmsStatus(status)
setIsRunningOvms(false)
} catch (error: any) {
window.toast.error(t('ovms.failed.run') + error.message)
setIsRunningOvms(false)
}
}
const stopOvms = async () => {
try {
setIsStoppingOvms(true)
await window.api.ovms.stopOvms()
// Re-check status after OVMS stops
const status = await window.api.ovms.getStatus()
setOvmsStatus(status)
setIsStoppingOvms(false)
} catch (error: any) {
window.toast.error(t('ovms.failed.stop') + error.message)
setIsStoppingOvms(false)
}
}
const getAlertType = () => {
switch (ovmsStatus) {
case 'running':
return 'success'
case 'not-running':
return 'warning'
case 'not-installed':
return 'error'
default:
return 'warning'
}
}
const getStatusMessage = () => {
switch (ovmsStatus) {
case 'running':
return t('ovms.status.running')
case 'not-running':
return t('ovms.status.not_running')
case 'not-installed':
return t('ovms.status.not_installed')
default:
return t('ovms.status.unknown')
}
}
return (
<>
<Alert
type={getAlertType()}
banner
style={{ borderRadius: 'var(--list-item-border-radius)' }}
description={
<VStack>
<SettingRow style={{ width: '100%' }}>
<SettingSubtitle style={{ margin: 0, fontWeight: 'normal' }}>{getStatusMessage()}</SettingSubtitle>
{ovmsStatus === 'not-installed' && (
<Button
type="primary"
onClick={installOvms}
loading={isInstallingOvms}
disabled={isInstallingOvms}
size="small">
{isInstallingOvms ? t('ovms.action.installing') : t('ovms.action.install')}
</Button>
)}
{ovmsStatus === 'not-running' && (
<div style={{ display: 'flex', gap: '8px' }}>
<Button
type="primary"
onClick={installOvms}
loading={isInstallingOvms}
disabled={isInstallingOvms || isRunningOvms}
size="small">
{isInstallingOvms ? t('ovms.action.installing') : t('ovms.action.reinstall')}
</Button>
<Button
type="primary"
onClick={runOvms}
loading={isRunningOvms}
disabled={isRunningOvms}
size="small">
{isRunningOvms ? t('ovms.action.starting') : t('ovms.action.run')}
</Button>
</div>
)}
{ovmsStatus === 'running' && (
<Button
type="primary"
danger
onClick={stopOvms}
loading={isStoppingOvms}
disabled={isStoppingOvms}
size="small">
{isStoppingOvms ? t('ovms.action.stopping') : t('ovms.action.stop')}
</Button>
)}
</SettingRow>
</VStack>
}
/>
<Alert
type="info"
style={{ marginTop: 5 }}
message={'Intel OVMS Guide:'}
description={<div dangerouslySetInnerHTML={{ __html: t('ovms.description') }}></div>}
showIcon
/>
</>
)
}
export default OVMSSettings

View File

@ -27,6 +27,8 @@ import UrlSchemaInfoPopup from './UrlSchemaInfoPopup'
const logger = loggerService.withContext('ProviderList')
const BUTTON_WRAPPER_HEIGHT = 50
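// Resolved once at module load via top-level await; used below to hide providers this machine cannot run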
const systemType = await window.api.system.getDeviceType()
const cpuName = await window.api.system.getCpuName()
const ProviderList: FC = () => {
const [searchParams, setSearchParams] = useSearchParams()
@ -273,6 +275,10 @@ const ProviderList: FC = () => {
}
const filteredProviders = providers.filter((provider) => {
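// OVMS is Windows-only and requires an Intel CPU, so hide its provider entry everywhere else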
if (provider.id === 'ovms' && (systemType !== 'windows' || !cpuName.toLowerCase().includes('intel'))) {
return false
}
const keywords = searchText.toLowerCase().split(/\s+/).filter(Boolean)
const isProviderMatch = matchKeywordsInProvider(keywords, provider)
const isModelMatch = provider.models.some((model) => matchKeywordsInModel(keywords, model))

View File

@ -47,6 +47,7 @@ import DMXAPISettings from './DMXAPISettings'
import GithubCopilotSettings from './GithubCopilotSettings'
import GPUStackSettings from './GPUStackSettings'
import LMStudioSettings from './LMStudioSettings'
import OVMSSettings from './OVMSSettings'
import ProviderOAuth from './ProviderOAuth'
import SelectProviderModelPopup from './SelectProviderModelPopup'
import VertexAISettings from './VertexAISettings'
@ -324,6 +325,7 @@ const ProviderSetting: FC<Props> = ({ providerId }) => {
<Divider style={{ width: '100%', margin: '10px 0' }} />
{isProviderSupportAuth(provider) && <ProviderOAuth providerId={provider.id} />}
{provider.id === 'openai' && <OpenAIAlert />}
{provider.id === 'ovms' && <OVMSSettings />}
{isDmxapi && <DMXAPISettings providerId={provider.id} />}
{provider.id === 'anthropic' && (
<>

View File

@ -2578,6 +2578,15 @@ const migrateConfig = {
}
},
'159': (state: RootState) => {
try {
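// Register the new Intel OVMS provider for existing installations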
addProvider(state, 'ovms')
return state
} catch (error) {
logger.error('migrate 159 error', error as Error)
return state
}
},
'160': (state: RootState) => {
try {
// @ts-ignore
if (state?.agents?.agents) {
@ -2619,11 +2628,9 @@ const migrateConfig = {
break
case 'aihubmix':
provider.anthropicApiHost = 'https://aihubmix.com'
provider.isAnthropicModel = (m: Model) => m.id.includes('claude')
break
}
})
return state
} catch (error) {
logger.error('migrate 159 error', error as Error)

View File

@ -79,6 +79,7 @@ const ThinkModelTypes = [
'default',
'o',
'gpt5',
'gpt5_codex',
'grok',
'gemini',
'gemini_pro',