Merge remote-tracking branch 'origin/main' into migrate/v6

suyao 2026-01-29 15:48:34 +08:00
commit 85b4bbc551
267 changed files with 15353 additions and 4230 deletions

View File

@ -90,3 +90,30 @@ jobs:
- name: 📢 Notify if no changes
if: steps.git_status.outputs.has_changes != 'true'
run: echo "Bot script ran, but no changes were detected. No PR created."
- name: Send failure notification to Feishu
if: always() && (failure() || cancelled())
shell: bash
env:
FEISHU_WEBHOOK_URL: ${{ secrets.FEISHU_WEBHOOK_URL }}
FEISHU_WEBHOOK_SECRET: ${{ secrets.FEISHU_WEBHOOK_SECRET }}
RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
JOB_STATUS: ${{ job.status }}
run: |
# Determine status and color
if [ "$JOB_STATUS" = "cancelled" ]; then
STATUS_TEXT="已取消"
COLOR="orange"
else
STATUS_TEXT="失败"
COLOR="red"
fi
# Build description using printf
DESCRIPTION=$(printf "**状态:** %s\n\n**工作流:** [查看详情](%s)" "$STATUS_TEXT" "$RUN_URL")
# Send notification
pnpm tsx scripts/feishu-notify.ts send \
-t "自动国际化${STATUS_TEXT}" \
-d "$DESCRIPTION" \
-c "${COLOR}"

View File

@ -58,14 +58,34 @@ jobs:
with:
node-version: 22
- name: Install pnpm
uses: pnpm/action-setup@v4
- name: Get pnpm store directory
id: pnpm-cache
shell: bash
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
- name: Cache pnpm dependencies
uses: actions/cache@v4
with:
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
if: steps.check_time.outputs.should_delay == 'false'
run: pnpm install
- name: Process issue with Claude
if: steps.check_time.outputs.should_delay == 'false'
uses: anthropics/claude-code-action@main
uses: anthropics/claude-code-action@v1
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
allowed_non_write_users: "*"
anthropic_api_key: ${{ secrets.CLAUDE_TRANSLATOR_APIKEY }}
claude_args: "--allowed-tools Bash(gh issue:*),Bash(node scripts/feishu-notify.js)"
claude_args: "--allowed-tools Bash(gh issue:*),Bash(pnpm tsx scripts/feishu-notify.ts*)"
prompt: |
你是一个GitHub Issue自动化处理助手。请完成以下任务
@ -74,9 +94,14 @@ jobs:
- 标题:${{ github.event.issue.title }}
- 作者:${{ github.event.issue.user.login }}
- URL:${{ github.event.issue.html_url }}
- 内容:${{ github.event.issue.body }}
- 标签:${{ join(github.event.issue.labels.*.name, ', ') }}
### Issue body
`````md
${{ github.event.issue.body }}
`````
## 任务步骤
1. **分析并总结issue**
@ -86,20 +111,20 @@ jobs:
- 重要的技术细节
2. **发送飞书通知**
使用以下命令发送飞书通知(注意:ISSUE_SUMMARY需要用引号包裹)
使用CLI工具发送飞书通知,参考以下示例
```bash
ISSUE_URL="${{ github.event.issue.html_url }}" \
ISSUE_NUMBER="${{ github.event.issue.number }}" \
ISSUE_TITLE="${{ github.event.issue.title }}" \
ISSUE_AUTHOR="${{ github.event.issue.user.login }}" \
ISSUE_LABELS="${{ join(github.event.issue.labels.*.name, ',') }}" \
ISSUE_SUMMARY="<你生成的中文总结>" \
node scripts/feishu-notify.js
pnpm tsx scripts/feishu-notify.ts issue \
-u "${{ github.event.issue.html_url }}" \
-n "${{ github.event.issue.number }}" \
-t "${{ github.event.issue.title }}" \
-a "${{ github.event.issue.user.login }}" \
-l "${{ join(github.event.issue.labels.*.name, ',') }}" \
-m "<你生成的中文总结>"
```
## 注意事项
- 总结必须使用简体中文
- ISSUE_SUMMARY 在传递给 node 命令时需要正确转义特殊字符
- 命令行参数需要正确转义特殊字符
- 如果issue内容为空,也要提供一个简短的说明
请开始执行任务!
@ -125,13 +150,32 @@ jobs:
with:
node-version: 22
- name: Install pnpm
uses: pnpm/action-setup@v4
- name: Get pnpm store directory
id: pnpm-cache
shell: bash
run: echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_OUTPUT
- name: Cache pnpm dependencies
uses: actions/cache@v4
with:
path: ${{ steps.pnpm-cache.outputs.STORE_PATH }}
key: ${{ runner.os }}-pnpm-${{ hashFiles('**/pnpm-lock.yaml') }}
restore-keys: |
${{ runner.os }}-pnpm-
- name: Install dependencies
run: pnpm install
- name: Process pending issues with Claude
uses: anthropics/claude-code-action@main
uses: anthropics/claude-code-action@v1
with:
anthropic_api_key: ${{ secrets.CLAUDE_TRANSLATOR_APIKEY }}
allowed_non_write_users: "*"
github_token: ${{ secrets.GITHUB_TOKEN }}
claude_args: "--allowed-tools Bash(gh issue:*),Bash(gh api:*),Bash(node scripts/feishu-notify.js)"
claude_args: "--allowed-tools Bash(gh issue:*),Bash(gh api:*),Bash(pnpm tsx scripts/feishu-notify.ts*)"
prompt: |
你是一个GitHub Issue自动化处理助手。请完成以下任务
@ -153,15 +197,15 @@ jobs:
- 重要的技术细节
3. **发送飞书通知**
对于每个issue,使用以下命令发送飞书通知
使用CLI工具发送飞书通知,参考以下示例
```bash
ISSUE_URL="<issue的html_url>" \
ISSUE_NUMBER="<issue编号>" \
ISSUE_TITLE="<issue标题>" \
ISSUE_AUTHOR="<issue作者>" \
ISSUE_LABELS="<逗号分隔的标签列表,排除pending-feishu-notification>" \
ISSUE_SUMMARY="<你生成的中文总结>" \
node scripts/feishu-notify.js
pnpm tsx scripts/feishu-notify.ts issue \
-u "<issue的html_url>" \
-n "<issue编号>" \
-t "<issue标题>" \
-a "<issue作者>" \
-l "<逗号分隔的标签列表排除pending-feishu-notification>" \
-m "<你生成的中文总结>"
```
4. **移除标签**

View File

@ -59,5 +59,8 @@ jobs:
- name: i18n Check
run: pnpm i18n:check
- name: Hardcoded Strings Check
run: pnpm i18n:hardcoded:strict
- name: Test
run: pnpm test

View File

@ -7,6 +7,16 @@ on:
description: "Release tag (e.g. v1.0.0)"
required: true
default: "v1.0.0"
platform:
description: "Build platform"
required: true
default: "all"
type: choice
options:
- all
- windows
- mac
- linux
push:
tags:
- v*.*.*
@ -20,7 +30,14 @@ jobs:
strategy:
matrix:
os: [macos-latest, windows-latest, ubuntu-latest]
os: ${{ fromJSON(
github.event_name == 'push' && '["macos-latest", "windows-latest", "ubuntu-latest"]' ||
github.event.inputs.platform == 'all' && '["macos-latest", "windows-latest", "ubuntu-latest"]' ||
github.event.inputs.platform == 'linux' && '["ubuntu-latest"]' ||
github.event.inputs.platform == 'windows' && '["windows-latest"]' ||
github.event.inputs.platform == 'mac' && '["macos-latest"]' ||
'["macos-latest", "windows-latest", "ubuntu-latest"]'
) }}
fail-fast: false
steps:

View File

@ -79,7 +79,7 @@ jobs:
shell: bash
run: |
echo "Built Windows artifacts:"
ls -la dist/*.exe dist/*.blockmap dist/latest*.yml
ls -la dist/*.exe dist/latest*.yml
- name: Download GitHub release assets
shell: bash
@ -112,12 +112,10 @@ jobs:
fi
# Remove unsigned Windows files from downloaded assets
# *.exe, *.exe.blockmap, latest.yml (Windows only)
rm -f release-assets/*.exe release-assets/*.exe.blockmap release-assets/latest.yml 2>/dev/null || true
rm -f release-assets/*.exe release-assets/latest.yml 2>/dev/null || true
# Copy signed Windows files with error checking
cp dist/*.exe release-assets/ || { echo "ERROR: Failed to copy .exe files"; exit 1; }
cp dist/*.exe.blockmap release-assets/ || { echo "ERROR: Failed to copy .blockmap files"; exit 1; }
cp dist/latest.yml release-assets/ || { echo "ERROR: Failed to copy latest.yml"; exit 1; }
echo "Final release assets:"
@ -302,3 +300,31 @@ jobs:
run: |
rm -f /tmp/release_payload.json /tmp/upload_headers.txt release_body.txt
rm -rf release-assets/
- name: Send failure notification to Feishu
if: always() && (failure() || cancelled())
shell: bash
env:
FEISHU_WEBHOOK_URL: ${{ secrets.FEISHU_WEBHOOK_URL }}
FEISHU_WEBHOOK_SECRET: ${{ secrets.FEISHU_WEBHOOK_SECRET }}
TAG_NAME: ${{ steps.get-tag.outputs.tag }}
RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
JOB_STATUS: ${{ job.status }}
run: |
# Determine status and color
if [ "$JOB_STATUS" = "cancelled" ]; then
STATUS_TEXT="已取消"
COLOR="orange"
else
STATUS_TEXT="失败"
COLOR="red"
fi
# Build description using printf
DESCRIPTION=$(printf "**标签:** %s\n\n**状态:** %s\n\n**工作流:** [查看详情](%s)" "$TAG_NAME" "$STATUS_TEXT" "$RUN_URL")
# Send notification
pnpm tsx scripts/feishu-notify.ts send \
-t "GitCode 同步${STATUS_TEXT}" \
-d "$DESCRIPTION" \
-c "${COLOR}"

View File

@ -154,9 +154,10 @@ jobs:
with:
node-version: 22
- name: Install pnpm
- name: Enable corepack
if: steps.check.outputs.should_run == 'true'
uses: pnpm/action-setup@v4
working-directory: main
run: corepack enable pnpm
- name: Install dependencies
if: steps.check.outputs.should_run == 'true'

View File

@ -1 +0,0 @@
pnpm lint-staged

.pre-commit-config.yaml (new file, 23 lines)
View File

@ -0,0 +1,23 @@
repos:
- repo: local
hooks:
- id: biome-format-js
name: Biome format (JS/TS)
language: system
entry: pnpm biome format --write --no-errors-on-unmatched
files: '\.(js|jsx|ts|tsx|cjs|mjs|cts|mts)$'
pass_filenames: true
- id: eslint-fix
name: ESLint fix
language: system
entry: pnpm eslint --fix
files: '\.(js|jsx|ts|tsx|cjs|mjs|cts|mts)$'
pass_filenames: true
- id: biome-format-other
name: Biome format (JSON/YAML/CSS/HTML)
language: system
entry: pnpm biome format --write --no-errors-on-unmatched
files: '\.(json|yml|yaml|css|html)$'
pass_filenames: true

View File

@ -0,0 +1,155 @@
# Feishu Notification Script
`scripts/feishu-notify.ts` is a CLI tool for sending notifications to a Feishu (Lark) webhook. It is used primarily in GitHub Actions workflows to send automated notifications.
## Features
- Subcommand-based CLI structure for different notification types
- HMAC-SHA256 signature verification (see the sketch after this list)
- Sends Feishu interactive card messages
- Full TypeScript type support
- Credentials via environment variables for security
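The signature verification mentioned above presumably follows Feishu's documented custom-bot signing scheme. A minimal sketch, assuming that scheme (the `genSign` helper is illustrative, not the script's actual API):
```typescript
import crypto from 'node:crypto'

// Feishu custom-bot signing: the HMAC key is `${timestamp}\n${secret}`,
// the signed message is the empty string, and the digest is base64-encoded.
function genSign(secret: string, timestamp: number): string {
  const stringToSign = `${timestamp}\n${secret}`
  return crypto.createHmac('sha256', stringToSign).update('').digest('base64')
}

// The webhook request then carries `timestamp` and `sign` alongside the card payload.
const timestamp = Math.floor(Date.now() / 1000)
const sign = genSign(process.env.FEISHU_WEBHOOK_SECRET ?? '', timestamp)
console.log({ timestamp, sign })
```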
## Usage
### Prerequisites
```bash
pnpm install
```
### CLI Structure
```bash
pnpm tsx scripts/feishu-notify.ts [command] [options]
```
### Environment Variables (Required)
| Variable | Description |
|----------|-------------|
| `FEISHU_WEBHOOK_URL` | Feishu Webhook URL |
| `FEISHU_WEBHOOK_SECRET` | Feishu Webhook signing secret |
## Commands
### `send` - Send Simple Notification
Send a generic notification without business-specific logic.
```bash
pnpm tsx scripts/feishu-notify.ts send [options]
```
| Option | Short | Description | Required |
|--------|-------|-------------|----------|
| `--title` | `-t` | Card title | Yes |
| `--description` | `-d` | Card description (supports markdown) | Yes |
| `--color` | `-c` | Header color template | No (default: turquoise) |
**Available colors:** `blue`, `wathet`, `turquoise`, `green`, `yellow`, `orange`, `red`, `carmine`, `violet`, `purple`, `indigo`, `grey`, `default`
#### Example
```bash
# Use $'...' syntax for proper newlines
pnpm tsx scripts/feishu-notify.ts send \
-t "Deployment Completed" \
-d $'**Status:** Success\n\n**Environment:** Production\n\n**Version:** v1.2.3' \
-c green
```
```bash
# Send an error alert (red color)
pnpm tsx scripts/feishu-notify.ts send \
-t "Error Alert" \
-d $'**Error Type:** Connection failed\n\n**Severity:** High\n\nPlease check the system status' \
-c red
```
**Note:** For proper newlines in the description, use bash's `$'...'` syntax. Do not use literal `\n` in double quotes, as it will be displayed as-is in the Feishu card.
### `issue` - Send GitHub Issue Notification
```bash
pnpm tsx scripts/feishu-notify.ts issue [options]
```
| Option | Short | Description | Required |
|--------|-------|-------------|----------|
| `--url` | `-u` | GitHub issue URL | Yes |
| `--number` | `-n` | Issue number | Yes |
| `--title` | `-t` | Issue title | Yes |
| `--summary` | `-m` | Issue summary | Yes |
| `--author` | `-a` | Issue author | No (default: "Unknown") |
| `--labels` | `-l` | Issue labels (comma-separated) | No |
#### Example
```bash
pnpm tsx scripts/feishu-notify.ts issue \
-u "https://github.com/owner/repo/issues/123" \
-n "123" \
-t "Bug: Something is broken" \
-m "This is a bug report about a feature" \
-a "username" \
-l "bug,high-priority"
```
## Usage in GitHub Actions
This script is primarily used in `.github/workflows/github-issue-tracker.yml`:
```yaml
- name: Install dependencies
run: pnpm install
- name: Send notification
run: |
pnpm tsx scripts/feishu-notify.ts issue \
-u "${{ github.event.issue.html_url }}" \
-n "${{ github.event.issue.number }}" \
-t "${{ github.event.issue.title }}" \
-a "${{ github.event.issue.user.login }}" \
-l "${{ join(github.event.issue.labels.*.name, ',') }}" \
-m "Issue summary content"
env:
FEISHU_WEBHOOK_URL: ${{ secrets.FEISHU_WEBHOOK_URL }}
FEISHU_WEBHOOK_SECRET: ${{ secrets.FEISHU_WEBHOOK_SECRET }}
```
## Feishu Card Message Format
The `issue` command sends an interactive card containing the following (a payload sketch follows this list):
- **Header**: `#<issue_number> - <issue_title>`
- **Author**: Issue creator
- **Labels**: Issue labels (if any)
- **Summary**: Issue content summary
- **Action Button**: "View Issue" button linking to the GitHub Issue page
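Assuming Feishu's standard interactive-card schema, the webhook payload for such a card looks roughly like the sketch below; all field values are illustrative and the exact layout is defined in the script:
```typescript
// Illustrative payload shape for the `issue` card (not the script's exact output).
const payload = {
  timestamp: '1706500000',
  sign: '<hmac-sha256 signature>',
  msg_type: 'interactive',
  card: {
    header: {
      title: { tag: 'plain_text', content: '#123 - Bug: Something is broken' },
      template: 'turquoise'
    },
    elements: [
      { tag: 'div', text: { tag: 'lark_md', content: '**Author:** username\n**Labels:** bug, high-priority' } },
      { tag: 'div', text: { tag: 'lark_md', content: 'This is a bug report about a feature' } },
      {
        tag: 'action',
        actions: [
          {
            tag: 'button',
            text: { tag: 'plain_text', content: 'View Issue' },
            type: 'primary',
            url: 'https://github.com/owner/repo/issues/123'
          }
        ]
      }
    ]
  }
}
console.log(JSON.stringify(payload, null, 2))
```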
## Configuring Feishu Webhook
1. Add a custom bot to your Feishu group
2. Obtain the Webhook URL and signing secret
3. Configure them in GitHub Secrets:
- `FEISHU_WEBHOOK_URL`: Webhook address
- `FEISHU_WEBHOOK_SECRET`: Signing secret
## Error Handling
The script exits with a non-zero code when:
- Required environment variables are missing (`FEISHU_WEBHOOK_URL`, `FEISHU_WEBHOOK_SECRET`)
- Required command options are missing
- Feishu API returns a non-2xx status code
- Network request fails
## Extending with New Commands
The CLI is designed to support multiple notification types. To add a new command (see the sketch after these steps):
1. Define the command options interface
2. Create a card builder function
3. Add a new command handler
4. Register the command with `program.command()`
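For illustration, a hypothetical `deploy` command might be wired up as follows; the option names, `buildDeployCard`, and the webhook call are assumptions, and only the `program.command()` registration pattern mirrors the existing commands:
```typescript
import { Command } from 'commander'

// 1. Options interface for the new command (hypothetical fields).
interface DeployOptions {
  title: string
  env: string
}

// 2. Card builder: turn the options into a Feishu card body.
function buildDeployCard(opts: DeployOptions) {
  return {
    header: { title: { tag: 'plain_text', content: opts.title }, template: 'green' },
    elements: [{ tag: 'div', text: { tag: 'lark_md', content: `**Environment:** ${opts.env}` } }]
  }
}

const program = new Command()

// 3 & 4. Command handler registered via program.command().
program
  .command('deploy')
  .description('Send a deployment notification')
  .requiredOption('-t, --title <title>', 'Card title')
  .requiredOption('-e, --env <env>', 'Deployment environment')
  .action(async (opts: DeployOptions) => {
    const card = buildDeployCard(opts)
    // The real script would hand `card` to its existing webhook-sending helper here.
    console.log(JSON.stringify(card, null, 2))
  })

program.parseAsync(process.argv)
```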

View File

@ -0,0 +1,155 @@
# 飞书通知脚本
`scripts/feishu-notify.ts` 是一个 CLI 工具,用于向飞书 Webhook 发送通知。该脚本主要在 GitHub Actions 工作流中使用,实现自动通知功能。
## 功能特性
- 基于子命令的 CLI 结构,支持不同类型的通知
- 使用 HMAC-SHA256 签名验证
- 发送飞书交互式卡片消息
- 完整的 TypeScript 类型支持
- 通过环境变量传递凭证,确保安全性
## 使用方式
### 前置依赖
```bash
pnpm install
```
### CLI 结构
```bash
pnpm tsx scripts/feishu-notify.ts [command] [options]
```
### 环境变量(必需)
| 变量 | 说明 |
|------|------|
| `FEISHU_WEBHOOK_URL` | 飞书 Webhook URL |
| `FEISHU_WEBHOOK_SECRET` | 飞书 Webhook 签名密钥 |
## 命令
### `send` - 发送简单通知
发送通用通知,不涉及具体业务逻辑。
```bash
pnpm tsx scripts/feishu-notify.ts send [options]
```
| 参数 | 短选项 | 说明 | 必需 |
|------|--------|------|------|
| `--title` | `-t` | 卡片标题 | 是 |
| `--description` | `-d` | 卡片描述(支持 markdown) | 是 |
| `--color` | `-c` | 标题栏颜色模板 | 否(默认:turquoise) |
**可用颜色:** `blue`(蓝色), `wathet`(浅蓝), `turquoise`(青绿), `green`(绿色), `yellow`(黄色), `orange`(橙色), `red`(红色), `carmine`(深红), `violet`(紫罗兰), `purple`(紫色), `indigo`(靛蓝), `grey`(灰色), `default`(默认)
#### 示例
```bash
# 使用 $'...' 语法实现正确的换行
pnpm tsx scripts/feishu-notify.ts send \
-t "部署完成" \
-d $'**状态:** 成功\n\n**环境:** 生产环境\n\n**版本:** v1.2.3' \
-c green
```
```bash
# 发送错误警报(红色)
pnpm tsx scripts/feishu-notify.ts send \
-t "错误警报" \
-d $'**错误类型:** 连接失败\n\n**严重程度:** 高\n\n请及时检查系统状态' \
-c red
```
**注意:** 如需在描述中换行,请使用 bash 的 `$'...'` 语法。不要在双引号中使用字面量 `\n`,否则会原样显示在飞书卡片中。
### `issue` - 发送 GitHub Issue 通知
```bash
pnpm tsx scripts/feishu-notify.ts issue [options]
```
| 参数 | 短选项 | 说明 | 必需 |
|------|--------|------|------|
| `--url` | `-u` | GitHub Issue URL | 是 |
| `--number` | `-n` | Issue 编号 | 是 |
| `--title` | `-t` | Issue 标题 | 是 |
| `--summary` | `-m` | Issue 摘要 | 是 |
| `--author` | `-a` | Issue 作者 | 否(默认:"Unknown") |
| `--labels` | `-l` | Issue 标签(逗号分隔) | 否 |
#### 示例
```bash
pnpm tsx scripts/feishu-notify.ts issue \
-u "https://github.com/owner/repo/issues/123" \
-n "123" \
-t "Bug: Something is broken" \
-m "这是一个关于某功能的 bug 报告" \
-a "username" \
-l "bug,high-priority"
```
## 在 GitHub Actions 中使用
该脚本主要在 `.github/workflows/github-issue-tracker.yml` 工作流中使用:
```yaml
- name: Install dependencies
run: pnpm install
- name: Send notification
run: |
pnpm tsx scripts/feishu-notify.ts issue \
-u "${{ github.event.issue.html_url }}" \
-n "${{ github.event.issue.number }}" \
-t "${{ github.event.issue.title }}" \
-a "${{ github.event.issue.user.login }}" \
-l "${{ join(github.event.issue.labels.*.name, ',') }}" \
-m "Issue 摘要内容"
env:
FEISHU_WEBHOOK_URL: ${{ secrets.FEISHU_WEBHOOK_URL }}
FEISHU_WEBHOOK_SECRET: ${{ secrets.FEISHU_WEBHOOK_SECRET }}
```
## 飞书卡片消息格式
`issue` 命令发送的交互式卡片包含以下内容:
- **标题**: `#<issue编号> - <issue标题>`
- **作者**: Issue 创建者
- **标签**: Issue 标签列表(如有)
- **摘要**: Issue 内容摘要
- **操作按钮**: "View Issue" 按钮,点击跳转到 GitHub Issue 页面
## 配置飞书 Webhook
1. 在飞书群组中添加自定义机器人
2. 获取 Webhook URL 和签名密钥
3. 将 URL 和密钥配置到 GitHub Secrets:
- `FEISHU_WEBHOOK_URL`: Webhook 地址
- `FEISHU_WEBHOOK_SECRET`: 签名密钥
## 错误处理
脚本在以下情况会返回非零退出码:
- 缺少必需的环境变量(`FEISHU_WEBHOOK_URL`、`FEISHU_WEBHOOK_SECRET`)
- 缺少必需的命令参数
- 飞书 API 返回非 2xx 状态码
- 网络请求失败
## 扩展新命令
CLI 设计支持多种通知类型。添加新命令的步骤:
1. 定义命令选项接口
2. 创建卡片构建函数
3. 添加新的命令处理函数
4. 使用 `program.command()` 注册命令

View File

@ -96,6 +96,7 @@ nsis:
oneClick: false
include: build/nsis-installer.nsh
buildUniversalInstaller: false
differentialPackage: false
portable:
artifactName: ${productName}-${version}-${arch}-portable.${ext}
buildUniversalInstaller: false
@ -111,8 +112,11 @@ mac:
target:
- target: dmg
- target: zip
dmg:
writeUpdateInfo: false
linux:
artifactName: ${productName}-${version}-${arch}.${ext}
executableName: CherryStudio
target:
- target: AppImage
- target: deb
@ -121,6 +125,7 @@ linux:
category: Utility
desktop:
entry:
Name: Cherry Studio
StartupWMClass: CherryStudio
mimeTypes:
- x-scheme-handler/cherrystudio
@ -140,44 +145,60 @@ artifactBuildCompleted: scripts/artifact-build-completed.js
releaseInfo:
releaseNotes: |
<!--LANG:en-->
Cherry Studio 1.7.9 - New Features & Bug Fixes
Cherry Studio 1.7.14 - New Features & Improvements
✨ New Features
- [Agent] Add 302.AI provider support
- [Browser] Browser data now persists and supports multiple tabs
- [Language] Add Romanian language support
- [Search] Add fuzzy search for file list
- [Models] Add latest Zhipu models
- [Image] Improve text-to-image functionality
- [Notes] Add export to Word document functionality
- [Code Tools] Add Kimi CLI support with auto-configuration
- [Code Tools] Support custom providers
- [Settings] Support viewing detailed error messages when model detection fails
- [Topics] Display year in topic timestamps (YYYY/MM/DD format)
- [Linux] Add system title bar setting option
- [Models] Add Baichuan m3/m3-plus models
- [Models] Add Qwen text-embedding models
🎨 Improvements
- [Translate] Simplify translation with single target language selector
- [Topics] Unpinned topics now move to top with auto-scroll
- [Minapps] Add locale-based filtering support
- [i18n] Update Romanian localization
🐛 Bug Fixes
- [Mac] Fix mini window unexpected closing issue
- [Preview] Fix HTML preview controls not working in fullscreen
- [Translate] Fix translation duplicate execution issue
- [Zoom] Fix page zoom reset issue during navigation
- [Agent] Fix crash when switching between agent and assistant
- [Agent] Fix navigation in agent mode
- [Copy] Fix markdown copy button issue
- [Windows] Fix compatibility issues on non-Windows systems
- [Linux] Fix icon display and deb/rpm installation issues
- [Linux] Fix window not coming to front when clicking tray
- [Linux] Add Alpine Linux (musl) support
- [Code Tools] Fix Windows Terminal issues
- [Azure] Fix API preview link for completion mode
- [Images] Fix trailing slashes in API URLs for image generation
- [OpenRouter] Fix MCP tools support
- [Chat] Fix image enhancement model conversation history
<!--LANG:zh-CN-->
Cherry Studio 1.7.9 - 新功能与问题修复
Cherry Studio 1.7.14 - 新功能与改进
✨ 新功能
- [Agent] 新增 302.AI 服务商支持
- [浏览器] 浏览器数据现在可以保存,支持多标签页
- [语言] 新增罗马尼亚语支持
- [搜索] 文件列表新增模糊搜索功能
- [模型] 新增最新智谱模型
- [图片] 优化文生图功能
- [笔记] 支持导出为 Word 文档
- [代码工具] 新增 Kimi CLI 支持,自动配置环境
- [代码工具] 支持自定义服务商
- [设置] 模型检测失败时可查看详细错误信息
- [话题] 时间戳显示年份(YYYY/MM/DD 格式)
- [Linux] 新增系统标题栏设置选项
- [模型] 新增百川 m3/m3-plus 模型
- [模型] 新增通义 Embedding 模型
🎨 改进
- [翻译] 简化翻译操作,使用单一目标语言选择
- [话题] 取消置顶的话题移动到顶部并自动滚动
- [小程序] 支持按语言筛选小程序
- [国际化] 更新罗马尼亚语翻译
🐛 问题修复
- [Mac] 修复迷你窗口意外关闭的问题
- [预览] 修复全屏模式下 HTML 预览控件无法使用的问题
- [翻译] 修复翻译重复执行的问题
- [缩放] 修复页面导航时缩放被重置的问题
- [智能体] 修复在智能体和助手间切换时崩溃的问题
- [智能体] 修复智能体模式下的导航问题
- [复制] 修复 Markdown 复制按钮问题
- [兼容性] 修复非 Windows 系统的兼容性问题
- [Linux] 修复图标显示和 deb/rpm 安装问题
- [Linux] 修复点击托盘后窗口无法置顶的问题
- [Linux] 新增 Alpine Linux (musl) 支持
- [代码工具] 修复 Windows 终端问题
- [Azure] 修复完成模式下 API 预览链接
- [图片生成] 修复 API 地址结尾斜杠问题
- [OpenRouter] 修复 MCP 工具支持
- [对话] 修复图片增强模型对话历史丢失问题
<!--LANG:END-->

View File

@ -84,7 +84,7 @@ export default defineConfig([
{
selector: 'CallExpression[callee.object.name="console"]',
message:
'❗CherryStudio uses unified LoggerService: 📖 docs/technical/how-to-use-logger-en.md\n❗CherryStudio 使用统一的日志服务:📖 docs/technical/how-to-use-logger-zh.md\n\n'
'❗CherryStudio uses unified LoggerService: 📖 docs/en/guides/logging.md\n❗CherryStudio 使用统一的日志服务:📖 docs/zh/guides/logging.md\n\n'
}
]
}

View File

@ -1,8 +1,9 @@
{
"name": "CherryStudio",
"version": "1.7.9",
"version": "1.7.15",
"private": true,
"description": "A powerful AI assistant for producer.",
"desktopName": "CherryStudio.desktop",
"main": "./out/main/index.js",
"author": "support@cherry-ai.com",
"homepage": "https://github.com/CherryHQ/cherry-studio",
@ -33,16 +34,17 @@
"agents:push": "NODE_ENV='development' drizzle-kit push --config src/main/services/agents/drizzle.config.ts",
"agents:studio": "NODE_ENV='development' drizzle-kit studio --config src/main/services/agents/drizzle.config.ts",
"agents:drop": "NODE_ENV='development' drizzle-kit drop --config src/main/services/agents/drizzle.config.ts",
"generate:icons": "electron-icon-builder --input=./build/logo.png --output=build",
"analyze:renderer": "VISUALIZER_RENDERER=true pnpm build",
"analyze:main": "VISUALIZER_MAIN=true pnpm build",
"typecheck": "concurrently -n \"node,web\" -c \"cyan,magenta\" \"npm run typecheck:node\" \"npm run typecheck:web\"",
"typecheck:node": "tsgo --noEmit -p tsconfig.node.json --composite false",
"typecheck:web": "tsgo --noEmit -p tsconfig.web.json --composite false",
"i18n:check": "dotenv -e .env -- tsx scripts/check-i18n.ts",
"i18n:hardcoded": "tsx scripts/check-hardcoded-strings.ts",
"i18n:hardcoded:strict": "I18N_STRICT=true tsx scripts/check-hardcoded-strings.ts",
"i18n:sync": "dotenv -e .env -- tsx scripts/sync-i18n.ts",
"i18n:translate": "dotenv -e .env -- tsx scripts/auto-translate-i18n.ts",
"i18n:all": "pnpm i18n:check && pnpm i18n:sync && pnpm i18n:translate",
"i18n:all": "pnpm i18n:sync && pnpm i18n:translate",
"update:languages": "tsx scripts/update-languages.ts",
"update:upgrade-config": "tsx scripts/update-app-upgrade-config.ts",
"test": "vitest run --silent",
@ -59,7 +61,7 @@
"lint": "oxlint --fix && eslint . --ext .js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix --cache && pnpm typecheck && pnpm i18n:check && pnpm format:check",
"format": "biome format --write && biome lint --write",
"format:check": "biome format && biome lint",
"prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && husky",
"prepare": "git config blame.ignoreRevsFile .git-blame-ignore-revs && prek install",
"claude": "dotenv -e .env -- claude",
"release:aicore:alpha": "pnpm --filter @cherrystudio/ai-core version prerelease --preid alpha && pnpm --filter @cherrystudio/ai-core build && pnpm --filter @cherrystudio/ai-core publish --tag alpha --access public",
"release:aicore:beta": "pnpm --filter @cherrystudio/ai-core version prerelease --preid beta && pnpm --filter @cherrystudio/ai-core build && pnpm --filter @cherrystudio/ai-core publish --tag beta --access public",
@ -67,7 +69,7 @@
"release:ai-sdk-provider": "pnpm --filter @cherrystudio/ai-sdk-provider version patch && pnpm --filter @cherrystudio/ai-sdk-provider build && pnpm --filter @cherrystudio/ai-sdk-provider publish --access public"
},
"dependencies": {
"@anthropic-ai/claude-agent-sdk": "0.1.62",
"@anthropic-ai/claude-agent-sdk": "0.1.76",
"@libsql/client": "0.14.0",
"@napi-rs/system-ocr": "1.0.2",
"@paymoapp/electron-shutdown-handler": "1.1.2",
@ -87,9 +89,6 @@
"turndown": "7.2.0"
},
"devDependencies": {
"js-yaml": "4.1.0",
"bonjour-service": "1.3.0",
"emoji-picker-element-data": "1",
"@agentic/exa": "^7.3.3",
"@agentic/searxng": "^7.3.3",
"@agentic/tavily": "^7.3.3",
@ -146,6 +145,7 @@
"@floating-ui/dom": "1.7.3",
"@google/genai": "1.0.1",
"@hello-pangea/dnd": "^18.0.1",
"@j178/prek": "^0.2.28",
"@kangfenmao/keyv-storage": "^0.1.3",
"@langchain/community": "^1.0.0",
"@langchain/core": "1.0.2",
@ -247,6 +247,7 @@
"archiver": "^7.0.1",
"async-mutex": "^0.5.0",
"axios": "^1.7.3",
"bonjour-service": "1.3.0",
"browser-image-compression": "^2.0.2",
"builder-util-runtime": "9.5.0",
"chalk": "4.1.2",
@ -260,6 +261,7 @@
"code-inspector-plugin": "^0.20.14",
"codemirror-lang-mermaid": "0.5.0",
"color": "^5.0.0",
"commander": "^14.0.2",
"concurrently": "^9.2.1",
"cors": "2.8.5",
"country-flag-emoji-polyfill": "0.1.8",
@ -283,6 +285,7 @@
"electron-window-state": "^5.0.3",
"emittery": "^1.0.3",
"emoji-picker-element": "^1.22.1",
"emoji-picker-element-data": "1",
"epub": "1.3.0",
"eslint": "^9.22.0",
"eslint-plugin-import-zod": "^1.2.0",
@ -304,7 +307,6 @@
"html-to-image": "^1.11.13",
"html-to-text": "^9.0.5",
"htmlparser2": "^10.0.0",
"husky": "^9.1.7",
"i18next": "^23.11.5",
"iconv-lite": "^0.6.3",
"ipaddr.js": "^2.2.0",
@ -312,11 +314,11 @@
"jaison": "^2.0.2",
"jest-styled-components": "^7.2.0",
"js-base64": "3.7.7",
"js-yaml": "4.1.0",
"json-schema": "0.4.0",
"katex": "0.16.22",
"ky": "1.8.1",
"linguist-languages": "^8.1.0",
"lint-staged": "^15.5.0",
"lodash": "^4.17.21",
"lru-cache": "^11.1.0",
"lucide-react": "^0.525.0",
@ -336,6 +338,7 @@
"oxlint-tsgolint": "^0.2.0",
"p-queue": "^8.1.0",
"pako": "1.0.11",
"partial-json": "0.1.7",
"pdf-lib": "^1.17.1",
"pdf-parse": "^1.1.1",
"prosemirror-model": "1.25.2",
@ -385,6 +388,7 @@
"tar": "^7.4.3",
"tiny-pinyin": "^1.3.2",
"tokenx": "^1.1.0",
"ts-morph": "^27.0.2",
"tsx": "^4.20.3",
"turndown-plugin-gfm": "^1.0.2",
"tw-animate-css": "^1.3.8",
@ -425,10 +429,10 @@
"@img/sharp-linux-x64": "0.34.3",
"@img/sharp-win32-x64": "0.34.3",
"@langchain/core": "1.0.2",
"@ai-sdk/openai-compatible@1.0.27": "1.0.28"
"@ai-sdk/openai-compatible@1.0.27": "1.0.28",
"@ai-sdk/openai-compatible@1.0.30": "1.0.28"
},
"patchedDependencies": {
"@anthropic-ai/claude-agent-sdk@0.1.62": "patches/@anthropic-ai-claude-agent-sdk-npm-0.1.62-23ae56f8c8.patch",
"@napi-rs/system-ocr@1.0.2": "patches/@napi-rs-system-ocr-npm-1.0.2-59e7a78e8b.patch",
"tesseract.js@6.0.1": "patches/tesseract.js-npm-6.0.1-2562a7e46d.patch",
"@ai-sdk/google@3.0.0": "patches/@ai-sdk-google-npm-3.0.0-ef668576ff.patch",
@ -446,9 +450,13 @@
"atomically@1.7.0": "patches/atomically-npm-1.7.0-e742e5293b.patch",
"file-stream-rotator@0.6.1": "patches/file-stream-rotator-npm-0.6.1-eab45fb13d.patch",
"libsql@0.4.7": "patches/libsql-npm-0.4.7-444e260fb1.patch",
"pdf-parse@1.1.1": "patches/pdf-parse-npm-1.1.1-04a6109b2a.patch"
"pdf-parse@1.1.1": "patches/pdf-parse-npm-1.1.1-04a6109b2a.patch",
"@ai-sdk/openai-compatible@1.0.28": "patches/@ai-sdk__openai-compatible@1.0.28.patch",
"@anthropic-ai/claude-agent-sdk@0.1.76": "patches/@anthropic-ai__claude-agent-sdk@0.1.76.patch",
"@openrouter/ai-sdk-provider": "patches/@openrouter__ai-sdk-provider.patch"
},
"onlyBuiltDependencies": [
"@j178/prek",
"@kangfenmao/keyv-storage",
"@paymoapp/electron-shutdown-handler",
"@scarf/scarf",
@ -466,13 +474,32 @@
]
},
"packageManager": "pnpm@10.27.0",
"lint-staged": {
"*.{js,jsx,ts,tsx,cjs,mjs,cts,mts}": [
"biome format --write --no-errors-on-unmatched",
"eslint --fix"
],
"*.{json,yml,yaml,css,html}": [
"biome format --write --no-errors-on-unmatched"
]
"optionalDependencies": {
"@img/sharp-darwin-arm64": "0.34.3",
"@img/sharp-darwin-x64": "0.34.3",
"@img/sharp-libvips-darwin-arm64": "1.2.0",
"@img/sharp-libvips-darwin-x64": "1.2.0",
"@img/sharp-libvips-linux-arm64": "1.2.0",
"@img/sharp-libvips-linux-x64": "1.2.0",
"@img/sharp-libvips-linuxmusl-arm64": "1.2.0",
"@img/sharp-libvips-linuxmusl-x64": "1.2.0",
"@img/sharp-linux-arm64": "0.34.3",
"@img/sharp-linux-x64": "0.34.3",
"@img/sharp-linuxmusl-arm64": "0.34.3",
"@img/sharp-linuxmusl-x64": "0.34.3",
"@img/sharp-win32-arm64": "0.34.3",
"@img/sharp-win32-x64": "0.34.3",
"@libsql/darwin-arm64": "0.4.7",
"@libsql/darwin-x64": "0.4.7",
"@libsql/linux-arm64-gnu": "0.4.7",
"@libsql/linux-arm64-musl": "0.4.7",
"@libsql/linux-x64-gnu": "0.4.7",
"@libsql/linux-x64-musl": "0.4.7",
"@libsql/win32-x64-msvc": "0.4.7",
"@napi-rs/system-ocr-darwin-arm64": "1.0.2",
"@napi-rs/system-ocr-darwin-x64": "1.0.2",
"@napi-rs/system-ocr-win32-arm64-msvc": "1.0.2",
"@napi-rs/system-ocr-win32-x64-msvc": "1.0.2",
"@strongtz/win32-arm64-msvc": "0.4.7"
}
}

View File

@ -21,9 +21,6 @@ const TOOL_USE_TAG_CONFIG: TagConfig = {
separator: '\n'
}
/**
*
*/
export const DEFAULT_SYSTEM_PROMPT = `In this environment you have access to a set of tools you can use to answer the user's question. \
You can use one or more tools per message, and will receive the result of that tool use in the user's response. You use tools step-by-step to accomplish a given task, with each tool use informed by the result of the previous tool use.
@ -38,10 +35,16 @@ Tool use is formatted using XML-style tags. The tool name is enclosed in opening
The tool name should be the exact name of the tool you are using, and the arguments should be a JSON object containing the parameters required by that tool. For example:
<tool_use>
<name>python_interpreter</name>
<arguments>{"code": "5 + 3 + 1294.678"}</arguments>
<name>search</name>
<arguments>{ "query": "browser,fetch" }</arguments>
</tool_use>
<tool_use>
<name>exec</name>
<arguments>{ "code": "const page = await CherryBrowser_fetch({ url: "https://example.com" })\nreturn page" }</arguments>
</tool_use>
The user will respond with the result of the tool use, which should be formatted as follows:
<tool_use_result>
@ -59,13 +62,6 @@ For example, if the result of the tool use is an image file, you can use it in t
Always adhere to this format for the tool use to ensure proper parsing and execution.
## Tool Use Examples
{{ TOOL_USE_EXAMPLES }}
## Tool Use Available Tools
Above example were using notional tools that might not exist for you. You only have access to these tools:
{{ AVAILABLE_TOOLS }}
## Tool Use Rules
Here are the rules you should always follow to solve your task:
1. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead.
@ -74,6 +70,8 @@ Here are the rules you should always follow to solve your task:
4. Never re-do a tool call that you previously did with the exact same parameters.
5. For tool use, MAKE SURE use XML tag format as shown in the examples above. Do not use any other format.
{{ TOOLS_INFO }}
## Response rules
Respond in the language of the user's query, unless the user instructions specify additional requirements for the language to be used.
@ -154,7 +152,8 @@ User: <tool_use_result>
<name>search</name>
<result>26 million (2019)</result>
</tool_use_result>
Assistant: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
A: The population of Shanghai is 26 million, while Guangzhou has a population of 15 million. Therefore, Shanghai has the highest population.`
/**
* Cherry Studio
@ -184,13 +183,30 @@ ${result}
/**
* Cherry Studio
*/
function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet): string {
function defaultBuildSystemPrompt(userSystemPrompt: string, tools: ToolSet, mcpMode?: string): string {
const availableTools = buildAvailableTools(tools)
if (availableTools === null) return userSystemPrompt
const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
if (mcpMode == 'auto') {
return DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', '').replace(
'{{ USER_SYSTEM_PROMPT }}',
userSystemPrompt || ''
)
}
const toolsInfo = `
## Tool Use Examples
{{ TOOL_USE_EXAMPLES }}
## Tool Use Available Tools
Above example were using notional tools that might not exist for you. You only have access to these tools:
{{ AVAILABLE_TOOLS }}`
.replace('{{ TOOL_USE_EXAMPLES }}', DEFAULT_TOOL_USE_EXAMPLES)
.replace('{{ AVAILABLE_TOOLS }}', availableTools)
.replace('{{ USER_SYSTEM_PROMPT }}', userSystemPrompt || '')
const fullPrompt = DEFAULT_SYSTEM_PROMPT.replace('{{ TOOLS_INFO }}', toolsInfo).replace(
'{{ USER_SYSTEM_PROMPT }}',
userSystemPrompt || ''
)
return fullPrompt
}
@ -223,7 +239,17 @@ function defaultParseToolUse(content: string, tools: ToolSet): { results: ToolUs
// Find all tool use blocks
while ((match = toolUsePattern.exec(contentToProcess)) !== null) {
const fullMatch = match[0]
const toolName = match[2].trim()
let toolName = match[2].trim()
switch (toolName.toLowerCase()) {
case 'search':
toolName = 'mcp__CherryHub__search'
break
case 'exec':
toolName = 'mcp__CherryHub__exec'
break
default:
break
}
const toolArgs = match[4].trim()
// Try to parse the arguments as JSON
@ -257,7 +283,12 @@ function defaultParseToolUse(content: string, tools: ToolSet): { results: ToolUs
export const createPromptToolUsePlugin = (
config: PromptToolUseConfig = {}
): AiPlugin<StreamTextParams, StreamTextResult> => {
const { enabled = true, buildSystemPrompt = defaultBuildSystemPrompt, parseToolUse = defaultParseToolUse } = config
const {
enabled = true,
buildSystemPrompt = defaultBuildSystemPrompt,
parseToolUse = defaultParseToolUse,
mcpMode
} = config
return definePlugin<StreamTextParams, StreamTextResult>({
name: 'built-in:prompt-tool-use',
@ -287,7 +318,12 @@ export const createPromptToolUsePlugin = (
// 构建系统提示符(只包含非 provider 工具)
const userSystemPrompt = typeof params.system === 'string' ? params.system : ''
const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools)
const systemPrompt = buildSystemPrompt(userSystemPrompt, promptTools, mcpMode)
let systemMessage: string | null = systemPrompt
if (config.createSystemMessage) {
// 🎯 如果用户提供了自定义处理函数,使用它
systemMessage = config.createSystemMessage(systemPrompt, params, context)
}
// 保留 provide tools移除其他 tools
const transformedParams = {

View File

@ -22,6 +22,7 @@ export interface PromptToolUseConfig extends BaseToolUsePluginConfig {
buildSystemPrompt?: (userSystemPrompt: string, tools: ToolSet) => string
// 自定义工具解析函数(可选,有默认实现)
parseToolUse?: (content: string, tools: ToolSet) => { results: ToolUseResult[]; content: string }
mcpMode?: string
}
/**

View File

@ -48,6 +48,7 @@ export enum IpcChannel {
App_QuoteToMain = 'app:quote-to-main',
App_SetDisableHardwareAcceleration = 'app:set-disable-hardware-acceleration',
App_SetUseSystemTitleBar = 'app:set-use-system-title-bar',
Notification_Send = 'notification:send',
Notification_OnClick = 'notification:on-click',

View File

@ -233,7 +233,8 @@ export enum codeTools {
geminiCli = 'gemini-cli',
openaiCodex = 'openai-codex',
iFlowCli = 'iflow-cli',
githubCopilotCli = 'github-copilot-cli'
githubCopilotCli = 'github-copilot-cli',
kimiCli = 'kimi-cli'
}
export enum terminalApps {

packages/shared/mcp.ts (new file, 116 lines)
View File

@ -0,0 +1,116 @@
/**
* Convert a string to camelCase, ensuring it's a valid JavaScript identifier.
*
* - Normalizes to lowercase first, then capitalizes word boundaries
* - Non-alphanumeric characters are treated as word separators
* - Non-ASCII characters are dropped (ASCII-only output)
* - If result starts with a digit, prefixes with underscore
*
* @example
* toCamelCase('my-server') // 'myServer'
* toCamelCase('MY_SERVER') // 'myServer'
* toCamelCase('123tool') // '_123tool'
*/
export function toCamelCase(str: string): string {
let result = str
.trim()
.toLowerCase()
.replace(/[^a-z0-9]+(.)/g, (_, char) => char.toUpperCase())
.replace(/[^a-zA-Z0-9]/g, '')
if (result && !/^[a-zA-Z_]/.test(result)) {
result = '_' + result
}
return result
}
export type McpToolNameOptions = {
/** Prefix added before the name (e.g., 'mcp__'). Must be JS-identifier-safe. */
prefix?: string
/** Delimiter between server and tool parts (e.g., '_' or '__'). Must be JS-identifier-safe. */
delimiter?: string
/** Maximum length of the final name. Suffix numbers for uniqueness are included in this limit. */
maxLength?: number
/** Mutable Set for collision detection. The final name will be added to this Set. */
existingNames?: Set<string>
}
/**
* Build a valid JavaScript function name from server and tool names.
* Uses camelCase for both parts.
*
* @param serverName - The MCP server name (optional)
* @param toolName - The tool name
* @param options - Configuration options
* @returns A valid JS identifier
*/
export function buildMcpToolName(
serverName: string | undefined,
toolName: string,
options: McpToolNameOptions = {}
): string {
const { prefix = '', delimiter = '_', maxLength, existingNames } = options
const serverPart = serverName ? toCamelCase(serverName) : ''
const toolPart = toCamelCase(toolName)
const baseName = serverPart ? `${prefix}${serverPart}${delimiter}${toolPart}` : `${prefix}${toolPart}`
if (!existingNames) {
return maxLength ? truncateToLength(baseName, maxLength) : baseName
}
let name = maxLength ? truncateToLength(baseName, maxLength) : baseName
let counter = 1
while (existingNames.has(name)) {
const suffix = String(counter)
const truncatedBase = maxLength ? truncateToLength(baseName, maxLength - suffix.length) : baseName
name = `${truncatedBase}${suffix}`
counter++
}
existingNames.add(name)
return name
}
function truncateToLength(str: string, maxLength: number): string {
if (str.length <= maxLength) {
return str
}
return str.slice(0, maxLength).replace(/_+$/, '')
}
/**
* Generate a unique function name from server name and tool name.
* Format: serverName_toolName (camelCase)
*
* @example
* generateMcpToolFunctionName('github', 'search_issues') // 'github_searchIssues'
*/
export function generateMcpToolFunctionName(
serverName: string | undefined,
toolName: string,
existingNames?: Set<string>
): string {
return buildMcpToolName(serverName, toolName, { existingNames })
}
/**
* Builds a valid JavaScript function name for MCP tool calls.
* Format: mcp__{serverName}__{toolName}
*
* @param serverName - The MCP server name
* @param toolName - The tool name from the server
* @returns A valid JS identifier in format mcp__{server}__{tool}, max 63 chars
*
* @example
* buildFunctionCallToolName('github', 'search_issues') // 'mcp__github__searchIssues'
*/
export function buildFunctionCallToolName(serverName: string, toolName: string): string {
return buildMcpToolName(serverName, toolName, {
prefix: 'mcp__',
delimiter: '__',
maxLength: 63
})
}
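A quick usage sketch of the naming and collision behavior above (the import path and example names are illustrative):
```typescript
import { buildFunctionCallToolName, generateMcpToolFunctionName } from './mcp'

const existing = new Set<string>()
generateMcpToolFunctionName('github', 'search_issues', existing) // 'github_searchIssues'
generateMcpToolFunctionName('github', 'search-issues', existing) // 'github_searchIssues1' (collision suffix)

buildFunctionCallToolName('github', 'search_issues') // 'mcp__github__searchIssues' (max 63 chars)
```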

View File

@ -9,9 +9,9 @@ index 48e2f6263c6ee4c75d7e5c28733e64f6ebe92200..00d0729c4a3cbf9a48e8e1e962c7e2b2
+ sendReasoning: z.ZodOptional<z.ZodBoolean>;
}, z.core.$strip>;
type OpenAICompatibleProviderOptions = z.infer<typeof openaiCompatibleProviderOptions>;
diff --git a/dist/index.js b/dist/index.js
index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e5bfe0f9a 100644
index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..88349c614a69a268a2e4f3b157cb5e328ca1d347 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -41,7 +41,7 @@ function getOpenAIMetadata(message) {
@ -52,17 +52,38 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
...metadata
});
@@ -200,7 +208,8 @@ var openaiCompatibleProviderOptions = import_v4.z.object({
@@ -200,7 +208,9 @@ var openaiCompatibleProviderOptions = import_v4.z.object({
/**
* Controls the verbosity of the generated text. Defaults to `medium`.
*/
- textVerbosity: import_v4.z.string().optional()
+ textVerbosity: import_v4.z.string().optional(),
+ sendReasoning: import_v4.z.boolean().optional()
+ sendReasoning: import_v4.z.boolean().optional(),
+ strictJsonSchema: z.boolean().optional()
});
// src/openai-compatible-error.ts
@@ -378,7 +387,7 @@ var OpenAICompatibleChatLanguageModel = class {
@@ -225,7 +235,8 @@ var defaultOpenAICompatibleErrorStructure = {
var import_provider2 = require("@ai-sdk/provider");
function prepareTools({
tools,
- toolChoice
+ toolChoice,
+ strictJsonSchema
}) {
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
const toolWarnings = [];
@@ -242,7 +253,8 @@ function prepareTools({
function: {
name: tool.name,
description: tool.description,
- parameters: tool.inputSchema
+ parameters: tool.inputSchema,
+ strict: strictJsonSchema
}
});
}
@@ -378,7 +390,7 @@ var OpenAICompatibleChatLanguageModel = class {
reasoning_effort: compatibleOptions.reasoningEffort,
verbosity: compatibleOptions.textVerbosity,
// messages:
@ -71,7 +92,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
// tools:
tools: openaiTools,
tool_choice: openaiToolChoice
@@ -421,6 +430,17 @@ var OpenAICompatibleChatLanguageModel = class {
@@ -421,6 +433,17 @@ var OpenAICompatibleChatLanguageModel = class {
text: reasoning
});
}
@ -89,7 +110,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
if (choice.message.tool_calls != null) {
for (const toolCall of choice.message.tool_calls) {
content.push({
@@ -598,6 +618,17 @@ var OpenAICompatibleChatLanguageModel = class {
@@ -598,6 +621,17 @@ var OpenAICompatibleChatLanguageModel = class {
delta: delta.content
});
}
@ -107,7 +128,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
if (delta.tool_calls != null) {
for (const toolCallDelta of delta.tool_calls) {
const index = toolCallDelta.index;
@@ -765,6 +796,14 @@ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
@@ -765,6 +799,14 @@ var OpenAICompatibleChatResponseSchema = import_v43.z.object({
arguments: import_v43.z.string()
})
})
@ -122,7 +143,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
).nullish()
}),
finish_reason: import_v43.z.string().nullish()
@@ -795,6 +834,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(
@@ -795,6 +837,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => import_v43.z.union(
arguments: import_v43.z.string().nullish()
})
})
@ -138,7 +159,7 @@ index da237bb35b7fa8e24b37cd861ee73dfc51cdfc72..b3060fbaf010e30b64df55302807828e
}).nullish(),
finish_reason: import_v43.z.string().nullish()
diff --git a/dist/index.mjs b/dist/index.mjs
index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5700264de 100644
index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..fca65c04000ce4c01fb90e93326ac179c2378055 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -23,7 +23,7 @@ function getOpenAIMetadata(message) {
@ -179,17 +200,38 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
tool_calls: toolCalls.length > 0 ? toolCalls : void 0,
...metadata
});
@@ -182,7 +190,8 @@ var openaiCompatibleProviderOptions = z.object({
@@ -182,7 +190,9 @@ var openaiCompatibleProviderOptions = z.object({
/**
* Controls the verbosity of the generated text. Defaults to `medium`.
*/
- textVerbosity: z.string().optional()
+ textVerbosity: z.string().optional(),
+ sendReasoning: z.boolean().optional()
+ sendReasoning: z.boolean().optional(),
+ strictJsonSchema: z.boolean().optional()
});
// src/openai-compatible-error.ts
@@ -362,7 +371,7 @@ var OpenAICompatibleChatLanguageModel = class {
@@ -209,7 +219,8 @@ import {
} from "@ai-sdk/provider";
function prepareTools({
tools,
- toolChoice
+ toolChoice,
+ strictJsonSchema
}) {
tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
const toolWarnings = [];
@@ -226,7 +237,8 @@ function prepareTools({
function: {
name: tool.name,
description: tool.description,
- parameters: tool.inputSchema
+ parameters: tool.inputSchema,
+ strict: strictJsonSchema
}
});
}
@@ -362,7 +374,7 @@ var OpenAICompatibleChatLanguageModel = class {
reasoning_effort: compatibleOptions.reasoningEffort,
verbosity: compatibleOptions.textVerbosity,
// messages:
@ -198,7 +240,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
// tools:
tools: openaiTools,
tool_choice: openaiToolChoice
@@ -405,6 +414,17 @@ var OpenAICompatibleChatLanguageModel = class {
@@ -405,6 +417,17 @@ var OpenAICompatibleChatLanguageModel = class {
text: reasoning
});
}
@ -216,7 +258,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
if (choice.message.tool_calls != null) {
for (const toolCall of choice.message.tool_calls) {
content.push({
@@ -582,6 +602,17 @@ var OpenAICompatibleChatLanguageModel = class {
@@ -582,6 +605,17 @@ var OpenAICompatibleChatLanguageModel = class {
delta: delta.content
});
}
@ -234,7 +276,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
if (delta.tool_calls != null) {
for (const toolCallDelta of delta.tool_calls) {
const index = toolCallDelta.index;
@@ -749,6 +780,14 @@ var OpenAICompatibleChatResponseSchema = z3.object({
@@ -749,6 +783,14 @@ var OpenAICompatibleChatResponseSchema = z3.object({
arguments: z3.string()
})
})
@ -249,7 +291,7 @@ index a809a7aa0e148bfd43e01dd7b018568b151c8ad5..565b605eeacd9830b2b0e817e58ad0c5
).nullish()
}),
finish_reason: z3.string().nullish()
@@ -779,6 +818,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
@@ -779,6 +821,14 @@ var createOpenAICompatibleChatChunkSchema = (errorSchema) => z3.union([
arguments: z3.string().nullish()
})
})

View File

@ -1,35 +0,0 @@
diff --git a/sdk.mjs b/sdk.mjs
index dea7766a3432a1e809f12d6daba4f2834a219689..e0b02ef73da177ba32b903887d7bbbeaa08cc6d3 100755
--- a/sdk.mjs
+++ b/sdk.mjs
@@ -6250,7 +6250,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
}
// ../src/transport/ProcessTransport.ts
-import { spawn } from "child_process";
+import { fork } from "child_process";
import { createInterface } from "readline";
// ../src/utils/fsOperations.ts
@@ -6644,18 +6644,11 @@ class ProcessTransport {
const errorMessage = isNativeBinary(pathToClaudeCodeExecutable) ? `Claude Code native binary not found at ${pathToClaudeCodeExecutable}. Please ensure Claude Code is installed via native installer or specify a valid path with options.pathToClaudeCodeExecutable.` : `Claude Code executable not found at ${pathToClaudeCodeExecutable}. Is options.pathToClaudeCodeExecutable set?`;
throw new ReferenceError(errorMessage);
}
- const isNative = isNativeBinary(pathToClaudeCodeExecutable);
- const spawnCommand = isNative ? pathToClaudeCodeExecutable : executable;
- const spawnArgs = isNative ? [...executableArgs, ...args] : [...executableArgs, pathToClaudeCodeExecutable, ...args];
- const spawnMessage = isNative ? `Spawning Claude Code native binary: ${spawnCommand} ${spawnArgs.join(" ")}` : `Spawning Claude Code process: ${spawnCommand} ${spawnArgs.join(" ")}`;
- logForSdkDebugging(spawnMessage);
- if (stderr) {
- stderr(spawnMessage);
- }
+ logForSdkDebugging(`Forking Claude Code Node.js process: ${pathToClaudeCodeExecutable} ${args.join(" ")}`);
const stderrMode = env.DEBUG_CLAUDE_AGENT_SDK || stderr ? "pipe" : "ignore";
- this.child = spawn(spawnCommand, spawnArgs, {
+ this.child = fork(pathToClaudeCodeExecutable, args, {
cwd,
- stdio: ["pipe", "pipe", stderrMode],
+ stdio: stderrMode === "pipe" ? ["pipe", "pipe", "pipe", "ipc"] : ["pipe", "pipe", "ignore", "ipc"],
signal: this.abortController.signal,
env
});

View File

@ -0,0 +1,33 @@
diff --git a/sdk.mjs b/sdk.mjs
index 1e1c3e4e3f81db622fb2789d17f3d421f212306e..5d193cdb6a43c7799fd5eff2d8af80827bfbdf1e 100755
--- a/sdk.mjs
+++ b/sdk.mjs
@@ -11985,7 +11985,7 @@ function createAbortController(maxListeners = DEFAULT_MAX_LISTENERS) {
}
// ../src/transport/ProcessTransport.ts
-import { spawn } from "child_process";
+import { fork } from "child_process";
import { createInterface } from "readline";
// ../src/utils/fsOperations.ts
@@ -12999,14 +12999,14 @@ class ProcessTransport {
return isRunningWithBun() ? "bun" : "node";
}
spawnLocalProcess(spawnOptions) {
- const { command, args, cwd: cwd2, env, signal } = spawnOptions;
+ const { args, cwd: cwd2, env, signal } = spawnOptions;
const stderrMode = env.DEBUG_CLAUDE_AGENT_SDK || this.options.stderr ? "pipe" : "ignore";
- const childProcess = spawn(command, args, {
+ logForSdkDebugging(`Forking Claude Code Node.js process: ${args[0]} ${args.slice(1).join(" ")}`);
+ const childProcess = fork(args[0], args.slice(1), {
cwd: cwd2,
- stdio: ["pipe", "pipe", stderrMode],
+ stdio: stderrMode === "pipe" ? ["pipe", "pipe", "pipe", "ipc"] : ["pipe", "pipe", "ignore", "ipc"],
signal,
- env,
- windowsHide: true
+ env
});
if (env.DEBUG_CLAUDE_AGENT_SDK || this.options.stderr) {
childProcess.stderr.on("data", (data) => {

View File

@ -0,0 +1,140 @@
diff --git a/dist/index.js b/dist/index.js
index f33510a50d11a2cb92a90ea70cc0ac84c89f29b9..db0af7e2cc05c47baeb29c0a3974a155316fbd05 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -1050,7 +1050,8 @@ var OpenRouterProviderMetadataSchema = import_v43.z.object({
var OpenRouterProviderOptionsSchema = import_v43.z.object({
openrouter: import_v43.z.object({
reasoning_details: import_v43.z.array(ReasoningDetailUnionSchema).optional(),
- annotations: import_v43.z.array(FileAnnotationSchema).optional()
+ annotations: import_v43.z.array(FileAnnotationSchema).optional(),
+ strictJsonSchema: import_v43.z.boolean().optional()
}).optional()
}).optional();
@@ -1658,7 +1659,8 @@ var OpenRouterChatLanguageModel = class {
responseFormat,
topK,
tools,
- toolChoice
+ toolChoice,
+ providerOptions
}) {
var _a15;
const baseArgs = __spreadValues(__spreadValues({
@@ -1712,7 +1714,8 @@ var OpenRouterChatLanguageModel = class {
function: {
name: tool.name,
description: tool.description,
- parameters: tool.inputSchema
+ parameters: tool.inputSchema,
+ strict: providerOptions?.openrouter?.strictJsonSchema
}
}));
return __spreadProps(__spreadValues({}, baseArgs), {
@@ -1725,7 +1728,7 @@ var OpenRouterChatLanguageModel = class {
async doGenerate(options) {
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
const providerOptions = options.providerOptions || {};
- const openrouterOptions = providerOptions.openrouter || {};
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
const { value: responseValue, responseHeaders } = await postJsonToApi({
url: this.config.url({
@@ -1931,7 +1934,7 @@ var OpenRouterChatLanguageModel = class {
async doStream(options) {
var _a15;
const providerOptions = options.providerOptions || {};
- const openrouterOptions = providerOptions.openrouter || {};
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
const { value: response, responseHeaders } = await postJsonToApi({
url: this.config.url({
@@ -2564,7 +2567,7 @@ var OpenRouterCompletionLanguageModel = class {
async doGenerate(options) {
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
const providerOptions = options.providerOptions || {};
- const openrouterOptions = providerOptions.openrouter || {};
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
const { value: response, responseHeaders } = await postJsonToApi({
url: this.config.url({
@@ -2623,7 +2626,7 @@ var OpenRouterCompletionLanguageModel = class {
}
async doStream(options) {
const providerOptions = options.providerOptions || {};
- const openrouterOptions = providerOptions.openrouter || {};
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
const { value: response, responseHeaders } = await postJsonToApi({
url: this.config.url({
diff --git a/dist/index.mjs b/dist/index.mjs
index 8a688331b88b4af738ee4ca8062b5f24124d3d81..a2aa299a44352addc26f8891d839ea31a2150ee2 100644
--- a/dist/index.mjs
+++ b/dist/index.mjs
@@ -1015,7 +1015,8 @@ var OpenRouterProviderMetadataSchema = z3.object({
var OpenRouterProviderOptionsSchema = z3.object({
openrouter: z3.object({
reasoning_details: z3.array(ReasoningDetailUnionSchema).optional(),
- annotations: z3.array(FileAnnotationSchema).optional()
+ annotations: z3.array(FileAnnotationSchema).optional(),
+ strictJsonSchema: z3.boolean().optional()
}).optional()
}).optional();
@@ -1623,7 +1624,8 @@ var OpenRouterChatLanguageModel = class {
responseFormat,
topK,
tools,
- toolChoice
+ toolChoice,
+ providerOptions
}) {
var _a15;
const baseArgs = __spreadValues(__spreadValues({
@@ -1677,7 +1679,8 @@ var OpenRouterChatLanguageModel = class {
function: {
name: tool.name,
description: tool.description,
- parameters: tool.inputSchema
+ parameters: tool.inputSchema,
+ strict: providerOptions?.openrouter?.strictJsonSchema
}
}));
return __spreadProps(__spreadValues({}, baseArgs), {
@@ -1690,7 +1693,7 @@ var OpenRouterChatLanguageModel = class {
async doGenerate(options) {
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _s, _t, _u, _v, _w;
const providerOptions = options.providerOptions || {};
- const openrouterOptions = providerOptions.openrouter || {};
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
const { value: responseValue, responseHeaders } = await postJsonToApi({
url: this.config.url({
@@ -1896,7 +1899,7 @@ var OpenRouterChatLanguageModel = class {
async doStream(options) {
var _a15;
const providerOptions = options.providerOptions || {};
- const openrouterOptions = providerOptions.openrouter || {};
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
const { value: response, responseHeaders } = await postJsonToApi({
url: this.config.url({
@@ -2529,7 +2532,7 @@ var OpenRouterCompletionLanguageModel = class {
async doGenerate(options) {
var _a15, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o;
const providerOptions = options.providerOptions || {};
- const openrouterOptions = providerOptions.openrouter || {};
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
const { value: response, responseHeaders } = await postJsonToApi({
url: this.config.url({
@@ -2588,7 +2591,7 @@ var OpenRouterCompletionLanguageModel = class {
}
async doStream(options) {
const providerOptions = options.providerOptions || {};
- const openrouterOptions = providerOptions.openrouter || {};
+ const { strictJsonSchema: _strictJsonSchema, ...openrouterOptions } = providerOptions.openrouter || {};
const args = __spreadValues(__spreadValues({}, this.getArgs(options)), openrouterOptions);
const { value: response, responseHeaders } = await postJsonToApi({
url: this.config.url({

pnpm-lock.yaml (generated, 706 lines)

File diff suppressed because it is too large.

View File

@ -1,2 +1,8 @@
packages:
- 'packages/*'
supportedArchitectures:
os:
- current
cpu:
- current

View File

@ -0,0 +1,360 @@
import * as path from 'path'
import { Node, Project } from 'ts-morph'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import {
HardcodedStringDetector,
hasCJK,
hasEnglishUIText,
isInCodeContext,
isNonUIString,
shouldSkipNode
} from '../check-hardcoded-strings'
function createTestProject() {
return new Project({
skipAddingFilesFromTsConfig: true,
skipFileDependencyResolution: true,
compilerOptions: { jsx: 2 } // React JSX
})
}
function findStringLiteral(project: Project, code: string, targetString: string): Node | undefined {
const sourceFile = project.createSourceFile('test.tsx', code, { overwrite: true })
let found: Node | undefined
sourceFile.forEachDescendant((node) => {
if (Node.isStringLiteral(node) && node.getLiteralValue() === targetString) {
found = node
}
})
return found
}
function findTemplateLiteral(project: Project, code: string): Node | undefined {
const sourceFile = project.createSourceFile('test.tsx', code, { overwrite: true })
let found: Node | undefined
sourceFile.forEachDescendant((node) => {
if (Node.isNoSubstitutionTemplateLiteral(node) || Node.isTemplateExpression(node)) {
found = node
}
})
return found
}
// Mock fs module
vi.mock('fs')
describe('check-hardcoded-strings', () => {
const mockSrcDir = '/mock/src/renderer/src'
beforeEach(() => {
vi.resetAllMocks()
})
afterEach(() => {
vi.restoreAllMocks()
})
describe('hasCJK', () => {
it('should detect Chinese characters', () => {
expect(hasCJK('测试文本')).toBe(true)
expect(hasCJK('Hello 世界')).toBe(true)
expect(hasCJK('中文')).toBe(true)
})
it('should detect Japanese characters', () => {
expect(hasCJK('こんにちは')).toBe(true) // Hiragana
expect(hasCJK('カタカナ')).toBe(true) // Katakana
expect(hasCJK('日本語')).toBe(true) // Kanji
})
it('should detect Korean characters', () => {
expect(hasCJK('한국어')).toBe(true) // Hangul
expect(hasCJK('안녕하세요')).toBe(true)
})
it('should return false for non-CJK text', () => {
expect(hasCJK('Hello World')).toBe(false)
expect(hasCJK('12345')).toBe(false)
expect(hasCJK('')).toBe(false)
})
})
describe('hasEnglishUIText', () => {
it('should detect English UI text patterns', () => {
expect(hasEnglishUIText('Create New File')).toBe(true)
expect(hasEnglishUIText('Save As')).toBe(true)
expect(hasEnglishUIText('Open Project')).toBe(true)
})
it('should reject single words', () => {
expect(hasEnglishUIText('Save')).toBe(false)
expect(hasEnglishUIText('Cancel')).toBe(false)
})
it('should reject lowercase text', () => {
expect(hasEnglishUIText('create new file')).toBe(false)
expect(hasEnglishUIText('save as')).toBe(false)
})
it('should reject too long phrases', () => {
expect(hasEnglishUIText('This Is A Very Long Phrase With Many Words')).toBe(false)
})
})
describe('isNonUIString', () => {
it('should identify empty strings', () => {
expect(isNonUIString('')).toBe(true)
})
it('should identify pure numbers', () => {
expect(isNonUIString('123')).toBe(true)
expect(isNonUIString('0')).toBe(true)
expect(isNonUIString('999')).toBe(true)
})
it('should not mark regular UI text as non-UI', () => {
expect(isNonUIString('Hello World')).toBe(false)
expect(isNonUIString('Save')).toBe(false)
expect(isNonUIString('确认')).toBe(false)
expect(isNonUIString('请输入内容')).toBe(false)
expect(isNonUIString('-')).toBe(false) // Even short strings may be UI in specific contexts
})
it('should not filter technical strings (now handled by AST context)', () => {
// With AST-based detection, these are no longer filtered
// because we only check specific UI contexts where they rarely appear
expect(isNonUIString('./path/to/file')).toBe(false)
expect(isNonUIString('https://example.com')).toBe(false)
expect(isNonUIString('#fff')).toBe(false)
expect(isNonUIString('snake_case_id')).toBe(false)
})
})
describe('File filtering', () => {
const IGNORED_DIRS = ['__tests__', 'node_modules', 'i18n', 'locales', 'types', 'assets']
const IGNORED_FILES = ['*.test.ts', '*.test.tsx', '*.d.ts']
const mockShouldSkipFile = (filePath: string): boolean => {
const relativePath = filePath.replace(mockSrcDir + '/', '')
if (IGNORED_DIRS.some((dir) => relativePath.includes(dir))) {
return true
}
const fileName = path.basename(filePath)
if (
IGNORED_FILES.some((pattern) => {
const regex = new RegExp(pattern.replace('*', '.*'))
return regex.test(fileName)
})
) {
return true
}
return false
}
it('should skip test files', () => {
expect(mockShouldSkipFile(`${mockSrcDir}/components/Button.test.tsx`)).toBe(true)
expect(mockShouldSkipFile(`${mockSrcDir}/utils/helper.test.ts`)).toBe(true)
})
it('should skip type definition files', () => {
expect(mockShouldSkipFile(`${mockSrcDir}/types/index.d.ts`)).toBe(true)
})
it('should skip i18n/locales directories', () => {
expect(mockShouldSkipFile(`${mockSrcDir}/i18n/locales/en-us.json`)).toBe(true)
expect(mockShouldSkipFile(`${mockSrcDir}/locales/zh-cn.json`)).toBe(true)
})
it('should skip __tests__ directories', () => {
expect(mockShouldSkipFile(`${mockSrcDir}/components/__tests__/Button.test.tsx`)).toBe(true)
})
it('should NOT skip regular component files', () => {
expect(mockShouldSkipFile(`${mockSrcDir}/components/Button.tsx`)).toBe(false)
expect(mockShouldSkipFile(`${mockSrcDir}/pages/Home.tsx`)).toBe(false)
})
it('should NOT skip regular TypeScript files', () => {
expect(mockShouldSkipFile(`${mockSrcDir}/utils/helper.ts`)).toBe(false)
})
})
describe('HardcodedStringDetector', () => {
// These are integration tests that would require actual files
// For unit testing, we test the exported utility functions instead
it('should be instantiable', () => {
const detector = new HardcodedStringDetector()
expect(detector).toBeDefined()
})
})
describe('Legacy pattern compatibility (regex patterns for reference)', () => {
// Keep legacy pattern tests for backward compatibility reference
const CHINESE_PATTERNS = [
{ regex: />([^<]*[\u4e00-\u9fff][^<]*)</g, name: 'JSX text content' },
{
regex: /(?:placeholder|title|label|message|description|tooltip)=["']([^"']*[\u4e00-\u9fff][^"']*)["']/g,
name: 'attribute'
}
]
it('should detect Chinese characters in JSX text content (regex)', () => {
const testLine = '<span>测试文本</span>'
const matches = testLine.match(CHINESE_PATTERNS[0].regex)
expect(matches).not.toBeNull()
})
it('should detect Chinese characters in placeholder attribute (regex)', () => {
const testLine = 'placeholder="请输入内容"'
const matches = testLine.match(CHINESE_PATTERNS[1].regex)
expect(matches).not.toBeNull()
})
it('should detect Chinese characters in title attribute (regex)', () => {
const testLine = 'title="提示信息"'
const matches = testLine.match(CHINESE_PATTERNS[1].regex)
expect(matches).not.toBeNull()
})
})
describe('shouldSkipNode', () => {
let project: Project
beforeEach(() => {
project = createTestProject()
})
it('should skip import declarations', () => {
const node = findStringLiteral(project, `import { foo } from 'some-module'`, 'some-module')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should skip export declarations', () => {
const node = findStringLiteral(project, `export { foo } from 'some-module'`, 'some-module')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should skip logger calls', () => {
const node = findStringLiteral(project, `logger.info('测试日志')`, '测试日志')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should skip console calls', () => {
const node = findStringLiteral(project, `console.log('测试日志')`, '测试日志')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should skip t() translation function calls', () => {
const node = findStringLiteral(project, `t('common.save')`, 'common.save')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should skip type alias declarations', () => {
const node = findStringLiteral(project, `type Status = '成功' | '失败'`, '成功')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should skip interface declarations', () => {
const node = findStringLiteral(project, `interface Foo { status: '成功' }`, '成功')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should skip enum members', () => {
const node = findStringLiteral(project, `enum Status { Success = '成功' }`, '成功')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should skip language/locale variable declarations', () => {
const node = findStringLiteral(project, `const languageOptions = ['中文', 'English']`, '中文')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(true)
})
it('should NOT skip regular string literals', () => {
const node = findStringLiteral(project, `const message = '测试消息'`, '测试消息')
expect(node).toBeDefined()
expect(shouldSkipNode(node!)).toBe(false)
})
})
describe('isInCodeContext', () => {
let project: Project
beforeEach(() => {
project = createTestProject()
})
it('should detect tagged template expressions with css tag', () => {
const node = findTemplateLiteral(project, 'const style = css`color: red;`')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should detect tagged template expressions with styled tag', () => {
const node = findTemplateLiteral(project, 'const Button = styled.button`padding: 10px;`')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should detect CSS variable names', () => {
const node = findStringLiteral(project, `const customStyle = 'color: blue'`, 'color: blue')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should detect code variable names', () => {
const node = findStringLiteral(project, `const pythonCode = 'print("hello")'`, 'print("hello")')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should detect CSS property assignments', () => {
const node = findStringLiteral(project, `const obj = { style: 'color: red' }`, 'color: red')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should detect code property assignments', () => {
const node = findStringLiteral(project, `const obj = { script: 'console.log(1)' }`, 'console.log(1)')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should detect JSX style attributes', () => {
const node = findStringLiteral(project, `<div style={'color: red'} />`, 'color: red')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should detect executeJavaScript calls', () => {
const node = findStringLiteral(project, `webview.executeJavaScript('document.title')`, 'document.title')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should detect executeJavaScript with string concatenation', () => {
const node = findStringLiteral(project, `webview.executeJavaScript('var x = ' + value + ';')`, 'var x = ')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(true)
})
it('should NOT detect regular strings', () => {
const node = findStringLiteral(project, `const message = '普通消息'`, '普通消息')
expect(node).toBeDefined()
expect(isInCodeContext(node!)).toBe(false)
})
})
})

View File

@ -1,105 +1,126 @@
const { Arch } = require('electron-builder')
const { downloadNpmPackage } = require('./utils')
const { execSync } = require('child_process')
const fs = require('fs')
const path = require('path')
const yaml = require('js-yaml')
const workspaceConfigPath = path.join(__dirname, '..', 'pnpm-workspace.yaml')
// If you want to add new prebuilt binary packages for different architectures, you can add them here;
// please add them to allX64 and allArm64, using the versions from pnpm-lock.yaml
const allArm64 = {
'@img/sharp-darwin-arm64': '0.34.3',
'@img/sharp-win32-arm64': '0.34.3',
'@img/sharp-linux-arm64': '0.34.3',
'@img/sharp-libvips-darwin-arm64': '1.2.4',
'@img/sharp-libvips-linux-arm64': '1.2.4',
'@libsql/darwin-arm64': '0.4.7',
'@libsql/linux-arm64-gnu': '0.4.7',
'@strongtz/win32-arm64-msvc': '0.4.7',
'@napi-rs/system-ocr-darwin-arm64': '1.0.2',
'@napi-rs/system-ocr-win32-arm64-msvc': '1.0.2'
}
const allX64 = {
'@img/sharp-darwin-x64': '0.34.3',
'@img/sharp-linux-x64': '0.34.3',
'@img/sharp-win32-x64': '0.34.3',
'@img/sharp-libvips-darwin-x64': '1.2.4',
'@img/sharp-libvips-linux-x64': '1.2.4',
'@libsql/darwin-x64': '0.4.7',
'@libsql/linux-x64-gnu': '0.4.7',
'@libsql/win32-x64-msvc': '0.4.7',
'@napi-rs/system-ocr-darwin-x64': '1.0.2',
'@napi-rs/system-ocr-win32-x64-msvc': '1.0.2'
}
const claudeCodeVenderPath = '@anthropic-ai/claude-agent-sdk/vendor'
const claudeCodeVenders = ['arm64-darwin', 'arm64-linux', 'x64-darwin', 'x64-linux', 'x64-win32']
const packages = [
'@img/sharp-darwin-arm64',
'@img/sharp-darwin-x64',
'@img/sharp-libvips-darwin-arm64',
'@img/sharp-libvips-darwin-x64',
'@img/sharp-libvips-linux-arm64',
'@img/sharp-libvips-linuxmusl-arm64',
'@img/sharp-libvips-linux-x64',
'@img/sharp-libvips-linuxmusl-x64',
'@img/sharp-linux-arm64',
'@img/sharp-linux-x64',
'@img/sharp-linuxmusl-arm64',
'@img/sharp-linuxmusl-x64',
'@img/sharp-win32-arm64',
'@img/sharp-win32-x64',
'@libsql/darwin-arm64',
'@libsql/darwin-x64',
'@libsql/linux-arm64-gnu',
'@libsql/linux-x64-gnu',
'@libsql/linux-arm64-musl',
'@libsql/linux-x64-musl',
'@libsql/win32-x64-msvc',
'@napi-rs/system-ocr-darwin-arm64',
'@napi-rs/system-ocr-darwin-x64',
'@napi-rs/system-ocr-win32-arm64-msvc',
'@napi-rs/system-ocr-win32-x64-msvc',
'@strongtz/win32-arm64-msvc'
]
const platformToArch = {
mac: 'darwin',
windows: 'win32',
linux: 'linux'
linux: 'linux',
linuxmusl: 'linuxmusl'
}
exports.default = async function (context) {
const arch = context.arch
const archType = arch === Arch.arm64 ? 'arm64' : 'x64'
const platform = context.packager.platform.name
const arch = context.arch === Arch.arm64 ? 'arm64' : 'x64'
const platformName = context.packager.platform.name
const platform = platformToArch[platformName]
const downloadPackages = async (packages) => {
console.log('downloading packages ......')
const downloadPromises = []
for (const name of Object.keys(packages)) {
if (name.includes(`${platformToArch[platform]}`) && name.includes(`-${archType}`)) {
downloadPromises.push(
downloadNpmPackage(
name,
`https://registry.npmjs.org/${name}/-/${name.split('/').pop()}-${packages[name]}.tgz`
)
)
}
const downloadPackages = async () => {
// Skip if target platform and architecture match current system
if (platform === process.platform && arch === process.arch) {
console.log(`Skipping install: target (${platform}/${arch}) matches current system`)
return
}
await Promise.all(downloadPromises)
console.log(`Installing packages for target platform=${platform} arch=${arch}...`)
// Backup and modify pnpm-workspace.yaml to add target platform support
const originalWorkspaceConfig = fs.readFileSync(workspaceConfigPath, 'utf-8')
const workspaceConfig = yaml.load(originalWorkspaceConfig)
// Add target platform to supportedArchitectures.os
if (!workspaceConfig.supportedArchitectures.os.includes(platform)) {
workspaceConfig.supportedArchitectures.os.push(platform)
}
// Add target architecture to supportedArchitectures.cpu
if (!workspaceConfig.supportedArchitectures.cpu.includes(arch)) {
workspaceConfig.supportedArchitectures.cpu.push(arch)
}
const modifiedWorkspaceConfig = yaml.dump(workspaceConfig)
console.log('Modified workspace config:', modifiedWorkspaceConfig)
fs.writeFileSync(workspaceConfigPath, modifiedWorkspaceConfig)
try {
execSync(`pnpm install`, { stdio: 'inherit' })
} finally {
// Restore original pnpm-workspace.yaml
fs.writeFileSync(workspaceConfigPath, originalWorkspaceConfig)
}
}
const changeFilters = async (filtersToExclude, filtersToInclude) => {
// remove filters for the target architecture (allow inclusion)
let filters = context.packager.config.files[0].filter
filters = filters.filter((filter) => !filtersToInclude.includes(filter))
await downloadPackages()
const excludePackages = async (packagesToExclude) => {
// Read the files config from electron-builder.yml at the project root, to avoid errors caused by overwriting the config multiple times
const electronBuilderConfigPath = path.join(__dirname, '..', 'electron-builder.yml')
const electronBuilderConfig = yaml.load(fs.readFileSync(electronBuilderConfigPath, 'utf-8'))
let filters = electronBuilderConfig.files
// add filters for other architectures (exclude them)
filters.push(...filtersToExclude)
filters.push(...packagesToExclude)
context.packager.config.files[0].filter = filters
}
await downloadPackages(arch === Arch.arm64 ? allArm64 : allX64)
const arm64KeepPackages = packages.filter((p) => p.includes('arm64') && p.includes(platform))
const arm64ExcludePackages = packages
.filter((p) => !arm64KeepPackages.includes(p))
.map((p) => '!node_modules/' + p + '/**')
const arm64Filters = Object.keys(allArm64).map((f) => '!node_modules/' + f + '/**')
const x64Filters = Object.keys(allX64).map((f) => '!node_modules/' + f + '/*')
const excludeClaudeCodeRipgrepFilters = claudeCodeVenders
.filter((f) => f !== `${archType}-${platformToArch[platform]}`)
.map((f) => '!node_modules/' + claudeCodeVenderPath + '/ripgrep/' + f + '/**')
const excludeClaudeCodeJBPlutins = ['!node_modules/' + claudeCodeVenderPath + '/' + 'claude-code-jetbrains-plugin']
const x64KeepPackages = packages.filter((p) => p.includes('x64') && p.includes(platform))
const x64ExcludePackages = packages
.filter((p) => !x64KeepPackages.includes(p))
.map((p) => '!node_modules/' + p + '/**')
const includeClaudeCodeFilters = [
'!node_modules/' + claudeCodeVenderPath + '/ripgrep/' + `${archType}-${platformToArch[platform]}/**`
]
const excludeRipgrepFilters = ['arm64-darwin', 'arm64-linux', 'x64-darwin', 'x64-linux', 'x64-win32']
.filter((f) => {
// On Windows ARM64, also keep x64-win32 for emulation compatibility
if (platform === 'win32' && context.arch === Arch.arm64 && f === 'x64-win32') {
return false
}
return f !== `${arch}-${platform}`
})
.map((f) => '!node_modules/@anthropic-ai/claude-agent-sdk/vendor/ripgrep/' + f + '/**')
if (arch === Arch.arm64) {
await changeFilters(
[...x64Filters, ...excludeClaudeCodeRipgrepFilters, ...excludeClaudeCodeJBPlutins],
[...arm64Filters, ...includeClaudeCodeFilters]
)
if (context.arch === Arch.arm64) {
await excludePackages([...arm64ExcludePackages, ...excludeRipgrepFilters])
} else {
await changeFilters(
[...arm64Filters, ...excludeClaudeCodeRipgrepFilters, ...excludeClaudeCodeJBPlutins],
[...x64Filters, ...includeClaudeCodeFilters]
)
await excludePackages([...x64ExcludePackages, ...excludeRipgrepFilters])
}
}

View File

@ -0,0 +1,464 @@
/**
* AST-based hardcoded string detection for i18n
*/
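/**
 * Usage (assumed invocation; the script appears to live in scripts/, based on the relative paths below):
 *   pnpm tsx scripts/check-hardcoded-strings.ts             # report findings
 *   pnpm tsx scripts/check-hardcoded-strings.ts --strict    # exit 1 if hardcoded Chinese strings are found (CI)
 *   I18N_STRICT=true pnpm tsx scripts/check-hardcoded-strings.ts
 */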
import * as fs from 'fs'
import * as path from 'path'
import type { SourceFile } from 'ts-morph'
import { Node, Project } from 'ts-morph'
const RENDERER_DIR = path.join(__dirname, '../src/renderer/src')
const MAIN_DIR = path.join(__dirname, '../src/main')
const EXTENSIONS = ['.tsx', '.ts']
const IGNORED_DIRS = ['__tests__', 'node_modules', 'i18n', 'locales', 'types', 'assets']
const IGNORED_FILES = ['*.test.ts', '*.test.tsx', '*.d.ts', '*prompts*.ts']
// 'content' is handled specially - only checked for specific components
const UI_ATTRIBUTES = [
'placeholder',
'title',
'label',
'message',
'description',
'tooltip',
'buttonLabel',
'name',
'detail',
'body'
]
const CONTEXT_SENSITIVE_ATTRIBUTES: Record<string, string[]> = {
content: ['Tooltip', 'Popover', 'Modal', 'Popconfirm', 'Alert', 'Notification', 'Message']
}
const UI_PROPERTIES = ['message', 'text', 'title', 'label', 'placeholder', 'description', 'detail']
interface Finding {
file: string
line: number
content: string
type: 'chinese' | 'english'
source: 'renderer' | 'main'
nodeType: string
}
const CJK_RANGES = [
'\u3000-\u303f', // CJK Symbols and Punctuation
'\u3040-\u309f', // Hiragana
'\u30a0-\u30ff', // Katakana
'\u3100-\u312f', // Bopomofo
'\u3400-\u4dbf', // CJK Unified Ideographs Extension A
'\u4e00-\u9fff', // CJK Unified Ideographs
'\uac00-\ud7af', // Hangul Syllables
'\uf900-\ufaff' // CJK Compatibility Ideographs
].join('')
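// True if the text contains any character in the CJK ranges above (Chinese, Japanese, or Korean scripts).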
function hasCJK(text: string): boolean {
return new RegExp(`[${CJK_RANGES}]`).test(text)
}
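// Heuristic for hardcoded English UI copy: 2-6 words with a capitalized first word, e.g. "Create New File".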
function hasEnglishUIText(text: string): boolean {
const words = text.trim().split(/\s+/)
if (words.length < 2 || words.length > 6) return false
return /^[A-Z][a-z]+(\s+[A-Za-z]+){1,5}$/.test(text.trim())
}
function createFinding(
node: Node,
sourceFile: SourceFile,
type: 'chinese' | 'english',
source: 'renderer' | 'main',
nodeType: string
): Finding {
return {
file: sourceFile.getFilePath(),
line: sourceFile.getLineAndColumnAtPos(node.getStart()).line,
content: node.getText().slice(0, 100),
type,
source,
nodeType
}
}
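// Walk up the ancestor chain and skip strings that appear in imports/exports, logger/console or t() calls,
// type-level positions (type aliases, interfaces, property signatures), enum members, or language/locale variables.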
function shouldSkipNode(node: Node): boolean {
let current: Node | undefined = node
while (current) {
const parent = current.getParent()
if (!parent) break
if (Node.isImportDeclaration(parent) || Node.isExportDeclaration(parent)) {
return true
}
if (Node.isCallExpression(parent)) {
const callText = parent.getExpression().getText()
if (/^(logger|console)\.(log|error|warn|info|debug|silly|trace|withContext)/.test(callText)) {
return true
}
const callee = parent.getExpression()
if (Node.isIdentifier(callee) && callee.getText() === 't') {
return true
}
}
if (Node.isTypeNode(parent) || Node.isTypeAliasDeclaration(parent) || Node.isInterfaceDeclaration(parent)) {
return true
}
if (Node.isPropertySignature(parent)) {
return true
}
if (Node.isEnumMember(parent)) {
return true
}
// Native language names should stay in native form
if (Node.isVariableDeclaration(parent)) {
const varName = parent.getName()
if (/language|locale/i.test(varName)) {
return true
}
}
current = parent
}
return false
}
function isNonUIString(text: string): boolean {
if (text.length === 0) return true
if (/^\d+$/.test(text)) return true
return false
}
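// Heuristics for code-like contexts (CSS-in-JS template tags, variables/properties named like code or style,
// JSX style/css attributes, and executeJavaScript-style calls); strings found here are not treated as UI text.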
const CODE_CONTEXT = {
cssTags: /^(css|keyframes|injectGlobal|createGlobalStyle|styled\.\w+)$/,
cssNames: /style|css|animation/i,
codeNames: /code|script|python|sql|query|html|template|regex|pattern|shim/i,
jsxAttrs: new Set(['style', 'css']),
execCalls: /\.(executeJavaScript|eval|Function|runPython|runPythonAsync)$/
}
function isInCodeContext(node: Node): boolean {
const parent = node.getParent()
if (!parent) return false
if (Node.isTaggedTemplateExpression(parent)) {
return CODE_CONTEXT.cssTags.test(parent.getTag().getText())
}
if (Node.isVariableDeclaration(parent)) {
const name = parent.getName()
return CODE_CONTEXT.cssNames.test(name) || CODE_CONTEXT.codeNames.test(name)
}
if (Node.isPropertyAssignment(parent)) {
const name = parent.getName()
return CODE_CONTEXT.cssNames.test(name) || CODE_CONTEXT.codeNames.test(name)
}
if (Node.isJsxExpression(parent)) {
const attr = parent.getParent()
if (attr && Node.isJsxAttribute(attr)) {
return CODE_CONTEXT.jsxAttrs.has(attr.getNameNode().getText())
}
}
// Traverse up for code execution calls (handles string concatenation)
let current: Node | undefined = parent
while (current) {
if (Node.isCallExpression(current)) {
if (CODE_CONTEXT.execCalls.test(current.getExpression().getText())) {
return true
}
break
}
if (!Node.isBinaryExpression(current) && !Node.isParenthesizedExpression(current)) {
break
}
current = current.getParent()
}
return false
}
function getJsxElementName(attrNode: Node): string | null {
const parent = attrNode.getParent()
if (!parent) return null
if (Node.isJsxOpeningElement(parent) || Node.isJsxSelfClosingElement(parent)) {
return parent.getTagNameNode().getText()
}
return null
}
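// Attributes in UI_ATTRIBUTES are always checked; attributes in CONTEXT_SENSITIVE_ATTRIBUTES (e.g. `content`)
// are only checked when they appear on one of the listed components.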
function shouldCheckAttribute(attrName: string, elementName: string | null): boolean {
if (UI_ATTRIBUTES.includes(attrName)) {
return true
}
const allowedComponents = CONTEXT_SENSITIVE_ATTRIBUTES[attrName]
if (allowedComponents && elementName) {
return allowedComponents.includes(elementName)
}
return false
}
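// Wraps a ts-morph Project and walks each file's AST, collecting findings for JSX text, UI attributes,
// UI object properties, toast/notification calls, and template literals that contain CJK or English UI text.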
class HardcodedStringDetector {
private project: Project
constructor() {
this.project = new Project({
skipAddingFilesFromTsConfig: true,
skipFileDependencyResolution: true
})
}
scanFile(filePath: string, source: 'renderer' | 'main'): Finding[] {
const findings: Finding[] = []
try {
const sourceFile = this.project.addSourceFileAtPath(filePath)
sourceFile.forEachDescendant((node) => {
this.checkNode(node, sourceFile, source, findings)
})
this.project.removeSourceFile(sourceFile)
} catch (error) {
console.error(`Error parsing ${filePath}:`, error)
}
return findings
}
private checkNode(node: Node, sourceFile: SourceFile, source: 'renderer' | 'main', findings: Finding[]): void {
if (shouldSkipNode(node)) return
if (Node.isJsxText(node)) {
const text = node.getText().trim()
if (text && hasCJK(text)) {
// Skip SVG internal elements
const parent = node.getParent()
if (parent && (Node.isJsxElement(parent) || Node.isJsxSelfClosingElement(parent))) {
const tagName = Node.isJsxElement(parent)
? parent.getOpeningElement().getTagNameNode().getText()
: parent.getTagNameNode().getText()
if (['title', 'desc', 'text', 'tspan'].includes(tagName)) {
return
}
}
findings.push(createFinding(node, sourceFile, 'chinese', source, 'JsxText'))
}
}
if (Node.isJsxAttribute(node)) {
const attrName = node.getNameNode().getText()
const elementName = getJsxElementName(node)
if (shouldCheckAttribute(attrName, elementName)) {
const initializer = node.getInitializer()
if (initializer && Node.isStringLiteral(initializer)) {
const value = initializer.getLiteralValue()
if (!isNonUIString(value)) {
if (hasCJK(value)) {
findings.push(createFinding(node, sourceFile, 'chinese', source, 'JsxAttribute'))
} else if (source === 'renderer' && hasEnglishUIText(value)) {
findings.push(createFinding(node, sourceFile, 'english', source, 'JsxAttribute'))
}
}
}
}
}
if (Node.isStringLiteral(node)) {
if (isInCodeContext(node)) return
const value = node.getLiteralValue()
if (isNonUIString(value)) return
const parent = node.getParent()
if (parent && Node.isPropertyAssignment(parent)) {
const propName = parent.getName()
if (UI_PROPERTIES.includes(propName)) {
if (hasCJK(value)) {
findings.push(createFinding(node, sourceFile, 'chinese', source, 'PropertyAssignment'))
}
}
}
if (parent && Node.isCallExpression(parent)) {
const callText = parent.getExpression().getText()
if (
/^(window\.toast|message|antdMessage|Modal|notification)\.(success|error|warning|info|confirm)/.test(callText)
) {
if (hasCJK(value)) {
findings.push(createFinding(node, sourceFile, 'chinese', source, 'CallExpression'))
}
}
}
}
if (Node.isTemplateExpression(node) || Node.isNoSubstitutionTemplateLiteral(node)) {
if (isInCodeContext(node)) return
const text = node.getText()
if (hasCJK(text)) {
findings.push(createFinding(node, sourceFile, 'chinese', source, 'TemplateLiteral'))
}
}
}
}
function shouldSkipFile(filePath: string, baseDir: string): boolean {
const relativePath = path.relative(baseDir, filePath)
if (IGNORED_DIRS.some((dir) => relativePath.includes(dir))) {
return true
}
const fileName = path.basename(filePath)
if (
IGNORED_FILES.some((pattern) => {
const regex = new RegExp(pattern.replace('*', '.*'))
return regex.test(fileName)
})
) {
return true
}
return false
}
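// Recursively walk a directory, skipping ignored dirs/files, and run the detector over each .ts/.tsx file.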
function scanDirectory(dir: string, source: 'renderer' | 'main', detector: HardcodedStringDetector): Finding[] {
const findings: Finding[] = []
if (!fs.existsSync(dir)) {
return findings
}
const entries = fs.readdirSync(dir, { withFileTypes: true })
for (const entry of entries) {
const fullPath = path.join(dir, entry.name)
if (entry.isDirectory()) {
if (!IGNORED_DIRS.includes(entry.name)) {
findings.push(...scanDirectory(fullPath, source, detector))
}
} else if (entry.isFile() && EXTENSIONS.some((ext) => entry.name.endsWith(ext))) {
if (!shouldSkipFile(fullPath, source === 'renderer' ? RENDERER_DIR : MAIN_DIR)) {
findings.push(...detector.scanFile(fullPath, source))
}
}
}
return findings
}
function formatFindings(findings: Finding[]): string {
if (findings.length === 0) {
return '✅ No hardcoded strings found!'
}
const rendererFindings = findings.filter((f) => f.source === 'renderer')
const mainFindings = findings.filter((f) => f.source === 'main')
const chineseFindings = findings.filter((f) => f.type === 'chinese')
const englishFindings = findings.filter((f) => f.type === 'english')
let output = ''
if (rendererFindings.length > 0) {
output += '\n📦 Renderer Process:\n'
output += '-'.repeat(50) + '\n'
const rendererChinese = rendererFindings.filter((f) => f.type === 'chinese')
const rendererEnglish = rendererFindings.filter((f) => f.type === 'english')
if (rendererChinese.length > 0) {
output += '\n⚠ Hardcoded Chinese strings:\n'
rendererChinese.forEach((f) => {
const relativePath = path.relative(RENDERER_DIR, f.file)
output += `\n📍 ${relativePath}:${f.line} [${f.nodeType}]\n`
output += ` ${f.content}\n`
})
}
if (rendererEnglish.length > 0) {
output += '\n⚠ Potential hardcoded English strings:\n'
rendererEnglish.forEach((f) => {
const relativePath = path.relative(RENDERER_DIR, f.file)
output += `\n📍 ${relativePath}:${f.line} [${f.nodeType}]\n`
output += ` ${f.content}\n`
})
}
}
if (mainFindings.length > 0) {
output += '\n📦 Main Process:\n'
output += '-'.repeat(50) + '\n'
const mainChinese = mainFindings.filter((f) => f.type === 'chinese')
if (mainChinese.length > 0) {
output += '\n⚠ Hardcoded Chinese strings:\n'
mainChinese.forEach((f) => {
const relativePath = path.relative(MAIN_DIR, f.file)
output += `\n📍 ${relativePath}:${f.line} [${f.nodeType}]\n`
output += ` ${f.content}\n`
})
}
}
output += '\n' + '='.repeat(50) + '\n'
output += `Total: ${findings.length} potential issues found\n`
output += ` - Renderer: ${rendererFindings.length} (Chinese: ${rendererFindings.filter((f) => f.type === 'chinese').length}, English: ${rendererFindings.filter((f) => f.type === 'english').length})\n`
output += ` - Main: ${mainFindings.length} (Chinese: ${mainFindings.length})\n`
output += ` - Total Chinese: ${chineseFindings.length}\n`
output += ` - Total English: ${englishFindings.length}\n`
return output
}
export function main(): void {
console.log('🔍 Scanning for hardcoded strings using AST analysis...\n')
const detector = new HardcodedStringDetector()
const rendererFindings = scanDirectory(RENDERER_DIR, 'renderer', detector)
const mainFindings = scanDirectory(MAIN_DIR, 'main', detector)
const findings = [...rendererFindings, ...mainFindings]
const output = formatFindings(findings)
console.log(output)
// Strict mode for CI
const strictMode = process.env.I18N_STRICT === 'true' || process.argv.includes('--strict')
const chineseCount = findings.filter((f) => f.type === 'chinese').length
if (strictMode && chineseCount > 0) {
console.error('\n❌ Hardcoded Chinese strings detected in strict mode!')
console.error('Please replace these with i18n keys using the t() function.')
process.exit(1)
}
if (findings.length > 0) {
console.log('\n💡 Tip: Consider replacing these strings with i18n keys.')
console.log(' Use the t() function from react-i18next for translations.')
}
}
export {
HardcodedStringDetector,
hasCJK,
hasEnglishUIText,
isInCodeContext,
isNonUIString,
shouldSkipFile,
shouldSkipNode,
UI_ATTRIBUTES,
UI_PROPERTIES
}
main()

View File

@ -1,211 +0,0 @@
/**
* Feishu (Lark) Webhook Notification Script
* Sends GitHub issue summaries to Feishu with signature verification
*/
const crypto = require('crypto')
const https = require('https')
/**
* Generate Feishu webhook signature
* @param {string} secret - Feishu webhook secret
* @param {number} timestamp - Unix timestamp in seconds
* @returns {string} Base64 encoded signature
*/
function generateSignature(secret, timestamp) {
const stringToSign = `${timestamp}\n${secret}`
const hmac = crypto.createHmac('sha256', stringToSign)
return hmac.digest('base64')
}
/**
* Send message to Feishu webhook
* @param {string} webhookUrl - Feishu webhook URL
* @param {string} secret - Feishu webhook secret
* @param {object} content - Message content
* @returns {Promise<void>}
*/
function sendToFeishu(webhookUrl, secret, content) {
return new Promise((resolve, reject) => {
const timestamp = Math.floor(Date.now() / 1000)
const sign = generateSignature(secret, timestamp)
const payload = JSON.stringify({
timestamp: timestamp.toString(),
sign: sign,
msg_type: 'interactive',
card: content
})
const url = new URL(webhookUrl)
const options = {
hostname: url.hostname,
path: url.pathname + url.search,
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Length': Buffer.byteLength(payload)
}
}
const req = https.request(options, (res) => {
let data = ''
res.on('data', (chunk) => {
data += chunk
})
res.on('end', () => {
if (res.statusCode >= 200 && res.statusCode < 300) {
console.log('✅ Successfully sent to Feishu:', data)
resolve()
} else {
reject(new Error(`Feishu API error: ${res.statusCode} - ${data}`))
}
})
})
req.on('error', (error) => {
reject(error)
})
req.write(payload)
req.end()
})
}
/**
* Create Feishu card message from issue data
* @param {object} issueData - GitHub issue data
* @returns {object} Feishu card content
*/
function createIssueCard(issueData) {
const { issueUrl, issueNumber, issueTitle, issueSummary, issueAuthor, labels } = issueData
// Build labels section if labels exist
const labelElements =
labels && labels.length > 0
? labels.map((label) => ({
tag: 'markdown',
content: `\`${label}\``
}))
: []
return {
elements: [
{
tag: 'div',
text: {
tag: 'lark_md',
content: `**👤 Author:** ${issueAuthor}`
}
},
...(labelElements.length > 0
? [
{
tag: 'div',
text: {
tag: 'lark_md',
content: `**🏷️ Labels:** ${labels.join(', ')}`
}
}
]
: []),
{
tag: 'hr'
},
{
tag: 'div',
text: {
tag: 'lark_md',
content: `**📋 Summary:**\n${issueSummary}`
}
},
{
tag: 'hr'
},
{
tag: 'action',
actions: [
{
tag: 'button',
text: {
tag: 'plain_text',
content: '🔗 View Issue'
},
type: 'primary',
url: issueUrl
}
]
}
],
header: {
template: 'blue',
title: {
tag: 'plain_text',
content: `#${issueNumber} - ${issueTitle}`
}
}
}
}
/**
* Main function
*/
async function main() {
try {
// Get environment variables
const webhookUrl = process.env.FEISHU_WEBHOOK_URL
const secret = process.env.FEISHU_WEBHOOK_SECRET
const issueUrl = process.env.ISSUE_URL
const issueNumber = process.env.ISSUE_NUMBER
const issueTitle = process.env.ISSUE_TITLE
const issueSummary = process.env.ISSUE_SUMMARY
const issueAuthor = process.env.ISSUE_AUTHOR
const labelsStr = process.env.ISSUE_LABELS || ''
// Validate required environment variables
if (!webhookUrl) {
throw new Error('FEISHU_WEBHOOK_URL environment variable is required')
}
if (!secret) {
throw new Error('FEISHU_WEBHOOK_SECRET environment variable is required')
}
if (!issueUrl || !issueNumber || !issueTitle || !issueSummary) {
throw new Error('Issue data environment variables are required')
}
// Parse labels
const labels = labelsStr
? labelsStr
.split(',')
.map((l) => l.trim())
.filter(Boolean)
: []
// Create issue data object
const issueData = {
issueUrl,
issueNumber,
issueTitle,
issueSummary,
issueAuthor: issueAuthor || 'Unknown',
labels
}
// Create card content
const card = createIssueCard(issueData)
console.log('📤 Sending notification to Feishu...')
console.log(`Issue #${issueNumber}: ${issueTitle}`)
// Send to Feishu
await sendToFeishu(webhookUrl, secret, card)
console.log('✅ Notification sent successfully!')
} catch (error) {
console.error('❌ Error:', error.message)
process.exit(1)
}
}
// Run main function
main()

scripts/feishu-notify.ts Normal file
View File

@ -0,0 +1,421 @@
#!/usr/bin/env npx tsx
/**
* @fileoverview Feishu (Lark) Webhook Notification CLI Tool
* @description Sends notifications to Feishu with signature verification.
* Supports subcommands for different notification types.
* @module feishu-notify
* @example
* // Send GitHub issue notification
* pnpm tsx feishu-notify.ts issue -u "https://..." -n "123" -t "Title" -m "Summary"
*
* // Using environment variables for credentials
* FEISHU_WEBHOOK_URL="..." FEISHU_WEBHOOK_SECRET="..." pnpm tsx feishu-notify.ts issue ...
*/
import { Command } from 'commander'
import crypto from 'crypto'
import dotenv from 'dotenv'
import https from 'https'
import * as z from 'zod'
// Load environment variables from .env file
dotenv.config()
/** CLI tool version */
const VERSION = '1.0.0'
/** GitHub issue data structure */
interface IssueData {
/** GitHub issue URL */
issueUrl: string
/** Issue number */
issueNumber: string
/** Issue title */
issueTitle: string
/** Issue summary/description */
issueSummary: string
/** Issue author username */
issueAuthor: string
/** Issue labels */
labels: string[]
}
/** Feishu card text element */
interface FeishuTextElement {
tag: 'div'
text: {
tag: 'lark_md'
content: string
}
}
/** Feishu card horizontal rule element */
interface FeishuHrElement {
tag: 'hr'
}
/** Feishu card action button */
interface FeishuActionElement {
tag: 'action'
actions: Array<{
tag: 'button'
text: {
tag: 'plain_text'
content: string
}
type: 'primary' | 'default'
url: string
}>
}
/** Feishu card element union type */
type FeishuCardElement = FeishuTextElement | FeishuHrElement | FeishuActionElement
/** Zod schema for Feishu header color template */
const FeishuHeaderTemplateSchema = z.enum([
'blue',
'wathet',
'turquoise',
'green',
'yellow',
'orange',
'red',
'carmine',
'violet',
'purple',
'indigo',
'grey',
'default'
])
/** Feishu card header color template (inferred from schema) */
type FeishuHeaderTemplate = z.infer<typeof FeishuHeaderTemplateSchema>
/** Feishu interactive card structure */
interface FeishuCard {
elements: FeishuCardElement[]
header: {
template: FeishuHeaderTemplate
title: {
tag: 'plain_text'
content: string
}
}
}
/** Feishu webhook request payload */
interface FeishuPayload {
timestamp: string
sign: string
msg_type: 'interactive'
card: FeishuCard
}
/** Issue subcommand options */
interface IssueOptions {
url: string
number: string
title: string
summary: string
author?: string
labels?: string
}
/** Send subcommand options */
interface SendOptions {
title: string
description: string
color?: string
}
/**
* Generate Feishu webhook signature using HMAC-SHA256
* @param secret - Feishu webhook secret
* @param timestamp - Unix timestamp in seconds
* @returns Base64 encoded signature
*/
function generateSignature(secret: string, timestamp: number): string {
const stringToSign = `${timestamp}\n${secret}`
const hmac = crypto.createHmac('sha256', stringToSign)
return hmac.digest('base64')
}
/**
* Send message to Feishu webhook
* @param webhookUrl - Feishu webhook URL
* @param secret - Feishu webhook secret
* @param content - Feishu card message content
* @returns Resolves when message is sent successfully
* @throws When Feishu API returns non-2xx status code or network error occurs
*/
function sendToFeishu(webhookUrl: string, secret: string, content: FeishuCard): Promise<void> {
return new Promise((resolve, reject) => {
const timestamp = Math.floor(Date.now() / 1000)
const sign = generateSignature(secret, timestamp)
const payload: FeishuPayload = {
timestamp: timestamp.toString(),
sign,
msg_type: 'interactive',
card: content
}
const payloadStr = JSON.stringify(payload)
const url = new URL(webhookUrl)
const options: https.RequestOptions = {
hostname: url.hostname,
path: url.pathname + url.search,
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Content-Length': Buffer.byteLength(payloadStr)
}
}
const req = https.request(options, (res) => {
let data = ''
res.on('data', (chunk: Buffer) => {
data += chunk.toString()
})
res.on('end', () => {
if (res.statusCode && res.statusCode >= 200 && res.statusCode < 300) {
console.log('Successfully sent to Feishu:', data)
resolve()
} else {
reject(new Error(`Feishu API error: ${res.statusCode} - ${data}`))
}
})
})
req.on('error', (error: Error) => {
reject(error)
})
req.write(payloadStr)
req.end()
})
}
/**
* Create Feishu card message from issue data
* @param issueData - GitHub issue data
* @returns Feishu card content
*/
function createIssueCard(issueData: IssueData): FeishuCard {
const { issueUrl, issueNumber, issueTitle, issueSummary, issueAuthor, labels } = issueData
const elements: FeishuCardElement[] = [
{
tag: 'div',
text: {
tag: 'lark_md',
content: `**Author:** ${issueAuthor}`
}
}
]
if (labels.length > 0) {
elements.push({
tag: 'div',
text: {
tag: 'lark_md',
content: `**Labels:** ${labels.join(', ')}`
}
})
}
elements.push(
{ tag: 'hr' },
{
tag: 'div',
text: {
tag: 'lark_md',
content: `**Summary:**\n${issueSummary}`
}
},
{ tag: 'hr' },
{
tag: 'action',
actions: [
{
tag: 'button',
text: {
tag: 'plain_text',
content: 'View Issue'
},
type: 'primary',
url: issueUrl
}
]
}
)
return {
elements,
header: {
template: 'blue',
title: {
tag: 'plain_text',
content: `#${issueNumber} - ${issueTitle}`
}
}
}
}
/**
* Create a simple Feishu card message
* @param title - Card title
* @param description - Card description content
* @param color - Header color template (default: 'turquoise')
* @returns Feishu card content
*/
function createSimpleCard(title: string, description: string, color: FeishuHeaderTemplate = 'turquoise'): FeishuCard {
return {
elements: [
{
tag: 'div',
text: {
tag: 'lark_md',
content: description
}
}
],
header: {
template: color,
title: {
tag: 'plain_text',
content: title
}
}
}
}
/**
* Get Feishu credentials from environment variables
*/
function getCredentials(): { webhookUrl: string; secret: string } {
const webhookUrl = process.env.FEISHU_WEBHOOK_URL
const secret = process.env.FEISHU_WEBHOOK_SECRET
if (!webhookUrl) {
console.error('Error: FEISHU_WEBHOOK_URL environment variable is required')
process.exit(1)
}
if (!secret) {
console.error('Error: FEISHU_WEBHOOK_SECRET environment variable is required')
process.exit(1)
}
return { webhookUrl, secret }
}
/**
* Handle send subcommand
*/
async function handleSendCommand(options: SendOptions): Promise<void> {
const { webhookUrl, secret } = getCredentials()
const { title, description, color = 'turquoise' } = options
// Validate color parameter
const colorValidation = FeishuHeaderTemplateSchema.safeParse(color)
if (!colorValidation.success) {
console.error(`Error: Invalid color "${color}". Valid colors: ${FeishuHeaderTemplateSchema.options.join(', ')}`)
process.exit(1)
}
const card = createSimpleCard(title, description, colorValidation.data)
console.log('Sending notification to Feishu...')
console.log(`Title: ${title}`)
await sendToFeishu(webhookUrl, secret, card)
console.log('Notification sent successfully!')
}
/**
* Handle issue subcommand
*/
async function handleIssueCommand(options: IssueOptions): Promise<void> {
const { webhookUrl, secret } = getCredentials()
const { url, number, title, summary, author = 'Unknown', labels: labelsStr = '' } = options
if (!url || !number || !title || !summary) {
console.error('Error: --url, --number, --title, and --summary are required')
process.exit(1)
}
const labels = labelsStr
? labelsStr
.split(',')
.map((l) => l.trim())
.filter(Boolean)
: []
const issueData: IssueData = {
issueUrl: url,
issueNumber: number,
issueTitle: title,
issueSummary: summary,
issueAuthor: author,
labels
}
const card = createIssueCard(issueData)
console.log('Sending notification to Feishu...')
console.log(`Issue #${number}: ${title}`)
await sendToFeishu(webhookUrl, secret, card)
console.log('Notification sent successfully!')
}
// Configure CLI
const program = new Command()
program.name('feishu-notify').description('Send notifications to Feishu webhook').version(VERSION)
// Send subcommand (generic)
program
.command('send')
.description('Send a simple notification to Feishu')
.requiredOption('-t, --title <title>', 'Card title')
.requiredOption('-d, --description <description>', 'Card description (supports markdown)')
.option(
'-c, --color <color>',
`Header color template (default: turquoise). Options: ${FeishuHeaderTemplateSchema.options.join(', ')}`,
'turquoise'
)
.action(async (options: SendOptions) => {
try {
await handleSendCommand(options)
} catch (error) {
console.error('Error:', error instanceof Error ? error.message : error)
process.exit(1)
}
})
// Issue subcommand
program
.command('issue')
.description('Send GitHub issue notification to Feishu')
.requiredOption('-u, --url <url>', 'GitHub issue URL')
.requiredOption('-n, --number <number>', 'Issue number')
.requiredOption('-t, --title <title>', 'Issue title')
.requiredOption('-m, --summary <summary>', 'Issue summary')
.option('-a, --author <author>', 'Issue author', 'Unknown')
.option('-l, --labels <labels>', 'Issue labels, comma-separated')
.action(async (options: IssueOptions) => {
try {
await handleIssueCommand(options)
} catch (error) {
console.error('Error:', error instanceof Error ? error.message : error)
process.exit(1)
}
})
program.parse()

View File

@ -1,64 +0,0 @@
const fs = require('fs')
const path = require('path')
const os = require('os')
const zlib = require('zlib')
const tar = require('tar')
const { pipeline } = require('stream/promises')
async function downloadNpmPackage(packageName, url) {
const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), 'npm-download-'))
const targetDir = path.join('./node_modules/', packageName)
const filename = path.join(tempDir, packageName.replace('/', '-') + '.tgz')
const extractDir = path.join(tempDir, 'extract')
// Skip if directory already exists
if (fs.existsSync(targetDir)) {
console.log(`${targetDir} already exists, skipping download...`)
return
}
try {
console.log(`Downloading ${packageName}...`, url)
// Download file using fetch API
const response = await fetch(url)
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`)
}
const fileStream = fs.createWriteStream(filename)
await pipeline(response.body, fileStream)
console.log(`Extracting ${filename}...`)
// Create extraction directory
fs.mkdirSync(extractDir, { recursive: true })
// Extract tar.gz file using Node.js streams
await pipeline(fs.createReadStream(filename), zlib.createGunzip(), tar.extract({ cwd: extractDir }))
// Remove the downloaded file
fs.rmSync(filename, { force: true })
// Create target directory
fs.mkdirSync(targetDir, { recursive: true })
// Move extracted package contents to target directory
const packageDir = path.join(extractDir, 'package')
if (fs.existsSync(packageDir)) {
fs.cpSync(packageDir, targetDir, { recursive: true })
}
} catch (error) {
console.error(`Error processing ${packageName}: ${error.message}`)
throw error
} finally {
// Clean up temp directory
if (fs.existsSync(tempDir)) {
fs.rmSync(tempDir, { recursive: true, force: true })
}
}
}
module.exports = {
downloadNpmPackage
}

View File

@ -0,0 +1,240 @@
import { buildFunctionCallToolName, buildMcpToolName, generateMcpToolFunctionName, toCamelCase } from '@shared/mcp'
import { describe, expect, it } from 'vitest'
describe('toCamelCase', () => {
it('should convert hyphenated strings', () => {
expect(toCamelCase('my-server')).toBe('myServer')
expect(toCamelCase('my-tool-name')).toBe('myToolName')
})
it('should convert underscored strings', () => {
expect(toCamelCase('my_server')).toBe('myServer')
expect(toCamelCase('search_issues')).toBe('searchIssues')
})
it('should handle mixed delimiters', () => {
expect(toCamelCase('my-server_name')).toBe('myServerName')
})
it('should handle leading numbers by prefixing underscore', () => {
expect(toCamelCase('123server')).toBe('_123server')
})
it('should handle special characters', () => {
expect(toCamelCase('test@server!')).toBe('testServer')
expect(toCamelCase('tool#name$')).toBe('toolName')
})
it('should trim whitespace', () => {
expect(toCamelCase(' server ')).toBe('server')
})
it('should handle empty string', () => {
expect(toCamelCase('')).toBe('')
})
it('should handle uppercase snake case', () => {
expect(toCamelCase('MY_SERVER')).toBe('myServer')
expect(toCamelCase('SEARCH_ISSUES')).toBe('searchIssues')
})
it('should handle mixed case', () => {
expect(toCamelCase('MyServer')).toBe('myserver')
expect(toCamelCase('myTOOL')).toBe('mytool')
})
})
describe('buildMcpToolName', () => {
it('should build basic name with defaults', () => {
expect(buildMcpToolName('github', 'search_issues')).toBe('github_searchIssues')
})
it('should handle undefined server name', () => {
expect(buildMcpToolName(undefined, 'search_issues')).toBe('searchIssues')
})
it('should apply custom prefix and delimiter', () => {
expect(buildMcpToolName('github', 'search', { prefix: 'mcp__', delimiter: '__' })).toBe('mcp__github__search')
})
it('should respect maxLength', () => {
const result = buildMcpToolName('veryLongServerName', 'veryLongToolName', { maxLength: 20 })
expect(result.length).toBeLessThanOrEqual(20)
})
it('should handle collision with existingNames', () => {
const existingNames = new Set(['github_search'])
const result = buildMcpToolName('github', 'search', { existingNames })
expect(result).toBe('github_search1')
expect(existingNames.has('github_search1')).toBe(true)
})
it('should respect maxLength when adding collision suffix', () => {
const existingNames = new Set(['a'.repeat(20)])
const result = buildMcpToolName('a'.repeat(20), '', { maxLength: 20, existingNames })
expect(result.length).toBeLessThanOrEqual(20)
expect(existingNames.has(result)).toBe(true)
})
it('should handle multiple collisions with maxLength', () => {
const existingNames = new Set(['abcd', 'abc1', 'abc2'])
const result = buildMcpToolName('abcd', '', { maxLength: 4, existingNames })
expect(result).toBe('abc3')
expect(result.length).toBeLessThanOrEqual(4)
})
})
describe('generateMcpToolFunctionName', () => {
it('should return format serverName_toolName in camelCase', () => {
expect(generateMcpToolFunctionName('github', 'search_issues')).toBe('github_searchIssues')
})
it('should handle hyphenated names', () => {
expect(generateMcpToolFunctionName('my-server', 'my-tool')).toBe('myServer_myTool')
})
it('should handle undefined server name', () => {
expect(generateMcpToolFunctionName(undefined, 'search_issues')).toBe('searchIssues')
})
it('should handle collision detection', () => {
const existingNames = new Set<string>()
const first = generateMcpToolFunctionName('github', 'search', existingNames)
const second = generateMcpToolFunctionName('github', 'search', existingNames)
expect(first).toBe('github_search')
expect(second).toBe('github_search1')
})
})
describe('buildFunctionCallToolName', () => {
describe('basic format', () => {
it('should return format mcp__{server}__{tool} in camelCase', () => {
const result = buildFunctionCallToolName('github', 'search_issues')
expect(result).toBe('mcp__github__searchIssues')
})
it('should handle simple server and tool names', () => {
expect(buildFunctionCallToolName('fetch', 'get_page')).toBe('mcp__fetch__getPage')
expect(buildFunctionCallToolName('database', 'query')).toBe('mcp__database__query')
})
})
describe('valid JavaScript identifier', () => {
it('should always start with mcp__ prefix (valid JS identifier start)', () => {
const result = buildFunctionCallToolName('123server', '456tool')
expect(result).toMatch(/^mcp__/)
})
it('should handle hyphenated names with camelCase', () => {
const result = buildFunctionCallToolName('my-server', 'my-tool')
expect(result).toBe('mcp__myServer__myTool')
})
it('should be a valid JavaScript identifier', () => {
const testCases = [
['github', 'create_issue'],
['my-server', 'fetch-data'],
['test@server', 'tool#name'],
['server.name', 'tool.action']
]
for (const [server, tool] of testCases) {
const result = buildFunctionCallToolName(server, tool)
expect(result).toMatch(/^[a-zA-Z_][a-zA-Z0-9_]*$/)
}
})
})
describe('character sanitization', () => {
it('should convert special characters to camelCase boundaries', () => {
expect(buildFunctionCallToolName('my-server', 'my-tool-name')).toBe('mcp__myServer__myToolName')
expect(buildFunctionCallToolName('test@server!', 'tool#name$')).toBe('mcp__testServer__toolName')
expect(buildFunctionCallToolName('server.name', 'tool.action')).toBe('mcp__serverName__toolAction')
})
it('should handle spaces', () => {
const result = buildFunctionCallToolName('my server', 'my tool')
expect(result).toBe('mcp__myServer__myTool')
})
})
describe('length constraints', () => {
it('should not exceed 63 characters', () => {
const longServerName = 'a'.repeat(50)
const longToolName = 'b'.repeat(50)
const result = buildFunctionCallToolName(longServerName, longToolName)
expect(result.length).toBeLessThanOrEqual(63)
})
it('should not end with underscores after truncation', () => {
const longServerName = 'a'.repeat(30)
const longToolName = 'b'.repeat(30)
const result = buildFunctionCallToolName(longServerName, longToolName)
expect(result).not.toMatch(/_+$/)
expect(result.length).toBeLessThanOrEqual(63)
})
})
describe('edge cases', () => {
it('should handle empty server name', () => {
const result = buildFunctionCallToolName('', 'tool')
expect(result).toBe('mcp__tool')
})
it('should handle empty tool name', () => {
const result = buildFunctionCallToolName('server', '')
expect(result).toBe('mcp__server__')
})
it('should trim whitespace from names', () => {
const result = buildFunctionCallToolName(' server ', ' tool ')
expect(result).toBe('mcp__server__tool')
})
it('should handle mixed case by normalizing to lowercase first', () => {
const result = buildFunctionCallToolName('MyServer', 'MyTool')
expect(result).toBe('mcp__myserver__mytool')
})
it('should handle uppercase snake case', () => {
const result = buildFunctionCallToolName('MY_SERVER', 'SEARCH_ISSUES')
expect(result).toBe('mcp__myServer__searchIssues')
})
})
describe('deterministic output', () => {
it('should produce consistent results for same input', () => {
const result1 = buildFunctionCallToolName('github', 'search_repos')
const result2 = buildFunctionCallToolName('github', 'search_repos')
expect(result1).toBe(result2)
})
it('should produce different results for different inputs', () => {
const result1 = buildFunctionCallToolName('server1', 'tool')
const result2 = buildFunctionCallToolName('server2', 'tool')
expect(result1).not.toBe(result2)
})
})
describe('real-world scenarios', () => {
it('should handle GitHub MCP server', () => {
expect(buildFunctionCallToolName('github', 'create_issue')).toBe('mcp__github__createIssue')
expect(buildFunctionCallToolName('github', 'search_repositories')).toBe('mcp__github__searchRepositories')
})
it('should handle filesystem MCP server', () => {
expect(buildFunctionCallToolName('filesystem', 'read_file')).toBe('mcp__filesystem__readFile')
expect(buildFunctionCallToolName('filesystem', 'write_file')).toBe('mcp__filesystem__writeFile')
})
it('should handle hyphenated server names (common in npm packages)', () => {
expect(buildFunctionCallToolName('cherry-fetch', 'get_page')).toBe('mcp__cherryFetch__getPage')
expect(buildFunctionCallToolName('mcp-server-github', 'search')).toBe('mcp__mcpServerGithub__search')
})
it('should handle scoped npm package style names', () => {
const result = buildFunctionCallToolName('@anthropic/mcp-server', 'chat')
expect(result).toBe('mcp__AnthropicMcpServer__chat')
})
})
})

View File

@ -1,6 +1,10 @@
import { loggerService } from '@logger'
import { MESSAGE_STREAM_TIMEOUT_MS } from '@main/apiServer/config/timeouts'
import { createStreamAbortController, STREAM_TIMEOUT_REASON } from '@main/apiServer/utils/createStreamAbortController'
import {
createStreamAbortController,
STREAM_TIMEOUT_REASON,
type StreamAbortController
} from '@main/apiServer/utils/createStreamAbortController'
import { agentService, sessionMessageService, sessionService } from '@main/services/agents'
import type { Request, Response } from 'express'
@ -26,7 +30,7 @@ const verifyAgentAndSession = async (agentId: string, sessionId: string) => {
}
export const createMessage = async (req: Request, res: Response): Promise<void> => {
let clearAbortTimeout: (() => void) | undefined
let streamController: StreamAbortController | undefined
try {
const { agentId, sessionId } = req.params
@ -45,14 +49,10 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
res.setHeader('Access-Control-Allow-Origin', '*')
res.setHeader('Access-Control-Allow-Headers', 'Cache-Control')
const {
abortController,
registerAbortHandler,
clearAbortTimeout: helperClearAbortTimeout
} = createStreamAbortController({
streamController = createStreamAbortController({
timeoutMs: MESSAGE_STREAM_TIMEOUT_MS
})
clearAbortTimeout = helperClearAbortTimeout
const { abortController, registerAbortHandler, dispose } = streamController
const { stream, completion } = await sessionMessageService.createSessionMessage(
session,
messageData,
@ -64,8 +64,8 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
let responseEnded = false
let streamFinished = false
const cleanupAbortTimeout = () => {
clearAbortTimeout?.()
const cleanup = () => {
dispose()
}
const finalizeResponse = () => {
@ -78,7 +78,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
}
responseEnded = true
cleanupAbortTimeout()
cleanup()
try {
// res.write('data: {"type":"finish"}\n\n')
res.write('data: [DONE]\n\n')
@ -108,7 +108,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
* - Mark the response as ended to prevent further writes
*/
registerAbortHandler((abortReason) => {
cleanupAbortTimeout()
cleanup()
if (responseEnded) return
@ -189,7 +189,7 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
logger.error('Error writing stream error to SSE', { error: writeError })
}
responseEnded = true
cleanupAbortTimeout()
cleanup()
res.end()
}
}
@ -221,14 +221,14 @@ export const createMessage = async (req: Request, res: Response): Promise<void>
logger.error('Error writing completion error to SSE stream', { error: writeError })
}
responseEnded = true
cleanupAbortTimeout()
cleanup()
res.end()
})
// Clear timeout when response ends
res.on('close', cleanupAbortTimeout)
res.on('finish', cleanupAbortTimeout)
res.on('close', cleanup)
res.on('finish', cleanup)
} catch (error: any) {
clearAbortTimeout?.()
streamController?.dispose()
logger.error('Error in streaming message handler', {
error,
agentId: req.params.agentId,

View File

@ -4,6 +4,7 @@ export interface StreamAbortController {
abortController: AbortController
registerAbortHandler: (handler: StreamAbortHandler) => void
clearAbortTimeout: () => void
dispose: () => void
}
export const STREAM_TIMEOUT_REASON = 'stream timeout'
@ -40,6 +41,15 @@ export const createStreamAbortController = (options: CreateStreamAbortController
signal.addEventListener('abort', handleAbort, { once: true })
let disposed = false
const dispose = () => {
if (disposed) return
disposed = true
clearAbortTimeout()
signal.removeEventListener('abort', handleAbort)
}
const registerAbortHandler = (handler: StreamAbortHandler) => {
abortHandler = handler
@ -59,6 +69,7 @@ export const createStreamAbortController = (options: CreateStreamAbortController
return {
abortController,
registerAbortHandler,
clearAbortTimeout
clearAbortTimeout,
dispose
}
}

View File

@ -75,6 +75,15 @@ if (isLinux && process.env.XDG_SESSION_TYPE === 'wayland') {
app.commandLine.appendSwitch('enable-features', 'GlobalShortcutsPortal')
}
/**
* Set window class and name for Linux
* This ensures the window manager identifies the app correctly on both X11 and Wayland
*/
if (isLinux) {
app.commandLine.appendSwitch('class', 'CherryStudio')
app.commandLine.appendSwitch('name', 'CherryStudio')
}
// DocumentPolicyIncludeJSCallStacksInCrashReports: Enable features for unresponsive renderer js call stacks
// EarlyEstablishGpuChannel,EstablishGpuChannelAsync: Enable features for early establish gpu channel
// speed up the startup time

View File

@ -900,6 +900,9 @@ export async function registerIpc(mainWindow: BrowserWindow, app: Electron.App)
ipcMain.handle(IpcChannel.App_SetDisableHardwareAcceleration, (_, isDisable: boolean) => {
configManager.setDisableHardwareAcceleration(isDisable)
})
ipcMain.handle(IpcChannel.App_SetUseSystemTitleBar, (_, isActive: boolean) => {
configManager.setUseSystemTitleBar(isActive)
})
ipcMain.handle(IpcChannel.TRACE_SAVE_DATA, (_, topicId: string) => saveSpans(topicId))
ipcMain.handle(IpcChannel.TRACE_GET_DATA, (_, topicId: string, traceId: string, modelName?: string) =>
getSpans(topicId, traceId, modelName)
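A hedged sketch of the renderer side of the new channel; the preload wiring used by Cherry Studio is not part of this diff, so the direct `ipcRenderer.invoke` form and the import path below are assumptions:

```typescript
import { ipcRenderer } from 'electron'
import { IpcChannel } from '@shared/IpcChannel' // assumed import path

// Persist the "use system title bar" preference in the main-process config.
// Like the hardware-acceleration toggle, this presumably takes effect after a relaunch.
await ipcRenderer.invoke(IpcChannel.App_SetUseSystemTitleBar, true)
```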

View File

@ -4,7 +4,7 @@ import path from 'node:path'
import { loggerService } from '@logger'
import { windowService } from '@main/services/WindowService'
import { getFileExt, getTempDir } from '@main/utils/file'
import type { FileMetadata, PreprocessProvider } from '@types'
import type { FileMetadata, PreprocessProvider, PreprocessReadPdfResult } from '@types'
import { PDFDocument } from 'pdf-lib'
const logger = loggerService.withContext('BasePreprocessProvider')
@ -90,7 +90,7 @@ export default abstract class BasePreprocessProvider {
return new Promise((resolve) => setTimeout(resolve, ms))
}
public async readPdf(buffer: Buffer) {
public async readPdf(buffer: Buffer): Promise<PreprocessReadPdfResult> {
const pdfDoc = await PDFDocument.load(buffer, { ignoreEncryption: true })
return {
numPages: pdfDoc.getPageCount()
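The new `PreprocessReadPdfResult` type is not shown in this diff; judging from `readPdf`'s return value it presumably looks like the following sketch:

```typescript
// Assumed shape, inferred from readPdf() returning { numPages: pdfDoc.getPageCount() }
export interface PreprocessReadPdfResult {
  numPages: number
}
```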

View File

@ -56,8 +56,6 @@ type QuotaResponse = {
export default class MineruPreprocessProvider extends BasePreprocessProvider {
constructor(provider: PreprocessProvider, userId?: string) {
super(provider, userId)
// TODO: remove after free period ends
this.provider.apiKey = this.provider.apiKey || import.meta.env.MAIN_VITE_MINERU_API_KEY
}
public async parseFile(
@ -65,6 +63,10 @@ export default class MineruPreprocessProvider extends BasePreprocessProvider {
file: FileMetadata
): Promise<{ processedFile: FileMetadata; quota: number }> {
try {
if (!this.provider.apiKey) {
throw new Error('MinerU API key is required')
}
const filePath = fileStorage.getFilePathById(file)
logger.info(`MinerU preprocess processing started: ${filePath}`)
await this.validateFile(filePath)
@ -96,6 +98,10 @@ export default class MineruPreprocessProvider extends BasePreprocessProvider {
public async checkQuota() {
try {
if (!this.provider.apiKey) {
throw new Error('MinerU API key is required')
}
const quota = await net.fetch(`${this.provider.apiHost}/api/v4/quota`, {
method: 'GET',
headers: {

View File

@ -0,0 +1,289 @@
import fs from 'node:fs'
import path from 'node:path'
import { loggerService } from '@logger'
import { fileStorage } from '@main/services/FileStorage'
import { getFileType } from '@main/utils/file'
import { MB } from '@shared/config/constant'
import type { FileMetadata, PreprocessProvider, PreprocessReadPdfResult } from '@types'
import { net } from 'electron'
import * as z from 'zod'
import BasePreprocessProvider from './BasePreprocessProvider'
const logger = loggerService.withContext('PaddleocrPreprocessProvider')
/**
* File size limit: 50MB; page limit: 100 pages (the API only parses the first 100 pages)
* PaddleOCR API: https://aistudio.baidu.com/paddleocr
*/
export const PDF_SIZE_LIMIT_MB = 50
export const PDF_PAGE_LIMIT = 100
export const PDF_SIZE_LIMIT_BYTES = PDF_SIZE_LIMIT_MB * MB
enum FileType {
PDF = 0,
Image = 1
}
const ApiResponseSchema = z.looseObject({
result: z
.looseObject({
layoutParsingResults: z
.array(
z.looseObject({
markdown: z.looseObject({
text: z.string().min(1, 'Markdown text cannot be empty')
})
})
)
.min(1, 'At least one layout parsing result required')
})
.optional(),
errorCode: z.number().optional(),
errorMsg: z.string().optional()
})
type ApiResponse = z.infer<typeof ApiResponseSchema>
const isApiSuccess = (response: ApiResponse): boolean => {
const hasNoError = !response.errorCode || response.errorCode === 0
const hasSuccessMsg = !response.errorMsg || /success/i.test(response.errorMsg)
return hasNoError && hasSuccessMsg
}
function formatZodError(error: z.ZodError): string {
return error.issues
.map((issue) => {
const path = issue.path.join('.')
const code = issue.code
const message = issue.message
return `[${code}] ${path}: ${message}`
})
.join('; ')
}
function getErrorMessage(error: unknown): string {
if (error instanceof z.ZodError) {
return formatZodError(error)
} else if (error instanceof Error) {
return error.message
} else if (typeof error === 'string') {
return error
} else {
return 'Unknown error'
}
}
export default class PaddleocrPreprocessProvider extends BasePreprocessProvider {
constructor(provider: PreprocessProvider, userId?: string) {
super(provider, userId)
}
/**
* Parse a PDF file with the PaddleOCR layout-parsing API
* @param sourceId - Source ID used to report preprocessing progress
* @param file - File metadata; ext must be .pdf
* @returns {Promise<{processedFile: FileMetadata; quota: number}>} Processed markdown file metadata plus quota (always 0, PaddleOCR has no quota)
* @throws {Error} When PDF validation or the PaddleOCR API call fails
*/
public async parseFile(
sourceId: string,
file: FileMetadata
): Promise<{ processedFile: FileMetadata; quota: number }> {
try {
const filePath = fileStorage.getFilePathById(file)
logger.info(`PaddleOCR preprocess processing started: ${filePath}`)
const fileBuffer = await this.validateFile(filePath)
// Report initial progress
await this.sendPreprocessProgress(sourceId, 25)
// 1. Read the PDF file and encode it as base64
const fileData = fileBuffer.toString('base64')
await this.sendPreprocessProgress(sourceId, 50)
// 2. Call the PaddleOCR document processing API
const apiResponse = await this.callPaddleOcrApi(fileData, FileType.PDF)
logger.info(`PaddleOCR API call completed`)
await this.sendPreprocessProgress(sourceId, 75)
// 3. Handle API error cases
if (!isApiSuccess(apiResponse)) {
const errorCode = apiResponse.errorCode ?? -1
const errorMsg = apiResponse.errorMsg || 'Unknown error'
const fullErrorMsg = `PaddleOCR API processing failed [${errorCode}]: ${errorMsg}`
logger.error(fullErrorMsg)
throw new Error(fullErrorMsg)
}
// 4. Save the markdown text
const outputDir = await this.saveResults(apiResponse.result, file)
await this.sendPreprocessProgress(sourceId, 100)
const processedFile = await this.createProcessedFileInfo(file, outputDir)
// 5. Return the processed file data
return {
processedFile,
quota: 0
}
} catch (error: unknown) {
logger.error(`PaddleOCR preprocess processing failed:`, error as Error)
throw new Error(getErrorMessage(error))
}
}
public async checkQuota(): Promise<number> {
// PaddleOCR doesn't have quota checking, return 0
return 0
}
private getMarkdownFileName(file: FileMetadata): string {
return file.origin_name.replace(/\.(pdf|jpg|jpeg|png)$/i, '.md')
}
private async validateFile(filePath: string): Promise<Buffer> {
// Stage 1: validate the file type
logger.info(`Validating PDF file: ${filePath}`)
const ext = path.extname(filePath).toLowerCase()
if (ext !== '.pdf') {
throw new Error(`File ${filePath} is not a PDF (extension: ${ext.slice(1)})`)
}
// Stage 2: validate the file size
const stats = await fs.promises.stat(filePath)
const fileSizeBytes = stats.size
if (fileSizeBytes > PDF_SIZE_LIMIT_BYTES) {
const fileSizeMB = Math.round(fileSizeBytes / MB)
throw new Error(`PDF file size (${fileSizeMB}MB) exceeds the limit of ${PDF_SIZE_LIMIT_MB}MB`)
}
// Stage 3: validate the page count (tolerating PDF parse failures)
const pdfBuffer = await fs.promises.readFile(filePath)
let doc: PreprocessReadPdfResult | undefined
try {
doc = await this.readPdf(pdfBuffer)
} catch (error: unknown) {
// PDF parsing failed: rethrow and skip the page-count validation
const errorMsg = getErrorMessage(error)
logger.error(
`Failed to parse PDF structure (file may be corrupted or use non-standard format). ` +
`Skipping page count validation. Will attempt to process with PaddleOCR API. ` +
`Error details: ${errorMsg}. ` +
`Suggestion: If processing fails, try repairing the PDF using tools like Adobe Acrobat or online PDF repair services.`
)
throw error
}
if (doc?.numPages > PDF_PAGE_LIMIT) {
throw new Error(`PDF page count (${doc.numPages}) exceeds the limit of ${PDF_PAGE_LIMIT} pages`)
}
logger.info(`PDF validation passed: ${doc.numPages} pages, ${Math.round(fileSizeBytes / MB)}MB`)
return pdfBuffer
}
private async createProcessedFileInfo(file: FileMetadata, outputDir: string): Promise<FileMetadata> {
const finalMdFileName = this.getMarkdownFileName(file)
const finalMdPath = path.join(outputDir, finalMdFileName)
const ext = path.extname(finalMdPath)
const type = getFileType(ext)
const fileSize = (await fs.promises.stat(finalMdPath)).size
return {
...file,
name: finalMdFileName,
path: finalMdPath,
type: type,
ext: ext,
size: fileSize
}
}
private async callPaddleOcrApi(fileData: string, fileType: number): Promise<ApiResponse> {
if (!this.provider.apiHost) {
throw new Error('PaddleOCR API host is not configured')
}
const endpoint = this.provider.apiHost
const payload = {
file: fileData,
fileType: fileType,
useDocOrientationClassify: false,
useDocUnwarping: false,
useTextlineOrientation: false,
useChartRecognition: false
}
try {
const response = await net.fetch(endpoint, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Client-Platform': 'cherry-studio',
Authorization: `token ${this.provider.apiKey}`
},
body: JSON.stringify(payload)
})
if (!response.ok) {
const errorText = await response.text()
logger.error(`PaddleOCR API error: HTTP ${response.status} - ${errorText}`)
throw new Error(`HTTP ${response.status}: ${response.statusText}`)
}
const rawData = await response.json()
logger.debug('PaddleOCR API response', { data: rawData })
// Validate the response structure with Zod (throws if invalid)
const validatedData = ApiResponseSchema.parse(rawData)
return validatedData // Return the full validated response
} catch (error: unknown) {
const errorMsg = getErrorMessage(error)
logger.error(`Failed to call PaddleOCR API: ${errorMsg}`, { error })
throw new Error(`Failed to call PaddleOCR API: ${errorMsg}`)
}
}
private async saveResults(result: ApiResponse['result'], file: FileMetadata): Promise<string> {
const outputDir = path.join(this.storageDir, file.id)
// Ensure the output directory exists and is empty
if (fs.existsSync(outputDir)) {
fs.rmSync(outputDir, { recursive: true, force: true })
}
fs.mkdirSync(outputDir, { recursive: true })
// Handle the case where result is undefined (no parsing result from the API)
if (!result) {
const errorMsg = `Parsing failed: No valid parsing result from PaddleOCR API for file [ID: ${file.id}]`
// Log the error for troubleshooting
logger.error(errorMsg)
// Throw exception to interrupt function execution (no empty file created)
throw new Error(errorMsg)
}
// Zod guarantees that when result exists, layoutParsingResults is a non-empty array
const markdownText = result.layoutParsingResults
.filter((layoutResult) => layoutResult?.markdown?.text)
.map((layoutResult) => layoutResult.markdown.text)
.join('\n\n')
// Build the target markdown file name directly
const finalMdFileName = this.getMarkdownFileName(file)
const finalMdPath = path.join(outputDir, finalMdFileName)
// Save the markdown file
fs.writeFileSync(finalMdPath, markdownText, 'utf-8')
logger.info(`Saved markdown file: ${finalMdPath}`)
return outputDir
}
}
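For reference, a response that satisfies `ApiResponseSchema` and `isApiSuccess` looks roughly like the sketch below; the field values are illustrative, not captured from the real PaddleOCR service:

```typescript
// Illustrative success payload: parses with ApiResponseSchema and passes isApiSuccess
const sampleSuccess = {
  result: {
    layoutParsingResults: [{ markdown: { text: '# Page 1\n\nExtracted content...' } }]
  },
  errorCode: 0,
  errorMsg: 'Success'
}

// Illustrative failure payload: isApiSuccess() returns false, so parseFile() throws
const sampleFailure = {
  errorCode: 101,
  errorMsg: 'Invalid file'
}
```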

View File

@ -6,6 +6,8 @@ import Doc2xPreprocessProvider from './Doc2xPreprocessProvider'
import MineruPreprocessProvider from './MineruPreprocessProvider'
import MistralPreprocessProvider from './MistralPreprocessProvider'
import OpenMineruPreprocessProvider from './OpenMineruPreprocessProvider'
import PaddleocrPreprocessProvider from './PaddleocrPreprocessProvider'
export default class PreprocessProviderFactory {
static create(provider: PreprocessProvider, userId?: string): BasePreprocessProvider {
switch (provider.id) {
@ -17,6 +19,8 @@ export default class PreprocessProviderFactory {
return new MineruPreprocessProvider(provider, userId)
case 'open-mineru':
return new OpenMineruPreprocessProvider(provider, userId)
case 'paddleocr':
return new PaddleocrPreprocessProvider(provider, userId)
default:
return new DefaultPreprocessProvider(provider)
}
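A hedged sketch of wiring the new provider through the factory; only `id`, `apiHost` and `apiKey` are read by `PaddleocrPreprocessProvider`, and the remaining `PreprocessProvider` fields are not shown in this diff:

```typescript
import type { FileMetadata, PreprocessProvider } from '@types'
import PreprocessProviderFactory from './PreprocessProviderFactory'

// Illustrative provider config (endpoint and token are placeholders)
const provider = {
  id: 'paddleocr',
  apiHost: 'https://example.invalid/layout-parsing',
  apiKey: 'your-access-token'
} as PreprocessProvider

async function preprocess(sourceId: string, file: FileMetadata) {
  const preprocessor = PreprocessProviderFactory.create(provider)
  // Returns the markdown FileMetadata plus quota (always 0 for PaddleOCR)
  return await preprocessor.parseFile(sourceId, file)
}
```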

View File

@ -58,7 +58,7 @@ export default class GeneralReranker extends BaseReranker {
return this.getRerankResult(searchResults, rerankResults)
} catch (error: any) {
const errorDetails = this.formatErrorMessage(url, error, requestBody)
throw new Error(`重排序请求失败: ${error.message}\n请求详情: ${errorDetails}`)
throw new Error(`Rerank request failed: ${error.message}\nRequest details: ${errorDetails}`)
}
}
}

View File

@ -109,7 +109,7 @@ class DifyKnowledgeServer {
const parsed = SearchKnowledgeArgsSchema.safeParse(args)
if (!parsed.success) {
const errorDetails = JSON.stringify(parsed.error.format(), null, 2)
throw new Error(`无效的参数:\n${errorDetails}`)
throw new Error(`Invalid arguments:\n${errorDetails}`)
}
return await this.performSearchKnowledge(
parsed.data.id,
@ -144,7 +144,7 @@ class DifyKnowledgeServer {
if (!response.ok) {
const errorText = await response.text()
throw new Error(`API 请求失败,状态码 ${response.status}: ${errorText}`)
throw new Error(`API request failed, status code ${response.status}: ${errorText}`)
}
const apiResponse = await response.json()
@ -161,7 +161,7 @@ class DifyKnowledgeServer {
? knowledges.map((k) => `- **${k.name}** (ID: ${k.id})\n ${k.description || 'No Description'}`).join('\n')
: '- No knowledges found.'
const formattedText = `### 可用知识库:\n\n${listText}`
const formattedText = `### Available Knowledge Bases:\n\n${listText}`
return {
content: [{ type: 'text', text: formattedText }]
@ -206,13 +206,13 @@ class DifyKnowledgeServer {
if (!response.ok) {
const errorText = await response.text()
throw new Error(`API 请求失败,状态码 ${response.status}: ${errorText}`)
throw new Error(`API request failed, status code ${response.status}: ${errorText}`)
}
const searchResponse: DifySearchKnowledgeResponse = await response.json()
if (!searchResponse || !Array.isArray(searchResponse.records)) {
throw new Error(`从 Dify API 收到的响应格式无效: ${JSON.stringify(searchResponse)}`)
throw new Error(`Invalid response format from Dify API: ${JSON.stringify(searchResponse)}`)
}
const header = `### Query: ${query}\n\n`

View File

@ -9,6 +9,7 @@ import DiDiMcpServer from './didi-mcp'
import DifyKnowledgeServer from './dify-knowledge'
import FetchServer from './fetch'
import FileSystemServer from './filesystem'
import HubServer from './hub'
import MemoryServer from './memory'
import PythonServer from './python'
import ThinkingServer from './sequentialthinking'
@ -52,6 +53,9 @@ export function createInMemoryMCPServer(
case BuiltinMCPServerNames.browser: {
return new BrowserServer().server
}
case BuiltinMCPServerNames.hub: {
return new HubServer().server
}
default:
throw new Error(`Unknown in-memory MCP server: ${name}`)
}

View File

@ -0,0 +1,213 @@
# Hub MCP Server
A built-in MCP server that aggregates all active MCP servers in Cherry Studio and exposes them through `search` and `exec` tools.
## Overview
The Hub server enables LLMs to discover and call tools from all active MCP servers without needing to know the specific server names or tool signatures upfront.
## Auto Mode Integration
The Hub server is the core component of Cherry Studio's **Auto MCP Mode**. When an assistant is set to Auto mode:
1. **Automatic Injection**: The Hub server is automatically injected as the only MCP server for the assistant
2. **System Prompt**: A specialized system prompt (`HUB_MODE_SYSTEM_PROMPT`) is appended to guide the LLM on how to use the `search` and `exec` tools
3. **Dynamic Discovery**: The LLM can discover and use any tools from all active MCP servers without manual configuration
### MCP Modes
Cherry Studio supports three MCP modes per assistant:
| Mode | Description | Tools Available |
|------|-------------|-----------------|
| **Disabled** | No MCP tools | None |
| **Auto** | Hub server only | `search`, `exec` |
| **Manual** | User selects servers | Selected server tools |
### How Auto Mode Works
```
User Message
┌─────────────────────────────────────────┐
│ Assistant (mcpMode: 'auto') │
│ │
│ System Prompt + HUB_MODE_SYSTEM_PROMPT │
│ Tools: [hub.search, hub.exec] │
└─────────────────────────────────────────┘
┌─────────────────────────────────────────┐
│ LLM decides to use MCP tools │
│ │
│ 1. search({ query: "github,repo" }) │
│ 2. exec({ code: "await searchRepos()" })│
└─────────────────────────────────────────┘
┌─────────────────────────────────────────┐
│ Hub Server │
│ │
│ Aggregates all active MCP servers │
│ Routes tool calls to appropriate server │
└─────────────────────────────────────────┘
```
### Relevant Code
- **Type Definition**: `src/renderer/src/types/index.ts` - `McpMode` type and `getEffectiveMcpMode()`
- **Hub Server Constant**: `src/renderer/src/store/mcp.ts` - `hubMCPServer`
- **Server Selection**: `src/renderer/src/services/ApiService.ts` - `getMcpServersForAssistant()`
- **System Prompt**: `src/renderer/src/config/prompts.ts` - `HUB_MODE_SYSTEM_PROMPT`
- **Prompt Injection**: `src/renderer/src/aiCore/prepareParams/parameterBuilder.ts`
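The mode values are not spelled out in this README; based on the table above and the `mcpMode: 'auto'` shown in the diagram, the type presumably looks like this sketch (the real definition lives in `src/renderer/src/types/index.ts`, and the fallback below is illustrative):

```typescript
// Assumed shape of the per-assistant MCP mode
export type McpMode = 'disabled' | 'auto' | 'manual'

// Assumed helper: assistants without an explicit mcpMode fall back to a default
export function getEffectiveMcpMode(assistant: { mcpMode?: McpMode }): McpMode {
  return assistant.mcpMode ?? 'disabled' // fallback value is illustrative
}
```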
## Tools
### `search`
Search for available MCP tools by keywords.
**Parameters:**
| Name | Type | Required | Description |
|------|------|----------|-------------|
| `query` | string | Yes | Search keywords, comma-separated for OR matching |
| `limit` | number | No | Maximum results to return (default: 10, max: 50) |
**Example:**
```json
{
"query": "browser,chrome",
"limit": 5
}
```
**Returns:** JavaScript function declarations with JSDoc comments that can be used in the `exec` tool.
```javascript
// Found 2 tool(s):
/**
* Launch a browser instance
*
* @param {{ browser?: "chromium" | "firefox" | "webkit", headless?: boolean }} params
* @returns {Promise<unknown>}
*/
async function launchBrowser(params) {
return await __callTool("browser__launch_browser", params);
}
```
### `exec`
Execute JavaScript code that calls MCP tools.
**Parameters:**
| Name | Type | Required | Description |
|------|------|----------|-------------|
| `code` | string | Yes | JavaScript code to execute |
**Built-in Helpers:**
- `parallel(...promises)` - Run multiple tool calls concurrently (Promise.all)
- `settle(...promises)` - Run multiple tool calls and get all results (Promise.allSettled)
- `console.log/warn/error/info/debug` - Captured in output logs
**Example:**
```javascript
// Call a single tool
const result = await searchRepos({ query: "react" });
return result;
// Call multiple tools in parallel
const [users, repos] = await parallel(
getUsers({ limit: 10 }),
searchRepos({ query: "typescript" })
);
return { users, repos };
```
**Returns:**
```json
{
"result": { "users": [...], "repos": [...] },
"logs": ["[log] Processing..."],
"error": null
}
```
## Usage Flow
1. **Search** for tools using keywords:
```
search({ query: "github,repository" })
```
2. **Review** the returned function signatures and JSDoc
3. **Execute** code using the discovered tools:
```
exec({ code: 'return await searchRepos({ query: "react" })' })
```
## Configuration
The Hub server is a built-in server identified as `@cherry/hub`.
### Using Auto Mode (Recommended)
The easiest way to use the Hub server is through Auto mode:
1. Click the **MCP Tools** button (hammer icon) in the input bar
2. Select **Auto** mode
3. The Hub server is automatically enabled for the assistant
### Manual Configuration
Alternatively, you can enable the Hub server manually:
1. Go to **Settings** → **MCP Servers**
2. Find **Hub** in the built-in servers list
3. Toggle it on
4. In the assistant's MCP settings, select the Hub server
## Caching
- Tool definitions are cached for **1 minute** (`TOOLS_CACHE_TTL` in `index.ts`)
- Cache is automatically refreshed when expired
- Cache is invalidated when MCP servers connect/disconnect
## Limitations
- Code execution has a **60-second timeout**
- Console logs are limited to **1000 entries**
- Search results are limited to **50 tools** maximum
- The Hub server excludes itself from the aggregated server list
## Architecture
```
LLM
HubServer
├── search → ToolRegistry → SearchIndex
└── exec → Runtime → callMcpTool()
MCPService.callTool()
External MCP Servers
```
## Files
| File | Description |
|------|-------------|
| `index.ts` | Main HubServer class |
| `types.ts` | TypeScript type definitions |
| `generator.ts` | Converts MCP tools to JS functions with JSDoc |
| `tool-registry.ts` | In-memory tool cache with TTL |
| `search.ts` | Keyword-based tool search |
| `runtime.ts` | JavaScript code execution engine |
| `mcp-bridge.ts` | Bridge to Cherry Studio's MCPService |

View File

@ -0,0 +1,119 @@
import { describe, expect, it } from 'vitest'
import { generateToolFunction, generateToolsCode } from '../generator'
import type { GeneratedTool } from '../types'
describe('generator', () => {
describe('generateToolFunction', () => {
it('generates a simple tool function', () => {
const tool = {
id: 'test-id',
name: 'search_repos',
description: 'Search for GitHub repositories',
serverId: 'github',
serverName: 'github-server',
inputSchema: {
type: 'object' as const,
properties: {
query: { type: 'string', description: 'Search query' },
limit: { type: 'number', description: 'Max results' }
},
required: ['query']
},
type: 'mcp' as const
}
const existingNames = new Set<string>()
const callTool = async () => ({ success: true })
const result = generateToolFunction(tool, existingNames, callTool)
expect(result.functionName).toBe('githubServer_searchRepos')
expect(result.jsCode).toContain('async function githubServer_searchRepos')
expect(result.jsCode).toContain('Search for GitHub repositories')
expect(result.jsCode).toContain('__callTool')
})
it('handles unique function names', () => {
const tool = {
id: 'test-id',
name: 'search',
serverId: 'server1',
serverName: 'server1',
inputSchema: { type: 'object' as const, properties: {} },
type: 'mcp' as const
}
const existingNames = new Set<string>(['server1_search'])
const callTool = async () => ({})
const result = generateToolFunction(tool, existingNames, callTool)
expect(result.functionName).toBe('server1_search1')
})
it('handles enum types in schema', () => {
const tool = {
id: 'test-id',
name: 'launch_browser',
serverId: 'browser',
serverName: 'browser',
inputSchema: {
type: 'object' as const,
properties: {
browser: {
type: 'string',
enum: ['chromium', 'firefox', 'webkit']
}
}
},
type: 'mcp' as const
}
const existingNames = new Set<string>()
const callTool = async () => ({})
const result = generateToolFunction(tool, existingNames, callTool)
expect(result.jsCode).toContain('"chromium" | "firefox" | "webkit"')
})
})
describe('generateToolsCode', () => {
it('generates code for multiple tools', () => {
const tools: GeneratedTool[] = [
{
serverId: 's1',
serverName: 'server1',
toolName: 'tool1',
functionName: 'server1_tool1',
jsCode: 'async function server1_tool1() {}',
fn: async () => ({}),
signature: '{}',
returns: 'unknown'
},
{
serverId: 's2',
serverName: 'server2',
toolName: 'tool2',
functionName: 'server2_tool2',
jsCode: 'async function server2_tool2() {}',
fn: async () => ({}),
signature: '{}',
returns: 'unknown'
}
]
const result = generateToolsCode(tools)
expect(result).toContain('2 tool(s)')
expect(result).toContain('async function server1_tool1')
expect(result).toContain('async function server2_tool2')
})
it('returns message for empty tools', () => {
const result = generateToolsCode([])
expect(result).toBe('// No tools available')
})
})
})

View File

@ -0,0 +1,229 @@
import type { MCPTool } from '@types'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { HubServer } from '../index'
const mockTools: MCPTool[] = [
{
id: 'github__search_repos',
name: 'search_repos',
description: 'Search for GitHub repositories',
serverId: 'github',
serverName: 'GitHub',
inputSchema: {
type: 'object',
properties: {
query: { type: 'string', description: 'Search query' },
limit: { type: 'number', description: 'Max results' }
},
required: ['query']
},
type: 'mcp'
},
{
id: 'github__get_user',
name: 'get_user',
description: 'Get GitHub user profile',
serverId: 'github',
serverName: 'GitHub',
inputSchema: {
type: 'object',
properties: {
username: { type: 'string', description: 'GitHub username' }
},
required: ['username']
},
type: 'mcp'
},
{
id: 'database__query',
name: 'query',
description: 'Execute a database query',
serverId: 'database',
serverName: 'Database',
inputSchema: {
type: 'object',
properties: {
sql: { type: 'string', description: 'SQL query to execute' }
},
required: ['sql']
},
type: 'mcp'
}
]
vi.mock('@main/services/MCPService', () => ({
default: {
listAllActiveServerTools: vi.fn(async () => mockTools),
callToolById: vi.fn(async (toolId: string, args: unknown) => {
if (toolId === 'github__search_repos') {
return {
content: [{ type: 'text', text: JSON.stringify({ repos: ['repo1', 'repo2'], query: args }) }]
}
}
if (toolId === 'github__get_user') {
return {
content: [{ type: 'text', text: JSON.stringify({ username: (args as any).username, id: 123 }) }]
}
}
if (toolId === 'database__query') {
return {
content: [{ type: 'text', text: JSON.stringify({ rows: [{ id: 1 }, { id: 2 }] }) }]
}
}
return { content: [{ type: 'text', text: '{}' }] }
}),
abortTool: vi.fn(async () => true)
}
}))
import mcpService from '@main/services/MCPService'
describe('HubServer Integration', () => {
let hubServer: HubServer
beforeEach(() => {
vi.clearAllMocks()
hubServer = new HubServer()
})
afterEach(() => {
vi.clearAllMocks()
})
describe('full search → exec flow', () => {
it('searches for tools and executes them', async () => {
const searchResult = await (hubServer as any).handleSearch({ query: 'github,repos' })
expect(searchResult.content).toBeDefined()
const searchText = JSON.parse(searchResult.content[0].text)
expect(searchText.total).toBeGreaterThan(0)
expect(searchText.tools).toContain('github_searchRepos')
const execResult = await (hubServer as any).handleExec({
code: 'return await github_searchRepos({ query: "test" })'
})
expect(execResult.content).toBeDefined()
const execOutput = JSON.parse(execResult.content[0].text)
expect(execOutput.result).toEqual({ repos: ['repo1', 'repo2'], query: { query: 'test' } })
})
it('handles multiple tool calls in parallel', async () => {
await (hubServer as any).handleSearch({ query: 'github' })
const execResult = await (hubServer as any).handleExec({
code: `
const results = await parallel(
github_searchRepos({ query: "react" }),
github_getUser({ username: "octocat" })
);
return results
`
})
const execOutput = JSON.parse(execResult.content[0].text)
expect(execOutput.result).toHaveLength(2)
expect(execOutput.result[0]).toEqual({ repos: ['repo1', 'repo2'], query: { query: 'react' } })
expect(execOutput.result[1]).toEqual({ username: 'octocat', id: 123 })
})
it('searches across multiple servers', async () => {
const searchResult = await (hubServer as any).handleSearch({ query: 'query' })
const searchText = JSON.parse(searchResult.content[0].text)
expect(searchText.tools).toContain('database_query')
})
})
describe('tools caching', () => {
it('uses cached tools within TTL', async () => {
await (hubServer as any).handleSearch({ query: 'github' })
const firstCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
await (hubServer as any).handleSearch({ query: 'github' })
const secondCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
expect(secondCallCount).toBe(firstCallCount) // Should use cache
})
it('refreshes tools after cache invalidation', async () => {
await (hubServer as any).handleSearch({ query: 'github' })
const firstCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
hubServer.invalidateCache()
await (hubServer as any).handleSearch({ query: 'github' })
const secondCallCount = vi.mocked(mcpService.listAllActiveServerTools).mock.calls.length
expect(secondCallCount).toBe(firstCallCount + 1)
})
})
describe('error handling', () => {
it('throws error for invalid search query', async () => {
await expect((hubServer as any).handleSearch({})).rejects.toThrow('query parameter is required')
})
it('throws error for invalid exec code', async () => {
await expect((hubServer as any).handleExec({})).rejects.toThrow('code parameter is required')
})
it('handles runtime errors in exec', async () => {
const execResult = await (hubServer as any).handleExec({
code: 'throw new Error("test error")'
})
const execOutput = JSON.parse(execResult.content[0].text)
expect(execOutput.error).toBe('test error')
expect(execOutput.isError).toBe(true)
})
})
describe('exec timeouts', () => {
afterEach(() => {
vi.useRealTimers()
})
it('aborts in-flight tool calls and returns logs on timeout', async () => {
vi.useFakeTimers()
let toolCallStarted: (() => void) | null = null
const toolCallStartedPromise = new Promise<void>((resolve) => {
toolCallStarted = resolve
})
vi.mocked(mcpService.callToolById).mockImplementationOnce(async () => {
toolCallStarted?.()
return await new Promise(() => {})
})
const execPromise = (hubServer as any).handleExec({
code: `
console.log("starting");
return await github_searchRepos({ query: "hang" });
`
})
await toolCallStartedPromise
await vi.advanceTimersByTimeAsync(60000)
await vi.runAllTimersAsync()
const execResult = await execPromise
const execOutput = JSON.parse(execResult.content[0].text)
expect(execOutput.error).toBe('Execution timed out after 60000ms')
expect(execOutput.result).toBeUndefined()
expect(execOutput.isError).toBe(true)
expect(execOutput.logs).toContain('[log] starting')
expect(vi.mocked(mcpService.abortTool)).toHaveBeenCalled()
})
})
describe('server instance', () => {
it('creates a valid MCP server instance', () => {
expect(hubServer.server).toBeDefined()
expect(hubServer.server.setRequestHandler).toBeDefined()
})
})
})

View File

@ -0,0 +1,159 @@
import { describe, expect, it, vi } from 'vitest'
import { Runtime } from '../runtime'
import type { GeneratedTool } from '../types'
vi.mock('../mcp-bridge', () => ({
callMcpTool: vi.fn(async (toolId: string, params: unknown) => {
if (toolId === 'server__failing_tool') {
throw new Error('Tool failed')
}
return { toolId, params, success: true }
})
}))
const createMockTool = (partial: Partial<GeneratedTool>): GeneratedTool => ({
serverId: 'server1',
serverName: 'server1',
toolName: 'tool',
functionName: 'server1_mockTool',
jsCode: 'async function server1_mockTool() {}',
fn: async (params) => ({ result: params }),
signature: '{}',
returns: 'unknown',
...partial
})
describe('Runtime', () => {
describe('execute', () => {
it('executes simple code and returns result', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute('return 1 + 1', tools)
expect(result.result).toBe(2)
expect(result.error).toBeUndefined()
})
it('executes async code', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute('return await Promise.resolve(42)', tools)
expect(result.result).toBe(42)
})
it('calls tool functions', async () => {
const runtime = new Runtime()
const tools = [
createMockTool({
functionName: 'searchRepos',
fn: async (params) => ({ repos: ['repo1', 'repo2'], query: params })
})
]
const result = await runtime.execute('return await searchRepos({ query: "test" })', tools)
expect(result.result).toEqual({ toolId: 'searchRepos', params: { query: 'test' }, success: true })
})
it('captures console logs', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute(
`
console.log("hello");
console.warn("warning");
return "done"
`,
tools
)
expect(result.result).toBe('done')
expect(result.logs).toContain('[log] hello')
expect(result.logs).toContain('[warn] warning')
})
it('handles errors gracefully', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute('throw new Error("test error")', tools)
expect(result.result).toBeUndefined()
expect(result.error).toBe('test error')
expect(result.isError).toBe(true)
})
it('supports parallel helper', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute(
`
const results = await parallel(
Promise.resolve(1),
Promise.resolve(2),
Promise.resolve(3)
);
return results
`,
tools
)
expect(result.result).toEqual([1, 2, 3])
})
it('supports settle helper', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute(
`
const results = await settle(
Promise.resolve(1),
Promise.reject(new Error("fail"))
);
return results.map(r => r.status)
`,
tools
)
expect(result.result).toEqual(['fulfilled', 'rejected'])
})
it('returns last expression when no explicit return', async () => {
const runtime = new Runtime()
const tools: GeneratedTool[] = []
const result = await runtime.execute(
`
const x = 10;
const y = 20;
return x + y
`,
tools
)
expect(result.result).toBe(30)
})
it('stops execution when a tool throws', async () => {
const runtime = new Runtime()
const tools = [
createMockTool({
functionName: 'server__failing_tool'
})
]
const result = await runtime.execute('return await server__failing_tool({})', tools)
expect(result.result).toBeUndefined()
expect(result.error).toBe('Tool failed')
expect(result.isError).toBe(true)
})
})
})

View File

@ -0,0 +1,118 @@
import { describe, expect, it } from 'vitest'
import { searchTools } from '../search'
import type { GeneratedTool } from '../types'
const createMockTool = (partial: Partial<GeneratedTool>): GeneratedTool => {
const functionName = partial.functionName || 'server1_tool'
return {
serverId: 'server1',
serverName: 'server1',
toolName: partial.toolName || 'tool',
functionName,
jsCode: `async function ${functionName}() {}`,
fn: async () => ({}),
signature: '{}',
returns: 'unknown',
...partial
}
}
describe('search', () => {
describe('searchTools', () => {
it('returns all tools when query is empty', () => {
const tools = [
createMockTool({ toolName: 'tool1', functionName: 'tool1' }),
createMockTool({ toolName: 'tool2', functionName: 'tool2' })
]
const result = searchTools(tools, { query: '' })
expect(result.total).toBe(2)
expect(result.tools).toContain('tool1')
expect(result.tools).toContain('tool2')
})
it('filters tools by single keyword', () => {
const tools = [
createMockTool({ toolName: 'search_repos', functionName: 'searchRepos' }),
createMockTool({ toolName: 'get_user', functionName: 'getUser' }),
createMockTool({ toolName: 'search_users', functionName: 'searchUsers' })
]
const result = searchTools(tools, { query: 'search' })
expect(result.total).toBe(2)
expect(result.tools).toContain('searchRepos')
expect(result.tools).toContain('searchUsers')
expect(result.tools).not.toContain('getUser')
})
it('supports OR matching with comma-separated keywords', () => {
const tools = [
createMockTool({ toolName: 'browser_open', functionName: 'browserOpen' }),
createMockTool({ toolName: 'chrome_launch', functionName: 'chromeLaunch' }),
createMockTool({ toolName: 'file_read', functionName: 'fileRead' })
]
const result = searchTools(tools, { query: 'browser,chrome' })
expect(result.total).toBe(2)
expect(result.tools).toContain('browserOpen')
expect(result.tools).toContain('chromeLaunch')
expect(result.tools).not.toContain('fileRead')
})
it('matches against description', () => {
const tools = [
createMockTool({
toolName: 'launch',
functionName: 'launch',
description: 'Launch a browser instance'
}),
createMockTool({
toolName: 'close',
functionName: 'close',
description: 'Close a window'
})
]
const result = searchTools(tools, { query: 'browser' })
expect(result.total).toBe(1)
expect(result.tools).toContain('launch')
})
it('respects limit parameter', () => {
const tools = Array.from({ length: 20 }, (_, i) =>
createMockTool({ toolName: `tool${i}`, functionName: `server1_tool${i}` })
)
const result = searchTools(tools, { query: 'tool', limit: 5 })
expect(result.total).toBe(20)
const matches = (result.tools.match(/async function server1_tool\d+/g) || []).length
expect(matches).toBe(5)
})
it('is case insensitive', () => {
const tools = [createMockTool({ toolName: 'SearchRepos', functionName: 'searchRepos' })]
const result = searchTools(tools, { query: 'SEARCH' })
expect(result.total).toBe(1)
})
it('ranks exact matches higher', () => {
const tools = [
createMockTool({ toolName: 'searching', functionName: 'searching' }),
createMockTool({ toolName: 'search', functionName: 'search' }),
createMockTool({ toolName: 'search_more', functionName: 'searchMore' })
]
const result = searchTools(tools, { query: 'search', limit: 1 })
expect(result.tools).toContain('function search(')
})
})
})

View File

@ -0,0 +1,152 @@
import { generateMcpToolFunctionName } from '@shared/mcp'
import type { MCPTool } from '@types'
import type { GeneratedTool } from './types'
type PropertySchema = Record<string, unknown>
type InputSchema = {
type?: string
properties?: Record<string, PropertySchema>
required?: string[]
}
function schemaTypeToTS(prop: Record<string, unknown>): string {
const type = prop.type as string | string[] | undefined
const enumValues = prop.enum as unknown[] | undefined
if (enumValues && Array.isArray(enumValues)) {
return enumValues.map((v) => (typeof v === 'string' ? `"${v}"` : String(v))).join(' | ')
}
if (Array.isArray(type)) {
return type.map((t) => primitiveTypeToTS(t)).join(' | ')
}
if (type === 'array') {
const items = prop.items as Record<string, unknown> | undefined
if (items) {
return `${schemaTypeToTS(items)}[]`
}
return 'unknown[]'
}
if (type === 'object') {
return 'object'
}
return primitiveTypeToTS(type)
}
function primitiveTypeToTS(type: string | undefined): string {
switch (type) {
case 'string':
return 'string'
case 'number':
case 'integer':
return 'number'
case 'boolean':
return 'boolean'
case 'null':
return 'null'
default:
return 'unknown'
}
}
function jsonSchemaToSignature(schema: Record<string, unknown> | undefined): string {
if (!schema || typeof schema !== 'object') {
return '{}'
}
const properties = schema.properties as Record<string, Record<string, unknown>> | undefined
if (!properties) {
return '{}'
}
const required = (schema.required as string[]) || []
const parts: string[] = []
for (const [key, prop] of Object.entries(properties)) {
const isRequired = required.includes(key)
const typeStr = schemaTypeToTS(prop)
parts.push(`${key}${isRequired ? '' : '?'}: ${typeStr}`)
}
return `{ ${parts.join(', ')} }`
}
function generateJSDoc(tool: MCPTool, inputSchema: InputSchema | undefined, returns: string): string {
const lines: string[] = ['/**']
if (tool.description) {
const desc = tool.description.split('\n')[0]
lines.push(` * ${desc}`)
}
const properties = inputSchema?.properties || {}
const required = inputSchema?.required || []
if (Object.keys(properties).length > 0) {
lines.push(` * @param {Object} params`)
for (const [name, prop] of Object.entries(properties)) {
const isReq = required.includes(name)
const type = schemaTypeToTS(prop)
const paramName = isReq ? `params.${name}` : `[params.${name}]`
const desc = (prop.description as string)?.split('\n')[0] || ''
lines.push(` * @param {${type}} ${paramName} ${desc}`)
}
}
lines.push(` * @returns {Promise<${returns}>}`)
lines.push(` */`)
return lines.join('\n')
}
export function generateToolFunction(
tool: MCPTool,
existingNames: Set<string>,
callToolFn: (functionName: string, params: unknown) => Promise<unknown>
): GeneratedTool {
const functionName = generateMcpToolFunctionName(tool.serverName, tool.name, existingNames)
const inputSchema = tool.inputSchema as InputSchema | undefined
const outputSchema = tool.outputSchema as Record<string, unknown> | undefined
const signature = jsonSchemaToSignature(inputSchema)
const returns = outputSchema ? jsonSchemaToSignature(outputSchema) : 'unknown'
const jsDoc = generateJSDoc(tool, inputSchema, returns)
const jsCode = `${jsDoc}
async function ${functionName}(params) {
return await __callTool("${functionName}", params);
}`
const fn = async (params: unknown): Promise<unknown> => {
return await callToolFn(functionName, params)
}
return {
serverId: tool.serverId,
serverName: tool.serverName,
toolName: tool.name,
functionName,
jsCode,
fn,
signature,
returns,
description: tool.description
}
}
export function generateToolsCode(tools: GeneratedTool[]): string {
if (tools.length === 0) {
return '// No tools available'
}
const header = `// ${tools.length} tool(s). ALWAYS use: const r = await ToolName({...}); return r;`
const code = tools.map((t) => t.jsCode).join('\n\n')
return header + '\n\n' + code
}
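As a concrete illustration of what the generator emits: a schema with a required `query` string and an optional `limit` number produces roughly the code below. The output is reconstructed by hand from the functions above, and the exact function name depends on `generateMcpToolFunctionName` in `@shared/mcp` (the integration test maps serverName `GitHub` + tool `search_repos` to `github_searchRepos`):

```typescript
import type { MCPTool } from '@types'
import { generateToolFunction } from './generator'

const tool: MCPTool = {
  id: 'github__search_repos',
  name: 'search_repos',
  description: 'Search for GitHub repositories',
  serverId: 'github',
  serverName: 'GitHub',
  inputSchema: {
    type: 'object',
    properties: {
      query: { type: 'string', description: 'Search query' },
      limit: { type: 'number', description: 'Max results' }
    },
    required: ['query']
  },
  type: 'mcp'
}

const generated = generateToolFunction(tool, new Set(), async () => ({}))
// generated.signature === '{ query: string, limit?: number }'
// generated.jsCode is expected to look roughly like:
//
// /**
//  * Search for GitHub repositories
//  * @param {Object} params
//  * @param {string} params.query Search query
//  * @param {number} [params.limit] Max results
//  * @returns {Promise<unknown>}
//  */
// async function github_searchRepos(params) {
//   return await __callTool("github_searchRepos", params);
// }
```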

View File

@ -0,0 +1,184 @@
import { loggerService } from '@logger'
import { CacheService } from '@main/services/CacheService'
import { Server } from '@modelcontextprotocol/sdk/server/index.js'
import { CallToolRequestSchema, ErrorCode, ListToolsRequestSchema, McpError } from '@modelcontextprotocol/sdk/types.js'
import { generateToolFunction } from './generator'
import { callMcpTool, clearToolMap, listAllTools, syncToolMapFromGeneratedTools } from './mcp-bridge'
import { Runtime } from './runtime'
import { searchTools } from './search'
import type { ExecInput, GeneratedTool, SearchQuery } from './types'
const logger = loggerService.withContext('MCPServer:Hub')
const TOOLS_CACHE_KEY = 'hub:tools'
const TOOLS_CACHE_TTL = 60 * 1000 // 1 minute
/**
* Hub MCP Server - A meta-server that aggregates all active MCP servers.
*
* This server is NOT included in builtinMCPServers because:
* 1. It aggregates tools from all other MCP servers, not a standalone tool provider
* 2. It's designed for LLM "code mode" - enabling AI to discover and call tools programmatically
* 3. It should be auto-enabled when code mode features are used, not manually installed by users
*
* The server exposes two tools:
* - `search`: Find available tools by keywords, returns JS function signatures
* - `exec`: Execute JavaScript code that calls discovered tools
*/
export class HubServer {
public server: Server
private runtime: Runtime
constructor() {
this.runtime = new Runtime()
this.server = new Server(
{
name: 'hub-server',
version: '1.0.0'
},
{
capabilities: {
tools: {}
}
}
)
this.setupRequestHandlers()
}
private setupRequestHandlers(): void {
this.server.setRequestHandler(ListToolsRequestSchema, async () => {
return {
tools: [
{
name: 'search',
description:
'Search for available MCP tools by keywords. Use this FIRST to discover tools. Returns JavaScript async function declarations with JSDoc showing exact function names, parameters, and return types for use in `exec`.',
inputSchema: {
type: 'object',
properties: {
query: {
type: 'string',
description:
'Comma-separated search keywords. A tool matches if ANY keyword appears in its name, description, or server name. Example: "chrome,browser,tab" matches tools related to Chrome OR browser OR tabs.'
},
limit: {
type: 'number',
description: 'Maximum number of tools to return (default: 10, max: 50)'
}
},
required: ['query']
}
},
{
name: 'exec',
description:
'Execute JavaScript that calls MCP tools discovered via `search`. IMPORTANT: You MUST explicitly `return` the final value, or the result will be `undefined`.',
inputSchema: {
type: 'object',
properties: {
code: {
type: 'string',
description:
'JavaScript code to execute. The code runs inside an async context, so use `await` directly. Do NOT wrap your code in `(async () => { ... })()` - this causes double-wrapping and returns undefined. All discovered tools are async functions (call as `await ToolName(params)`). Helpers: `parallel(...promises)`, `settle(...promises)`, `console.*`. You MUST `return` the final value. Examples: `const r = await Tool({ id: "1" }); return r` or `return await Tool({ x: 1 })`'
}
},
required: ['code']
}
}
]
}
})
this.server.setRequestHandler(CallToolRequestSchema, async (request) => {
const { name, arguments: args } = request.params
if (!args) {
throw new McpError(ErrorCode.InvalidParams, 'No arguments provided')
}
try {
switch (name) {
case 'search':
return await this.handleSearch(args as unknown as SearchQuery)
case 'exec':
return await this.handleExec(args as unknown as ExecInput)
default:
throw new McpError(ErrorCode.MethodNotFound, `Unknown tool: ${name}`)
}
} catch (error) {
if (error instanceof McpError) {
throw error
}
logger.error(`Error executing tool ${name}:`, error as Error)
throw new McpError(
ErrorCode.InternalError,
`Error executing tool ${name}: ${error instanceof Error ? error.message : String(error)}`
)
}
})
}
private async fetchTools(): Promise<GeneratedTool[]> {
const cached = CacheService.get<GeneratedTool[]>(TOOLS_CACHE_KEY)
if (cached) {
logger.debug('Returning cached tools')
syncToolMapFromGeneratedTools(cached)
return cached
}
logger.debug('Fetching fresh tools')
const allTools = await listAllTools()
const existingNames = new Set<string>()
const tools = allTools.map((tool) => generateToolFunction(tool, existingNames, callMcpTool))
CacheService.set(TOOLS_CACHE_KEY, tools, TOOLS_CACHE_TTL)
syncToolMapFromGeneratedTools(tools)
return tools
}
invalidateCache(): void {
CacheService.remove(TOOLS_CACHE_KEY)
clearToolMap()
logger.debug('Tools cache invalidated')
}
private async handleSearch(query: SearchQuery) {
if (!query.query || typeof query.query !== 'string') {
throw new McpError(ErrorCode.InvalidParams, 'query parameter is required and must be a string')
}
const tools = await this.fetchTools()
const result = searchTools(tools, query)
return {
content: [
{
type: 'text',
text: JSON.stringify(result, null, 2)
}
]
}
}
private async handleExec(input: ExecInput) {
if (!input.code || typeof input.code !== 'string') {
throw new McpError(ErrorCode.InvalidParams, 'code parameter is required and must be a string')
}
const tools = await this.fetchTools()
const result = await this.runtime.execute(input.code, tools)
return {
content: [
{
type: 'text',
text: JSON.stringify(result, null, 2)
}
],
isError: result.isError
}
}
}
export default HubServer

View File

@ -0,0 +1,96 @@
/**
* Bridge module for Hub server to access MCPService.
* Re-exports the methods needed by tool-registry and runtime.
*/
import mcpService from '@main/services/MCPService'
import { generateMcpToolFunctionName } from '@shared/mcp'
import type { MCPCallToolResponse, MCPTool, MCPToolResultContent } from '@types'
import type { GeneratedTool } from './types'
export const listAllTools = () => mcpService.listAllActiveServerTools()
const toolFunctionNameToIdMap = new Map<string, { serverId: string; toolName: string }>()
export async function refreshToolMap(): Promise<void> {
const tools = await listAllTools()
syncToolMapFromTools(tools)
}
export function syncToolMapFromTools(tools: MCPTool[]): void {
toolFunctionNameToIdMap.clear()
const existingNames = new Set<string>()
for (const tool of tools) {
const functionName = generateMcpToolFunctionName(tool.serverName, tool.name, existingNames)
toolFunctionNameToIdMap.set(functionName, { serverId: tool.serverId, toolName: tool.name })
}
}
export function syncToolMapFromGeneratedTools(tools: GeneratedTool[]): void {
toolFunctionNameToIdMap.clear()
for (const tool of tools) {
toolFunctionNameToIdMap.set(tool.functionName, { serverId: tool.serverId, toolName: tool.toolName })
}
}
export function clearToolMap(): void {
toolFunctionNameToIdMap.clear()
}
export const callMcpTool = async (functionName: string, params: unknown, callId?: string): Promise<unknown> => {
const toolInfo = toolFunctionNameToIdMap.get(functionName)
if (!toolInfo) {
await refreshToolMap()
const retryToolInfo = toolFunctionNameToIdMap.get(functionName)
if (!retryToolInfo) {
throw new Error(`Tool not found: ${functionName}`)
}
const toolId = `${retryToolInfo.serverId}__${retryToolInfo.toolName}`
const result = await mcpService.callToolById(toolId, params, callId)
throwIfToolError(result)
return extractToolResult(result)
}
const toolId = `${toolInfo.serverId}__${toolInfo.toolName}`
const result = await mcpService.callToolById(toolId, params, callId)
throwIfToolError(result)
return extractToolResult(result)
}
export const abortMcpTool = async (callId: string): Promise<boolean> => {
return mcpService.abortTool(null as unknown as Electron.IpcMainInvokeEvent, callId)
}
function extractToolResult(result: MCPCallToolResponse): unknown {
if (!result.content || result.content.length === 0) {
return null
}
const textContent = result.content.find((c) => c.type === 'text')
if (textContent?.text) {
try {
return JSON.parse(textContent.text)
} catch {
return textContent.text
}
}
return result.content
}
function throwIfToolError(result: MCPCallToolResponse): void {
if (!result.isError) {
return
}
const textContent = extractTextContent(result.content)
throw new Error(textContent ?? 'Tool execution failed')
}
function extractTextContent(content: MCPToolResultContent[] | undefined): string | undefined {
if (!content || content.length === 0) {
return undefined
}
const textBlock = content.find((item) => item.type === 'text' && item.text)
return textBlock?.text
}

View File

@ -0,0 +1,170 @@
import crypto from 'node:crypto'
import { Worker } from 'node:worker_threads'
import { loggerService } from '@logger'
import { abortMcpTool, callMcpTool } from './mcp-bridge'
import type {
ExecOutput,
GeneratedTool,
HubWorkerCallToolMessage,
HubWorkerExecMessage,
HubWorkerMessage,
HubWorkerResultMessage
} from './types'
import { hubWorkerSource } from './worker'
const logger = loggerService.withContext('MCPServer:Hub:Runtime')
const MAX_LOGS = 1000
const EXECUTION_TIMEOUT = 60000
export class Runtime {
async execute(code: string, tools: GeneratedTool[]): Promise<ExecOutput> {
return await new Promise<ExecOutput>((resolve) => {
const logs: string[] = []
const activeCallIds = new Map<string, string>()
let finished = false
let timedOut = false
let timeoutId: NodeJS.Timeout | null = null
const worker = new Worker(hubWorkerSource, { eval: true })
const addLog = (entry: string) => {
if (logs.length >= MAX_LOGS) {
return
}
logs.push(entry)
}
const finalize = async (output: ExecOutput, terminateWorker = true) => {
if (finished) {
return
}
finished = true
if (timeoutId) {
clearTimeout(timeoutId)
}
worker.removeAllListeners()
if (terminateWorker) {
try {
await worker.terminate()
} catch (error) {
logger.warn('Failed to terminate exec worker', error as Error)
}
}
resolve(output)
}
const abortActiveTools = async () => {
const callIds = Array.from(activeCallIds.values())
activeCallIds.clear()
if (callIds.length === 0) {
return
}
await Promise.allSettled(callIds.map((callId) => abortMcpTool(callId)))
}
const handleToolCall = async (message: HubWorkerCallToolMessage) => {
if (finished || timedOut) {
return
}
const callId = crypto.randomUUID()
activeCallIds.set(message.requestId, callId)
try {
const result = await callMcpTool(message.functionName, message.params, callId)
if (finished || timedOut) {
return
}
worker.postMessage({ type: 'toolResult', requestId: message.requestId, result })
} catch (error) {
if (finished || timedOut) {
return
}
const errorMessage = error instanceof Error ? error.message : String(error)
worker.postMessage({ type: 'toolError', requestId: message.requestId, error: errorMessage })
} finally {
activeCallIds.delete(message.requestId)
}
}
const handleResult = (message: HubWorkerResultMessage) => {
const resolvedLogs = message.logs && message.logs.length > 0 ? message.logs : logs
void finalize({
result: message.result,
logs: resolvedLogs.length > 0 ? resolvedLogs : undefined
})
}
const handleError = (errorMessage: string, messageLogs?: string[], terminateWorker = true) => {
const resolvedLogs = messageLogs && messageLogs.length > 0 ? messageLogs : logs
void finalize(
{
result: undefined,
logs: resolvedLogs.length > 0 ? resolvedLogs : undefined,
error: errorMessage,
isError: true
},
terminateWorker
)
}
const handleMessage = (message: HubWorkerMessage) => {
if (!message || typeof message !== 'object') {
return
}
switch (message.type) {
case 'log':
addLog(message.entry)
break
case 'callTool':
void handleToolCall(message)
break
case 'result':
handleResult(message)
break
case 'error':
handleError(message.error, message.logs)
break
default:
break
}
}
timeoutId = setTimeout(() => {
timedOut = true
void (async () => {
await abortActiveTools()
try {
await worker.terminate()
} catch (error) {
logger.warn('Failed to terminate exec worker after timeout', error as Error)
}
handleError(`Execution timed out after ${EXECUTION_TIMEOUT}ms`, undefined, false)
})()
}, EXECUTION_TIMEOUT)
worker.on('message', handleMessage)
worker.on('error', (error) => {
logger.error('Worker execution error', error)
handleError(error instanceof Error ? error.message : String(error))
})
worker.on('exit', (code) => {
if (finished || timedOut) {
return
}
const message = code === 0 ? 'Exec worker exited unexpectedly' : `Exec worker exited with code ${code}`
logger.error(message)
handleError(message, undefined, false)
})
const execMessage: HubWorkerExecMessage = {
type: 'exec',
code,
tools: tools.map((tool) => ({ functionName: tool.functionName }))
}
worker.postMessage(execMessage)
})
}
}

View File

@ -0,0 +1,109 @@
import { generateToolsCode } from './generator'
import type { GeneratedTool, SearchQuery, SearchResult } from './types'
const DEFAULT_LIMIT = 10
const MAX_LIMIT = 50
export function searchTools(tools: GeneratedTool[], query: SearchQuery): SearchResult {
const { query: queryStr, limit = DEFAULT_LIMIT } = query
const effectiveLimit = Math.min(Math.max(1, limit), MAX_LIMIT)
const keywords = queryStr
.toLowerCase()
.split(',')
.map((k) => k.trim())
.filter((k) => k.length > 0)
if (keywords.length === 0) {
const sliced = tools.slice(0, effectiveLimit)
return {
tools: generateToolsCode(sliced),
total: tools.length
}
}
const matchedTools = tools.filter((tool) => {
const searchText = buildSearchText(tool).toLowerCase()
return keywords.some((keyword) => searchText.includes(keyword))
})
const rankedTools = rankTools(matchedTools, keywords)
const sliced = rankedTools.slice(0, effectiveLimit)
return {
tools: generateToolsCode(sliced),
total: matchedTools.length
}
}
function buildSearchText(tool: GeneratedTool): string {
const combinedName = tool.serverName ? `${tool.serverName}_${tool.toolName}` : tool.toolName
const parts = [
tool.toolName,
tool.functionName,
tool.serverName,
combinedName,
tool.description || '',
tool.signature
]
return parts.join(' ')
}
function rankTools(tools: GeneratedTool[], keywords: string[]): GeneratedTool[] {
const scored = tools.map((tool) => ({
tool,
score: calculateScore(tool, keywords)
}))
scored.sort((a, b) => b.score - a.score)
return scored.map((s) => s.tool)
}
function calculateScore(tool: GeneratedTool, keywords: string[]): number {
let score = 0
const toolName = tool.toolName.toLowerCase()
const serverName = (tool.serverName || '').toLowerCase()
const functionName = tool.functionName.toLowerCase()
const description = (tool.description || '').toLowerCase()
for (const keyword of keywords) {
// Match tool name
if (toolName === keyword) {
score += 10
} else if (toolName.startsWith(keyword)) {
score += 5
} else if (toolName.includes(keyword)) {
score += 3
}
// Match server name
if (serverName === keyword) {
score += 8
} else if (serverName.startsWith(keyword)) {
score += 4
} else if (serverName.includes(keyword)) {
score += 2
}
// Match function name (serverName_toolName format)
if (functionName === keyword) {
score += 10
} else if (functionName.startsWith(keyword)) {
score += 5
} else if (functionName.includes(keyword)) {
score += 3
}
if (description.includes(keyword)) {
const count = (description.match(new RegExp(escapeRegex(keyword), 'g')) || []).length
score += Math.min(count, 3)
}
}
return score
}
function escapeRegex(str: string): string {
return str.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
}
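A small worked example of the ranking, with scores traced by hand from `calculateScore` for the query `search` (the stubbed tools mirror the "ranks exact matches higher" unit test):

```typescript
import { searchTools } from './search'
import type { GeneratedTool } from './types'

// Minimal GeneratedTool stubs; only the fields read by buildSearchText/calculateScore matter here
const stub = (toolName: string, functionName: string): GeneratedTool => ({
  serverId: 'server1',
  serverName: 'server1',
  toolName,
  functionName,
  jsCode: `async function ${functionName}() {}`,
  fn: async () => ({}),
  signature: '{}',
  returns: 'unknown'
})

const tools = [stub('searching', 'searching'), stub('search', 'search'), stub('search_more', 'searchMore')]

// Hand-traced scores for the keyword "search":
//   search      → exact toolName (+10) + exact functionName (+10) = 20
//   searching   → prefix toolName (+5) + prefix functionName (+5)  = 10
//   search_more → prefix toolName (+5) + prefix functionName (+5)  = 10
// With limit: 1 only the exact match survives the slice
const top = searchTools(tools, { query: 'search', limit: 1 })
console.log(top.total, top.tools) // total is 3 (all matched); tools contains only the top-ranked declaration
```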

View File

@ -0,0 +1,113 @@
import type { MCPServer, MCPTool } from '@types'
export interface GeneratedTool {
serverId: string
serverName: string
toolName: string
functionName: string
jsCode: string
fn: (params: unknown) => Promise<unknown>
signature: string
returns: string
description?: string
}
export interface SearchQuery {
query: string
limit?: number
}
export interface SearchResult {
tools: string
total: number
}
export interface ExecInput {
code: string
}
export type ExecOutput = {
result: unknown
logs?: string[]
error?: string
isError?: boolean
}
export interface ToolRegistryOptions {
ttl?: number
}
export interface MCPToolWithServer extends MCPTool {
server: MCPServer
}
export interface ExecutionContext {
__callTool: (functionName: string, params: unknown) => Promise<unknown>
parallel: <T>(...promises: Promise<T>[]) => Promise<T[]>
settle: <T>(...promises: Promise<T>[]) => Promise<PromiseSettledResult<T>[]>
console: ConsoleMethods
[functionName: string]: unknown
}
export interface ConsoleMethods {
log: (...args: unknown[]) => void
warn: (...args: unknown[]) => void
error: (...args: unknown[]) => void
info: (...args: unknown[]) => void
debug: (...args: unknown[]) => void
}
export type HubWorkerTool = {
functionName: string
}
export type HubWorkerExecMessage = {
type: 'exec'
code: string
tools: HubWorkerTool[]
}
export type HubWorkerCallToolMessage = {
type: 'callTool'
requestId: string
functionName: string
params: unknown
}
export type HubWorkerToolResultMessage = {
type: 'toolResult'
requestId: string
result: unknown
}
export type HubWorkerToolErrorMessage = {
type: 'toolError'
requestId: string
error: string
}
export type HubWorkerResultMessage = {
type: 'result'
result: unknown
logs?: string[]
}
export type HubWorkerErrorMessage = {
type: 'error'
error: string
logs?: string[]
}
export type HubWorkerLogMessage = {
type: 'log'
entry: string
}
export type HubWorkerMessage =
| HubWorkerExecMessage
| HubWorkerCallToolMessage
| HubWorkerToolResultMessage
| HubWorkerToolErrorMessage
| HubWorkerResultMessage
| HubWorkerErrorMessage
| HubWorkerLogMessage

View File

@ -0,0 +1,133 @@
export const hubWorkerSource = `
const crypto = require('node:crypto')
const { parentPort } = require('node:worker_threads')
const MAX_LOGS = 1000
const logs = []
const pendingCalls = new Map()
let isExecuting = false
const stringify = (value) => {
if (value === undefined) return 'undefined'
if (value === null) return 'null'
if (typeof value === 'string') return value
if (typeof value === 'number' || typeof value === 'boolean') return String(value)
if (value instanceof Error) return value.message
try {
return JSON.stringify(value, null, 2)
} catch {
return String(value)
}
}
const pushLog = (level, args) => {
if (logs.length >= MAX_LOGS) {
return
}
const message = args.map((arg) => stringify(arg)).join(' ')
const entry = \`[\${level}] \${message}\`
logs.push(entry)
parentPort?.postMessage({ type: 'log', entry })
}
const capturedConsole = {
log: (...args) => pushLog('log', args),
warn: (...args) => pushLog('warn', args),
error: (...args) => pushLog('error', args),
info: (...args) => pushLog('info', args),
debug: (...args) => pushLog('debug', args)
}
const callTool = (functionName, params) =>
new Promise((resolve, reject) => {
const requestId = crypto.randomUUID()
pendingCalls.set(requestId, { resolve, reject })
parentPort?.postMessage({ type: 'callTool', requestId, functionName, params })
})
const buildContext = (tools) => {
const context = {
__callTool: callTool,
parallel: (...promises) => Promise.all(promises),
settle: (...promises) => Promise.allSettled(promises),
console: capturedConsole
}
for (const tool of tools) {
context[tool.functionName] = (params) => callTool(tool.functionName, params)
}
return context
}
const runCode = async (code, context) => {
const contextKeys = Object.keys(context)
const contextValues = contextKeys.map((key) => context[key])
const wrappedCode = \`
return (async () => {
\${code}
})()
\`
const fn = new Function(...contextKeys, wrappedCode)
return await fn(...contextValues)
}
const handleExec = async (code, tools) => {
if (isExecuting) {
return
}
isExecuting = true
try {
const context = buildContext(tools)
const result = await runCode(code, context)
parentPort?.postMessage({ type: 'result', result, logs: logs.length > 0 ? logs : undefined })
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error)
parentPort?.postMessage({ type: 'error', error: errorMessage, logs: logs.length > 0 ? logs : undefined })
} finally {
pendingCalls.clear()
}
}
const handleToolResult = (message) => {
const pending = pendingCalls.get(message.requestId)
if (!pending) {
return
}
pendingCalls.delete(message.requestId)
pending.resolve(message.result)
}
const handleToolError = (message) => {
const pending = pendingCalls.get(message.requestId)
if (!pending) {
return
}
pendingCalls.delete(message.requestId)
pending.reject(new Error(message.error))
}
parentPort?.on('message', (message) => {
if (!message || typeof message !== 'object') {
return
}
switch (message.type) {
case 'exec':
handleExec(message.code, message.tools ?? [])
break
case 'toolResult':
handleToolResult(message)
break
case 'toolError':
handleToolError(message)
break
default:
break
}
})
`
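
The string above only covers the sandbox side of the protocol; the host still has to spawn a worker from this source, answer its `callTool` requests against the real MCP layer, and settle on the final `result`/`error` message. A minimal host-side sketch, assuming the `HubWorker*` types and `hubWorkerSource` are imported from the modules shown earlier and that the caller supplies some `callTool` bridge (the helper name and wiring here are illustrative, not the actual runtime):

```ts
import { Worker } from 'node:worker_threads'
// assumed imports: hubWorkerSource from the module above, HubWorker* types from the types module

// Hypothetical helper: run user code inside the hub worker and resolve with its result.
// `callTool` is whatever the host uses to invoke a real MCP tool (e.g. a callToolById-style method).
function runInHubWorker(
  code: string,
  tools: HubWorkerTool[],
  callTool: (functionName: string, params: unknown) => Promise<unknown>
): Promise<{ result: unknown; logs?: string[] }> {
  return new Promise((resolve, reject) => {
    const worker = new Worker(hubWorkerSource, { eval: true })

    worker.on('message', async (message: HubWorkerMessage) => {
      switch (message.type) {
        case 'callTool':
          // Bridge the sandboxed call to a real tool, then echo success or failure back.
          try {
            const result = await callTool(message.functionName, message.params)
            worker.postMessage({ type: 'toolResult', requestId: message.requestId, result })
          } catch (error) {
            worker.postMessage({
              type: 'toolError',
              requestId: message.requestId,
              error: error instanceof Error ? error.message : String(error)
            })
          }
          break
        case 'result':
          resolve({ result: message.result, logs: message.logs })
          void worker.terminate()
          break
        case 'error':
          reject(new Error(message.error))
          void worker.terminate()
          break
        default:
          break
      }
    })

    worker.postMessage({ type: 'exec', code, tools })
  })
}
```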

View File

@ -23,6 +23,22 @@ import { promisify } from 'util'
const execAsync = promisify(require('child_process').exec)
const logger = loggerService.withContext('CodeToolsService')
// Sensitive environment variable keys to redact in logs
const SENSITIVE_ENV_KEYS = ['API_KEY', 'APIKEY', 'AUTHORIZATION', 'TOKEN', 'SECRET', 'PASSWORD']
/**
* Sanitize environment variables for safe logging
* Redacts values of sensitive keys to prevent credential leakage
*/
function sanitizeEnvForLogging(env: Record<string, string>): Record<string, string> {
const sanitized: Record<string, string> = {}
for (const [key, value] of Object.entries(env)) {
const isSensitive = SENSITIVE_ENV_KEYS.some((k) => key.toUpperCase().includes(k))
sanitized[key] = isSensitive ? '<redacted>' : value
}
return sanitized
}
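
For example (values illustrative), only keys containing one of the sensitive markers get masked:

```ts
// OPENAI_API_KEY contains 'API_KEY', so its value is masked; the base URL is left as-is.
sanitizeEnvForLogging({ OPENAI_API_KEY: 'sk-123', OPENAI_BASE_URL: 'https://api.example.com' })
// => { OPENAI_API_KEY: '<redacted>', OPENAI_BASE_URL: 'https://api.example.com' }
```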
interface VersionInfo {
installed: string | null
latest: string | null
@ -87,6 +103,8 @@ class CodeToolsService {
return '@iflow-ai/iflow-cli'
case codeTools.githubCopilotCli:
return '@github/copilot'
case codeTools.kimiCli:
return 'kimi-cli' // Python package
default:
throw new Error(`Unsupported CLI tool: ${cliTool}`)
}
@ -106,6 +124,8 @@ class CodeToolsService {
return 'iflow'
case codeTools.githubCopilotCli:
return 'copilot'
case codeTools.kimiCli:
return 'kimi'
default:
throw new Error(`Unsupported CLI tool: ${cliTool}`)
}
@ -451,7 +471,7 @@ class CodeToolsService {
}
}
const needsUpdate = !!(installedVersion && latestVersion && installedVersion !== latestVersion)
const needsUpdate = !!(latestVersion && isInstalled && (!installedVersion || installedVersion !== latestVersion))
logger.info(
`Version check result for ${cliTool}: installed=${installedVersion}, latest=${latestVersion}, needsUpdate=${needsUpdate}`
)
@ -613,7 +633,7 @@ class CodeToolsService {
}
logger.info('Setting environment variables:', Object.keys(env))
logger.info('Environment variable values:', env)
logger.debug('Environment variable values:', sanitizeEnvForLogging(env))
if (isWindows) {
// Windows uses set command
@ -636,8 +656,7 @@ class CodeToolsService {
.map(([key, value]) => {
const sanitizedValue = String(value).replace(/\\/g, '\\\\').replace(/"/g, '\\"')
const exportCmd = `export ${key}="${sanitizedValue}"`
logger.info(`Setting env var: ${key}="${sanitizedValue}"`)
logger.info(`Export command: ${exportCmd}`)
logger.debug(`Setting env var: ${key}=<redacted>`)
return exportCmd
})
.join(' && ')
@ -647,26 +666,37 @@ class CodeToolsService {
let baseCommand = isWin ? `"${executablePath}"` : `"${bunPath}" "${executablePath}"`
// Add configuration parameters for OpenAI Codex
if (cliTool === codeTools.openaiCodex && env.OPENAI_MODEL_PROVIDER && env.OPENAI_MODEL_PROVIDER != 'openai') {
const provider = env.OPENAI_MODEL_PROVIDER
const model = env.OPENAI_MODEL
// strip the trailing slash
const baseUrl = env.OPENAI_BASE_URL.replace(/\/$/, '')
// Special handling for kimi-cli: use uvx instead of bun
if (cliTool === codeTools.kimiCli) {
const uvPath = path.join(os.homedir(), HOME_CHERRY_DIR, 'bin', await getBinaryName('uv'))
baseCommand = `${uvPath} tool run ${packageName}`
}
// Add configuration parameters for OpenAI Codex using command line args
if (cliTool === codeTools.openaiCodex && env.OPENAI_MODEL_PROVIDER) {
const providerId = env.OPENAI_MODEL_PROVIDER
const providerName = env.OPENAI_MODEL_PROVIDER_NAME || providerId
const normalizedBaseUrl = env.OPENAI_BASE_URL.replace(/\/$/, '')
const model = _model
const configParams = [
`--config model_provider="${provider}"`,
`--config model="${model}"`,
`--config model_providers.${provider}.name="${provider}"`,
`--config model_providers.${provider}.base_url="${baseUrl}"`,
`--config model_providers.${provider}.env_key="OPENAI_API_KEY"`
`--config model_provider="${providerId}"`,
`--config model_providers.${providerId}.name="${providerName}"`,
`--config model_providers.${providerId}.base_url="${normalizedBaseUrl}"`,
`--config model_providers.${providerId}.env_key="OPENAI_API_KEY"`,
`--config model_providers.${providerId}.wire_api="responses"`,
`--config model="${model}"`
].join(' ')
baseCommand = `${baseCommand} ${configParams}`
}
const bunInstallPath = path.join(os.homedir(), HOME_CHERRY_DIR)
if (isInstalled) {
// Special handling for kimi-cli: uvx handles installation automatically
if (cliTool === codeTools.kimiCli) {
// uvx will automatically download and run kimi-cli, no need to install
// Just use the base command directly
} else if (isInstalled) {
// If already installed, run executable directly (with optional update message)
if (updateMessage) {
baseCommand = `echo "Checking ${cliTool} version..."${updateMessage} && ${baseCommand}`
@ -777,14 +807,15 @@ class CodeToolsService {
terminalArgs = args
}
// Set cleanup task (delete temp file after 5 minutes)
// Set cleanup task (delete temp file after 60 seconds)
// Windows Terminal (UWP app) may take longer to initialize and read the file
setTimeout(() => {
try {
fs.existsSync(batFilePath) && fs.unlinkSync(batFilePath)
} catch (error) {
logger.warn(`Failed to cleanup temp bat file: ${error}`)
}
}, 10 * 1000) // Delete temp file after 10 seconds
}, 60 * 1000) // Delete temp file after 60 seconds
break
}

View File

@ -45,6 +45,7 @@ export enum ConfigKeys {
SelectionAssistantFilterMode = 'selectionAssistantFilterMode',
SelectionAssistantFilterList = 'selectionAssistantFilterList',
DisableHardwareAcceleration = 'disableHardwareAcceleration',
UseSystemTitleBar = 'useSystemTitleBar',
Proxy = 'proxy',
EnableDeveloperMode = 'enableDeveloperMode',
ClientId = 'clientId',
@ -251,6 +252,14 @@ export class ConfigManager {
this.set(ConfigKeys.DisableHardwareAcceleration, value)
}
getUseSystemTitleBar(): boolean {
return this.get<boolean>(ConfigKeys.UseSystemTitleBar, false)
}
setUseSystemTitleBar(value: boolean) {
this.set(ConfigKeys.UseSystemTitleBar, value)
}
setAndNotify(key: string, value: unknown) {
this.set(key, value, true)
}

View File

@ -8,6 +8,27 @@ import { v4 as uuidv4 } from 'uuid'
const logger = loggerService.withContext('DxtService')
/**
* Ensure a target path is within the base directory to prevent path traversal attacks.
* This is the correct approach: validate the final resolved path rather than sanitizing input.
*
* @param basePath - The base directory that the target must be within
* @param targetPath - The target path to validate
* @returns The resolved target path if valid
* @throws Error if the target path escapes the base directory
*/
export function ensurePathWithin(basePath: string, targetPath: string): string {
const resolvedBase = path.resolve(basePath)
const resolvedTarget = path.resolve(path.normalize(targetPath))
// Must be direct child of base directory, no subdirectories allowed
if (path.dirname(resolvedTarget) !== resolvedBase) {
throw new Error('Path traversal detected: target path must be direct child of base directory')
}
return resolvedTarget
}
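
A compact sketch of the intended call pattern (the `mcpDir` value is illustrative; `path` is already in scope in this file):

```ts
const mcpDir = '/home/user/.cherrystudio/mcp'

ensurePathWithin(mcpDir, path.join(mcpDir, 'server-github'))
// => '/home/user/.cherrystudio/mcp/server-github'

ensurePathWithin(mcpDir, path.join(mcpDir, '../outside'))
// throws: Path traversal detected ...
```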
// Type definitions
export interface DxtManifest {
dxt_version: string
@ -68,6 +89,76 @@ export interface DxtUploadResult {
error?: string
}
/**
* Validate and sanitize a command to prevent path traversal attacks.
* Commands should be either:
* 1. Simple command names (e.g., "node", "python", "npx") - looked up in PATH
* 2. Absolute paths (e.g., "/usr/bin/node", "C:\\Program Files\\node\\node.exe")
* 3. Relative paths starting with ./ or .\ (relative to extractDir)
*
* Rejects commands containing path traversal sequences (..)
*
* @param command - The command to validate
* @returns The validated command
* @throws Error if command contains path traversal or is invalid
*/
export function validateCommand(command: string): string {
if (!command || typeof command !== 'string') {
throw new Error('Invalid command: command must be a non-empty string')
}
const trimmed = command.trim()
if (!trimmed) {
throw new Error('Invalid command: command cannot be empty')
}
// Check for path traversal sequences
// This catches: .., ../, ..\, /../, \..\, etc.
if (/(?:^|[/\\])\.\.(?:[/\\]|$)/.test(trimmed) || trimmed === '..') {
throw new Error(`Invalid command: path traversal detected in "${command}"`)
}
// Check for null bytes
if (trimmed.includes('\0')) {
throw new Error('Invalid command: null byte detected')
}
return trimmed
}
/**
* Validate command arguments to prevent injection attacks.
* Rejects arguments containing path traversal sequences.
*
* @param args - The arguments array to validate
* @returns The validated arguments array
* @throws Error if any argument contains path traversal
*/
export function validateArgs(args: string[]): string[] {
if (!Array.isArray(args)) {
throw new Error('Invalid args: must be an array')
}
return args.map((arg, index) => {
if (typeof arg !== 'string') {
throw new Error(`Invalid args: argument at index ${index} must be a string`)
}
// Check for null bytes
if (arg.includes('\0')) {
throw new Error(`Invalid args: null byte detected in argument at index ${index}`)
}
// Check for path traversal in arguments that look like paths
// Only validate if the arg contains path separators (indicating it's meant to be a path)
if ((arg.includes('/') || arg.includes('\\')) && /(?:^|[/\\])\.\.(?:[/\\]|$)/.test(arg)) {
throw new Error(`Invalid args: path traversal detected in argument at index ${index}`)
}
return arg
})
}
export function performVariableSubstitution(
value: string,
extractDir: string,
@ -134,12 +225,16 @@ export function applyPlatformOverrides(mcpConfig: any, extractDir: string, userC
// Apply variable substitution to all string values
if (resolvedConfig.command) {
resolvedConfig.command = performVariableSubstitution(resolvedConfig.command, extractDir, userConfig)
// Validate command after substitution to prevent path traversal attacks
resolvedConfig.command = validateCommand(resolvedConfig.command)
}
if (resolvedConfig.args) {
resolvedConfig.args = resolvedConfig.args.map((arg: string) =>
performVariableSubstitution(arg, extractDir, userConfig)
)
// Validate args after substitution to prevent path traversal attacks
resolvedConfig.args = validateArgs(resolvedConfig.args)
}
if (resolvedConfig.env) {
@ -271,10 +366,8 @@ class DxtService {
}
// Use server name as the final extract directory for automatic version management
// Sanitize the name to prevent creating subdirectories
const sanitizedName = manifest.name.replace(/\//g, '-')
const serverDirName = `server-${sanitizedName}`
const finalExtractDir = path.join(this.mcpDir, serverDirName)
const serverDirName = `server-${manifest.name}`
const finalExtractDir = ensurePathWithin(this.mcpDir, path.join(this.mcpDir, serverDirName))
// Clean up any existing version of this server
if (fs.existsSync(finalExtractDir)) {
@ -354,27 +447,15 @@ class DxtService {
public cleanupDxtServer(serverName: string): boolean {
try {
// Handle server names that might contain slashes (e.g., "anthropic/sequential-thinking")
// by replacing slashes with the same separator used during installation
const sanitizedName = serverName.replace(/\//g, '-')
const serverDirName = `server-${sanitizedName}`
const serverDir = path.join(this.mcpDir, serverDirName)
const serverDirName = `server-${serverName}`
const serverDir = ensurePathWithin(this.mcpDir, path.join(this.mcpDir, serverDirName))
// First try the sanitized path
if (fs.existsSync(serverDir)) {
logger.debug(`Removing DXT server directory: ${serverDir}`)
fs.rmSync(serverDir, { recursive: true, force: true })
return true
}
// Fallback: try with original name in case it was stored differently
const originalServerDir = path.join(this.mcpDir, `server-${serverName}`)
if (fs.existsSync(originalServerDir)) {
logger.debug(`Removing DXT server directory: ${originalServerDir}`)
fs.rmSync(originalServerDir, { recursive: true, force: true })
return true
}
logger.warn(`Server directory not found: ${serverDir}`)
return false
} catch (error) {

View File

@ -2,6 +2,7 @@
// ExportService
import { loggerService } from '@logger'
import { t } from '@main/utils/locales'
import {
AlignmentType,
BorderStyle,
@ -391,8 +392,8 @@ export class ExportService {
const buffer = await Packer.toBuffer(doc)
const filePath = dialog.showSaveDialogSync({
title: '保存文件',
filters: [{ name: 'Word Document', extensions: ['docx'] }],
title: t('dialog.save_file'),
filters: [{ name: t('dialog.word_document'), extensions: ['docx'] }],
defaultPath: fileName
})

View File

@ -9,6 +9,7 @@ import {
readTextFileWithAutoEncoding,
scanDir
} from '@main/utils/file'
import { t } from '@main/utils/locales'
import { documentExts, imageExts, KB, MB } from '@shared/config/constant'
import { parseDataUrl } from '@shared/utils'
import type { FileMetadata, NotesTreeNode } from '@types'
@ -821,9 +822,9 @@ class FileStorage {
): Promise<{ fileName: string; filePath: string; content?: Buffer; size: number } | null> => {
try {
const result: OpenDialogReturnValue = await dialog.showOpenDialog({
title: '打开文件',
title: t('dialog.open_file'),
properties: ['openFile'],
filters: [{ name: '所有文件', extensions: ['*'] }],
filters: [{ name: t('dialog.all_files'), extensions: ['*'] }],
...options
})
@ -1437,7 +1438,7 @@ class FileStorage {
): Promise<string> => {
try {
const result: SaveDialogReturnValue = await dialog.showSaveDialog({
title: '保存文件',
title: t('dialog.save_file'),
defaultPath: fileName,
...options
})
@ -1461,7 +1462,7 @@ class FileStorage {
try {
const filePath = dialog.showSaveDialogSync({
defaultPath: `${name}.png`,
filters: [{ name: 'PNG Image', extensions: ['png'] }]
filters: [{ name: t('dialog.png_image'), extensions: ['png'] }]
})
if (filePath) {
@ -1476,7 +1477,7 @@ class FileStorage {
public selectFolder = async (_: Electron.IpcMainInvokeEvent, options: OpenDialogOptions): Promise<string | null> => {
try {
const result: OpenDialogReturnValue = await dialog.showOpenDialog({
title: '选择文件夹',
title: t('dialog.select_folder'),
properties: ['openDirectory'],
...options
})

View File

@ -3,9 +3,9 @@ import os from 'node:os'
import path from 'node:path'
import { loggerService } from '@logger'
import { getMCPServersFromRedux } from '@main/apiServer/utils/mcp'
import { createInMemoryMCPServer } from '@main/mcpServers/factory'
import { makeSureDirExists, removeEnvProxy } from '@main/utils'
import { buildFunctionCallToolName } from '@main/utils/mcp'
import { findCommandInShellEnv, getBinaryName, getBinaryPath, isBinaryExists } from '@main/utils/process'
import getLoginShellEnvironment from '@main/utils/shell-env'
import { TraceMethod, withSpanFunc } from '@mcp-trace/trace-core'
@ -35,6 +35,7 @@ import { HOME_CHERRY_DIR } from '@shared/config/constant'
import type { MCPProgressEvent } from '@shared/config/types'
import type { MCPServerLogEntry } from '@shared/config/types'
import { IpcChannel } from '@shared/IpcChannel'
import { buildFunctionCallToolName } from '@shared/mcp'
import { defaultAppHeaders } from '@shared/utils'
import {
BuiltinMCPServerNames,
@ -165,6 +166,67 @@ class McpService {
this.getServerLogs = this.getServerLogs.bind(this)
}
/**
* List all tools from all active MCP servers (excluding hub).
* Used by Hub server's tool registry.
*/
public async listAllActiveServerTools(): Promise<MCPTool[]> {
const servers = await getMCPServersFromRedux()
const activeServers = servers.filter((server) => server.isActive)
const results = await Promise.allSettled(
activeServers.map(async (server) => {
const tools = await this.listToolsImpl(server)
const disabledTools = new Set(server.disabledTools ?? [])
return disabledTools.size > 0 ? tools.filter((tool) => !disabledTools.has(tool.name)) : tools
})
)
const allTools: MCPTool[] = []
results.forEach((result, index) => {
if (result.status === 'fulfilled') {
allTools.push(...result.value)
} else {
logger.error(
`[listAllActiveServerTools] Failed to list tools from ${activeServers[index].name}:`,
result.reason as Error
)
}
})
return allTools
}
/**
* Call a tool by its full ID (serverId__toolName format).
* Used by Hub server's runtime.
*/
public async callToolById(toolId: string, params: unknown, callId?: string): Promise<MCPCallToolResponse> {
const parts = toolId.split('__')
if (parts.length < 2) {
throw new Error(`Invalid tool ID format: ${toolId}`)
}
const serverId = parts[0]
const toolName = parts.slice(1).join('__')
const servers = await getMCPServersFromRedux()
const server = servers.find((s) => s.id === serverId)
if (!server) {
throw new Error(`Server not found: ${serverId}`)
}
logger.debug(`[callToolById] Calling tool ${toolName} on server ${server.name}`)
return this.callTool(null as unknown as Electron.IpcMainInvokeEvent, {
server,
name: toolName,
args: params,
callId
})
}
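
Because tool names can themselves contain double underscores, only the first `__` segment is taken as the server ID and the remainder is rejoined. A quick illustration (the ID is made up):

```ts
// 'alpha__search__issues' resolves to server 'alpha' and tool name 'search__issues'.
const parts = 'alpha__search__issues'.split('__')
parts[0] // 'alpha'
parts.slice(1).join('__') // 'search__issues'
```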
private getServerKey(server: MCPServer): string {
return JSON.stringify({
baseUrl: server.baseUrl,

View File

@ -1083,18 +1083,33 @@ export class SelectionService {
this.lastCtrlkeyDownTime = -1
}
//check if the key is ctrl key
// Check if the key is ctrl key
// Windows: VK_LCONTROL(162), VK_RCONTROL(163)
// macOS: kVK_Control(59), kVK_RightControl(62)
private isCtrlkey(vkCode: number) {
if (isMac) {
return vkCode === 59 || vkCode === 62
}
return vkCode === 162 || vkCode === 163
}
//check if the key is shift key
// Check if the key is shift key
// Windows: VK_LSHIFT(160), VK_RSHIFT(161)
// macOS: kVK_Shift(56), kVK_RightShift(60)
private isShiftkey(vkCode: number) {
if (isMac) {
return vkCode === 56 || vkCode === 60
}
return vkCode === 160 || vkCode === 161
}
//check if the key is alt key
// Check if the key is alt/option key
// Windows: VK_LMENU(164), VK_RMENU(165)
// macOS: kVK_Option(58), kVK_RightOption(61)
private isAltkey(vkCode: number) {
if (isMac) {
return vkCode === 58 || vkCode === 61
}
return vkCode === 164 || vkCode === 165
}

View File

@ -1,7 +1,10 @@
import { t } from '@main/utils/locales'
import { IpcChannel } from '@shared/IpcChannel'
import { app, dialog, session, shell, webContents } from 'electron'
import { promises as fs } from 'fs'
import { configManager } from './ConfigManager'
/**
* init the useragent of the webview session
* remove the CherryStudio and Electron from the useragent
@ -13,9 +16,11 @@ export function initSessionUserAgent() {
wvSession.setUserAgent(newUA)
wvSession.webRequest.onBeforeSendHeaders((details, cb) => {
const language = configManager.getLanguage()
const headers = {
...details.requestHeaders,
'User-Agent': details.url.includes('google.com') ? originUA : newUA
'User-Agent': details.url.includes('google.com') ? originUA : newUA,
'Accept-Language': `${language}, en;q=0.9, *;q=0.5`
}
cb({ requestHeaders: headers })
})
@ -137,9 +142,9 @@ export async function printWebviewToPDF(webviewId: number): Promise<string | nul
// Show save dialog
const { canceled, filePath } = await dialog.showSaveDialog({
title: 'Save as PDF',
title: t('dialog.save_as_pdf'),
defaultPath: defaultFilename,
filters: [{ name: 'PDF Files', extensions: ['pdf'] }]
filters: [{ name: t('dialog.pdf_files'), extensions: ['pdf'] }]
})
if (canceled || !filePath) {
@ -186,11 +191,11 @@ export async function saveWebviewAsHTML(webviewId: number): Promise<string | nul
// Show save dialog
const { canceled, filePath } = await dialog.showSaveDialog({
title: 'Save as HTML',
title: t('dialog.save_as_html'),
defaultPath: defaultFilename,
filters: [
{ name: 'HTML Files', extensions: ['html', 'htm'] },
{ name: 'All Files', extensions: ['*'] }
{ name: t('dialog.html_files'), extensions: ['html', 'htm'] },
{ name: t('dialog.all_files'), extensions: ['*'] }
]
})

View File

@ -7,11 +7,11 @@ import { isDev, isLinux, isMac, isWin } from '@main/constant'
import { getFilesDir } from '@main/utils/file'
import { MIN_WINDOW_HEIGHT, MIN_WINDOW_WIDTH } from '@shared/config/constant'
import { IpcChannel } from '@shared/IpcChannel'
import { app, BrowserWindow, nativeTheme, screen, shell } from 'electron'
import { app, BrowserWindow, nativeImage, nativeTheme, screen, shell } from 'electron'
import windowStateKeeper from 'electron-window-state'
import { join } from 'path'
import icon from '../../../build/icon.png?asset'
import iconPath from '../../../build/icon.png?asset'
import { titleBarOverlayDark, titleBarOverlayLight } from '../config'
import { configManager } from './ConfigManager'
import { contextMenu } from './ContextMenu'
@ -23,6 +23,9 @@ const DEFAULT_MINIWINDOW_HEIGHT = 400
// const logger = loggerService.withContext('WindowService')
const logger = loggerService.withContext('WindowService')
// Create nativeImage for Linux window icon (required for Wayland)
const linuxIcon = isLinux ? nativeImage.createFromPath(iconPath) : undefined
export class WindowService {
private static instance: WindowService | null = null
private mainWindow: BrowserWindow | null = null
@ -75,11 +78,12 @@ export class WindowService {
trafficLightPosition: { x: 8, y: 13 }
}
: {
frame: false // Frameless window for Windows and Linux
// On Linux, allow using system title bar if setting is enabled
frame: isLinux && configManager.getUseSystemTitleBar() ? true : false
}),
backgroundColor: isMac ? undefined : nativeTheme.shouldUseDarkColors ? '#181818' : '#FFFFFF',
darkTheme: nativeTheme.shouldUseDarkColors,
...(isLinux ? { icon } : {}),
...(isLinux ? { icon: linuxIcon } : {}),
webPreferences: {
preload: join(__dirname, '../preload/index.js'),
sandbox: false,
@ -415,6 +419,23 @@ export class WindowService {
return
}
/**
* [Linux] Special handling for window activation
* When the window is visible but covered by other windows, simply calling show() and focus()
* is not enough to bring it to the front. We need to hide it first, then show it again.
* This mimics the "close to tray and reopen" behavior which works correctly.
*/
if (isLinux && this.mainWindow.isVisible() && !this.mainWindow.isFocused()) {
this.mainWindow.hide()
setImmediate(() => {
if (this.mainWindow && !this.mainWindow.isDestroyed()) {
this.mainWindow.show()
this.mainWindow.focus()
}
})
return
}
/**
* About setVisibleOnAllWorkspaces
*

View File

@ -0,0 +1,202 @@
import path from 'path'
import { describe, expect, it } from 'vitest'
import { ensurePathWithin, validateArgs, validateCommand } from '../DxtService'
describe('ensurePathWithin', () => {
const baseDir = '/home/user/mcp'
describe('valid paths', () => {
it('should accept direct child paths', () => {
expect(ensurePathWithin(baseDir, '/home/user/mcp/server-test')).toBe('/home/user/mcp/server-test')
expect(ensurePathWithin(baseDir, '/home/user/mcp/my-server')).toBe('/home/user/mcp/my-server')
})
it('should accept paths with unicode characters', () => {
expect(ensurePathWithin(baseDir, '/home/user/mcp/服务器')).toBe('/home/user/mcp/服务器')
expect(ensurePathWithin(baseDir, '/home/user/mcp/サーバー')).toBe('/home/user/mcp/サーバー')
})
})
describe('path traversal prevention', () => {
it('should reject paths that escape base directory', () => {
expect(() => ensurePathWithin(baseDir, '/home/user/mcp/../../../etc')).toThrow('Path traversal detected')
expect(() => ensurePathWithin(baseDir, '/etc/passwd')).toThrow('Path traversal detected')
expect(() => ensurePathWithin(baseDir, '/home/user')).toThrow('Path traversal detected')
})
it('should reject subdirectories', () => {
expect(() => ensurePathWithin(baseDir, '/home/user/mcp/sub/dir')).toThrow('Path traversal detected')
expect(() => ensurePathWithin(baseDir, '/home/user/mcp/a/b/c')).toThrow('Path traversal detected')
})
it('should reject Windows-style path traversal', () => {
const winBase = 'C:\\Users\\user\\mcp'
expect(() => ensurePathWithin(winBase, 'C:\\Users\\user\\mcp\\..\\..\\Windows\\System32')).toThrow(
'Path traversal detected'
)
})
it('should reject null byte attacks', () => {
const maliciousPath = path.join(baseDir, 'server\x00/../../../etc/passwd')
expect(() => ensurePathWithin(baseDir, maliciousPath)).toThrow('Path traversal detected')
})
it('should handle encoded traversal attempts', () => {
expect(() => ensurePathWithin(baseDir, '/home/user/mcp/../escape')).toThrow('Path traversal detected')
})
})
describe('edge cases', () => {
it('should reject base directory itself', () => {
expect(() => ensurePathWithin(baseDir, '/home/user/mcp')).toThrow('Path traversal detected')
})
it('should handle relative path construction', () => {
const target = path.join(baseDir, 'server-name')
expect(ensurePathWithin(baseDir, target)).toBe('/home/user/mcp/server-name')
})
})
})
describe('validateCommand', () => {
describe('valid commands', () => {
it('should accept simple command names', () => {
expect(validateCommand('node')).toBe('node')
expect(validateCommand('python')).toBe('python')
expect(validateCommand('npx')).toBe('npx')
expect(validateCommand('uvx')).toBe('uvx')
})
it('should accept absolute paths', () => {
expect(validateCommand('/usr/bin/node')).toBe('/usr/bin/node')
expect(validateCommand('/usr/local/bin/python3')).toBe('/usr/local/bin/python3')
expect(validateCommand('C:\\Program Files\\nodejs\\node.exe')).toBe('C:\\Program Files\\nodejs\\node.exe')
})
it('should accept relative paths starting with ./', () => {
expect(validateCommand('./node_modules/.bin/tsc')).toBe('./node_modules/.bin/tsc')
expect(validateCommand('.\\scripts\\run.bat')).toBe('.\\scripts\\run.bat')
})
it('should trim whitespace', () => {
expect(validateCommand(' node ')).toBe('node')
expect(validateCommand('\tpython\n')).toBe('python')
})
})
describe('path traversal prevention', () => {
it('should reject commands with path traversal (Unix style)', () => {
expect(() => validateCommand('../../../bin/sh')).toThrow('path traversal detected')
expect(() => validateCommand('../../etc/passwd')).toThrow('path traversal detected')
expect(() => validateCommand('/usr/../../../bin/sh')).toThrow('path traversal detected')
})
it('should reject commands with path traversal (Windows style)', () => {
expect(() => validateCommand('..\\..\\..\\Windows\\System32\\cmd.exe')).toThrow('path traversal detected')
expect(() => validateCommand('..\\..\\Windows\\System32\\calc.exe')).toThrow('path traversal detected')
expect(() => validateCommand('C:\\..\\..\\Windows\\System32\\cmd.exe')).toThrow('path traversal detected')
})
it('should reject just ".."', () => {
expect(() => validateCommand('..')).toThrow('path traversal detected')
})
it('should reject mixed style path traversal', () => {
expect(() => validateCommand('../..\\mixed/..\\attack')).toThrow('path traversal detected')
})
})
describe('null byte injection', () => {
it('should reject commands with null bytes', () => {
expect(() => validateCommand('node\x00.exe')).toThrow('null byte detected')
expect(() => validateCommand('python\0')).toThrow('null byte detected')
})
})
describe('edge cases', () => {
it('should reject empty strings', () => {
expect(() => validateCommand('')).toThrow('command must be a non-empty string')
expect(() => validateCommand(' ')).toThrow('command cannot be empty')
})
it('should reject non-string input', () => {
// @ts-expect-error - testing runtime behavior
expect(() => validateCommand(null)).toThrow('command must be a non-empty string')
// @ts-expect-error - testing runtime behavior
expect(() => validateCommand(undefined)).toThrow('command must be a non-empty string')
// @ts-expect-error - testing runtime behavior
expect(() => validateCommand(123)).toThrow('command must be a non-empty string')
})
})
describe('real-world attack scenarios', () => {
it('should prevent Windows system32 command injection', () => {
expect(() => validateCommand('../../../../Windows/System32/cmd.exe')).toThrow('path traversal detected')
expect(() => validateCommand('..\\..\\..\\..\\Windows\\System32\\powershell.exe')).toThrow(
'path traversal detected'
)
})
it('should prevent Unix bin injection', () => {
expect(() => validateCommand('../../../../bin/bash')).toThrow('path traversal detected')
expect(() => validateCommand('../../../usr/bin/curl')).toThrow('path traversal detected')
})
})
})
describe('validateArgs', () => {
describe('valid arguments', () => {
it('should accept normal arguments', () => {
expect(validateArgs(['--version'])).toEqual(['--version'])
expect(validateArgs(['-y', '@anthropic/mcp-server'])).toEqual(['-y', '@anthropic/mcp-server'])
expect(validateArgs(['install', 'package-name'])).toEqual(['install', 'package-name'])
})
it('should accept arguments with safe paths', () => {
expect(validateArgs(['./src/index.ts'])).toEqual(['./src/index.ts'])
expect(validateArgs(['/absolute/path/file.js'])).toEqual(['/absolute/path/file.js'])
})
it('should accept empty array', () => {
expect(validateArgs([])).toEqual([])
})
})
describe('path traversal prevention', () => {
it('should reject arguments with path traversal', () => {
expect(() => validateArgs(['../../../etc/passwd'])).toThrow('path traversal detected')
expect(() => validateArgs(['--config', '../../secrets.json'])).toThrow('path traversal detected')
expect(() => validateArgs(['..\\..\\Windows\\System32\\config'])).toThrow('path traversal detected')
})
it('should only check path-like arguments', () => {
// Arguments without path separators should pass even with dots
expect(validateArgs(['..version'])).toEqual(['..version'])
expect(validateArgs(['test..name'])).toEqual(['test..name'])
})
})
describe('null byte injection', () => {
it('should reject arguments with null bytes', () => {
expect(() => validateArgs(['file\x00.txt'])).toThrow('null byte detected')
expect(() => validateArgs(['--config', 'path\0name'])).toThrow('null byte detected')
})
})
describe('edge cases', () => {
it('should reject non-array input', () => {
// @ts-expect-error - testing runtime behavior
expect(() => validateArgs('not an array')).toThrow('must be an array')
// @ts-expect-error - testing runtime behavior
expect(() => validateArgs(null)).toThrow('must be an array')
})
it('should reject non-string elements', () => {
// @ts-expect-error - testing runtime behavior
expect(() => validateArgs([123])).toThrow('must be a string')
// @ts-expect-error - testing runtime behavior
expect(() => validateArgs(['valid', null])).toThrow('must be a string')
})
})
})

View File

@ -0,0 +1,75 @@
import type { MCPServer, MCPTool } from '@types'
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
vi.mock('@main/apiServer/utils/mcp', () => ({
getMCPServersFromRedux: vi.fn()
}))
vi.mock('@main/services/WindowService', () => ({
windowService: {
getMainWindow: vi.fn(() => null)
}
}))
import { getMCPServersFromRedux } from '@main/apiServer/utils/mcp'
import mcpService from '@main/services/MCPService'
const baseInputSchema: { type: 'object'; properties: Record<string, unknown>; required: string[] } = {
type: 'object',
properties: {},
required: []
}
const createTool = (overrides: Partial<MCPTool>): MCPTool => ({
id: `${overrides.serverId}__${overrides.name}`,
name: overrides.name ?? 'tool',
description: overrides.description,
serverId: overrides.serverId ?? 'server',
serverName: overrides.serverName ?? 'server',
inputSchema: baseInputSchema,
type: 'mcp',
...overrides
})
describe('MCPService.listAllActiveServerTools', () => {
beforeEach(() => {
vi.clearAllMocks()
})
afterEach(() => {
vi.restoreAllMocks()
})
it('filters disabled tools per server', async () => {
const servers: MCPServer[] = [
{
id: 'alpha',
name: 'Alpha',
isActive: true,
disabledTools: ['disabled_tool']
},
{
id: 'beta',
name: 'Beta',
isActive: true
}
]
vi.mocked(getMCPServersFromRedux).mockResolvedValue(servers)
const listToolsSpy = vi.spyOn(mcpService as any, 'listToolsImpl').mockImplementation(async (server: any) => {
if (server.id === 'alpha') {
return [
createTool({ name: 'enabled_tool', serverId: server.id, serverName: server.name }),
createTool({ name: 'disabled_tool', serverId: server.id, serverName: server.name })
]
}
return [createTool({ name: 'beta_tool', serverId: server.id, serverName: server.name })]
})
const tools = await mcpService.listAllActiveServerTools()
expect(listToolsSpy).toHaveBeenCalledTimes(2)
expect(tools.map((tool) => tool.name)).toEqual(['enabled_tool', 'beta_tool'])
})
})

View File

@ -2,7 +2,7 @@ import { loggerService } from '@logger'
import { mcpApiService } from '@main/apiServer/services/mcp'
import type { ModelValidationError } from '@main/apiServer/utils'
import { validateModelId } from '@main/apiServer/utils'
import { buildFunctionCallToolName } from '@main/utils/mcp'
import { buildFunctionCallToolName } from '@shared/mcp'
import type { AgentType, MCPTool, SlashCommand, Tool } from '@types'
import { objectKeys } from '@types'
import fs from 'fs'

View File

@ -16,6 +16,7 @@ import { loggerService } from '@logger'
import { config as apiConfigService } from '@main/apiServer/config'
import { validateModelId } from '@main/apiServer/utils'
import { isWin } from '@main/constant'
import { configManager } from '@main/services/ConfigManager'
import { autoDiscoverGitBash } from '@main/utils/process'
import getLoginShellEnvironment from '@main/utils/shell-env'
import { withoutTrailingApiVersion } from '@shared/utils'
@ -34,6 +35,9 @@ const DEFAULT_AUTO_ALLOW_TOOLS = new Set(['Read', 'Glob', 'Grep'])
const shouldAutoApproveTools = process.env.CHERRY_AUTO_ALLOW_TOOLS === '1'
const NO_RESUME_COMMANDS = ['/clear']
const getLanguageInstruction = () =>
`IMPORTANT: You MUST use ${configManager.getLanguage()} language for ALL your outputs, including: (1) text responses, (2) tool call parameters like "description" fields, and (3) any user-facing content. Never use English unless the content is code, file paths, or technical identifiers.`
type UserInputMessage = {
type: 'user'
parent_tool_use_id: string | null
@ -255,9 +259,13 @@ class ClaudeCodeService implements AgentServiceInterface {
? {
type: 'preset',
preset: 'claude_code',
append: session.instructions
append: `${session.instructions}\n\n${getLanguageInstruction()}`
}
: { type: 'preset', preset: 'claude_code' },
: {
type: 'preset',
preset: 'claude_code',
append: getLanguageInstruction()
},
settingSources: ['project'],
includePartialMessages: true,
permissionMode: session.configuration?.permission_mode,

View File

@ -72,7 +72,10 @@ const generateMessageId = (): string => `msg_${uuidv4().replace(/-/g, '')}`
* Removes any local command stdout/stderr XML wrappers that should never surface to the UI.
*/
export const stripLocalCommandTags = (text: string): string => {
return text.replace(/<local-command-(stdout|stderr)>(.*?)<\/local-command-\1>/gs, '$2')
return text
.replace(/<local-command-(stdout|stderr)>(.*?)<\/local-command-\1>/gs, '$2')
.replace('(no content)', '')
.trim()
}
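
With the extra replacements, the helper now also drops the literal '(no content)' placeholder and trims what remains (inputs illustrative):

```ts
stripLocalCommandTags('<local-command-stdout>done</local-command-stdout>') // 'done'
stripLocalCommandTags('<local-command-stdout>(no content)</local-command-stdout>') // ''
```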
/**
@ -316,7 +319,46 @@ function handleUserMessage(
const chunks: AgentStreamPart[] = []
const providerMetadata = sdkMessageToProviderMetadata(message)
const content = message.message.content
const isSynthetic = message.isSynthetic ?? false
// Check if content contains tool_result blocks (synthetic tool result messages)
// This handles both SDK-flagged messages and standard tool_result content
const contentArray = Array.isArray(content) ? content : []
const hasToolResults = contentArray.some((block: any) => block.type === 'tool_result')
if (hasToolResults || message.tool_use_result || message.parent_tool_use_id) {
if (!Array.isArray(content)) {
return chunks
}
for (const block of content) {
if (block.type === 'tool_result') {
const toolResult = block as ToolResultContent
const pendingCall = state.consumePendingToolCall(toolResult.tool_use_id)
const toolCallId = pendingCall?.toolCallId ?? state.getNamespacedToolCallId(toolResult.tool_use_id)
if (toolResult.is_error) {
chunks.push({
type: 'tool-error',
toolCallId,
toolName: pendingCall?.toolName ?? 'unknown',
input: pendingCall?.input,
error: toolResult.content,
providerExecuted: true
} as AgentStreamPart)
} else {
chunks.push({
type: 'tool-result',
toolCallId,
toolName: pendingCall?.toolName ?? 'unknown',
input: pendingCall?.input,
output: toolResult.content,
providerExecuted: true
})
}
}
}
return chunks
}
// For non-synthetic messages (user-initiated content), render text content
if (typeof content === 'string') {
if (!content) {
return chunks
@ -347,39 +389,12 @@ function handleUserMessage(
return chunks
}
if (!Array.isArray(content)) {
return chunks
}
// For non-synthetic array content, render text blocks
for (const block of content) {
if (block.type === 'tool_result') {
const toolResult = block as ToolResultContent
const pendingCall = state.consumePendingToolCall(toolResult.tool_use_id)
const toolCallId = pendingCall?.toolCallId ?? state.getNamespacedToolCallId(toolResult.tool_use_id)
if (toolResult.is_error) {
chunks.push({
type: 'tool-error',
toolCallId,
toolName: pendingCall?.toolName ?? 'unknown',
input: pendingCall?.input,
error: toolResult.content,
providerExecuted: true
} as AgentStreamPart)
} else {
chunks.push({
type: 'tool-result',
toolCallId,
toolName: pendingCall?.toolName ?? 'unknown',
input: pendingCall?.input,
output: toolResult.content,
providerExecuted: true
})
}
} else if (block.type === 'text' && !isSynthetic) {
if (block.type === 'text') {
const rawText = (block as { text: string }).text
const filteredText = filterCommandTags(rawText)
// Only push text chunks if there's content after filtering
if (filteredText) {
const id = message.uuid?.toString() || generateMessageId()
chunks.push({
@ -399,8 +414,6 @@ function handleUserMessage(
providerMetadata
})
}
} else {
logger.warn('Unhandled user content block', { type: (block as any).type })
}
}

View File

@ -67,7 +67,8 @@ export class PpocrService extends OcrBaseService {
} satisfies OcrPayload
const headers: Record<string, string> = {
'Content-Type': 'application/json'
'Content-Type': 'application/json',
'Client-Platform': 'cherry-studio'
}
if (options.accessToken) {

View File

@ -1,225 +0,0 @@
import { describe, expect, it } from 'vitest'
import { buildFunctionCallToolName } from '../mcp'
describe('buildFunctionCallToolName', () => {
describe('basic format', () => {
it('should return format mcp__{server}__{tool}', () => {
const result = buildFunctionCallToolName('github', 'search_issues')
expect(result).toBe('mcp__github__search_issues')
})
it('should handle simple server and tool names', () => {
expect(buildFunctionCallToolName('fetch', 'get_page')).toBe('mcp__fetch__get_page')
expect(buildFunctionCallToolName('database', 'query')).toBe('mcp__database__query')
expect(buildFunctionCallToolName('cherry_studio', 'search')).toBe('mcp__cherry_studio__search')
})
})
describe('valid JavaScript identifier', () => {
it('should always start with mcp__ prefix (valid JS identifier start)', () => {
const result = buildFunctionCallToolName('123server', '456tool')
expect(result).toMatch(/^mcp__/)
expect(result).toBe('mcp__123server__456tool')
})
it('should only contain alphanumeric chars and underscores', () => {
const result = buildFunctionCallToolName('my-server', 'my-tool')
expect(result).toBe('mcp__my_server__my_tool')
expect(result).toMatch(/^[a-zA-Z][a-zA-Z0-9_]*$/)
})
it('should be a valid JavaScript identifier', () => {
const testCases = [
['github', 'create_issue'],
['my-server', 'fetch-data'],
['test@server', 'tool#name'],
['server.name', 'tool.action'],
['123abc', 'def456']
]
for (const [server, tool] of testCases) {
const result = buildFunctionCallToolName(server, tool)
// Valid JS identifiers match this pattern
expect(result).toMatch(/^[a-zA-Z_][a-zA-Z0-9_]*$/)
}
})
})
describe('character sanitization', () => {
it('should replace dashes with underscores', () => {
const result = buildFunctionCallToolName('my-server', 'my-tool-name')
expect(result).toBe('mcp__my_server__my_tool_name')
})
it('should replace special characters with underscores', () => {
const result = buildFunctionCallToolName('test@server!', 'tool#name$')
expect(result).toBe('mcp__test_server__tool_name')
})
it('should replace dots with underscores', () => {
const result = buildFunctionCallToolName('server.name', 'tool.action')
expect(result).toBe('mcp__server_name__tool_action')
})
it('should replace spaces with underscores', () => {
const result = buildFunctionCallToolName('my server', 'my tool')
expect(result).toBe('mcp__my_server__my_tool')
})
it('should collapse consecutive underscores', () => {
const result = buildFunctionCallToolName('my--server', 'my___tool')
expect(result).toBe('mcp__my_server__my_tool')
expect(result).not.toMatch(/_{3,}/)
})
it('should trim leading and trailing underscores from parts', () => {
const result = buildFunctionCallToolName('_server_', '_tool_')
expect(result).toBe('mcp__server__tool')
})
it('should handle names with only special characters', () => {
const result = buildFunctionCallToolName('---', '###')
expect(result).toBe('mcp____')
})
})
describe('length constraints', () => {
it('should not exceed 63 characters', () => {
const longServerName = 'a'.repeat(50)
const longToolName = 'b'.repeat(50)
const result = buildFunctionCallToolName(longServerName, longToolName)
expect(result.length).toBeLessThanOrEqual(63)
})
it('should truncate server name to max 20 chars', () => {
const longServerName = 'abcdefghijklmnopqrstuvwxyz' // 26 chars
const result = buildFunctionCallToolName(longServerName, 'tool')
expect(result).toBe('mcp__abcdefghijklmnopqrst__tool')
expect(result).toContain('abcdefghijklmnopqrst') // First 20 chars
expect(result).not.toContain('uvwxyz') // Truncated
})
it('should truncate tool name to max 35 chars', () => {
const longToolName = 'a'.repeat(40)
const result = buildFunctionCallToolName('server', longToolName)
const expectedTool = 'a'.repeat(35)
expect(result).toBe(`mcp__server__${expectedTool}`)
})
it('should not end with underscores after truncation', () => {
// Create a name that would end with underscores after truncation
const longServerName = 'a'.repeat(20)
const longToolName = 'b'.repeat(35) + '___extra'
const result = buildFunctionCallToolName(longServerName, longToolName)
expect(result).not.toMatch(/_+$/)
expect(result.length).toBeLessThanOrEqual(63)
})
it('should handle max length edge case exactly', () => {
// mcp__ (5) + server (20) + __ (2) + tool (35) = 62 chars
const server = 'a'.repeat(20)
const tool = 'b'.repeat(35)
const result = buildFunctionCallToolName(server, tool)
expect(result.length).toBe(62)
expect(result).toBe(`mcp__${'a'.repeat(20)}__${'b'.repeat(35)}`)
})
})
describe('edge cases', () => {
it('should handle empty server name', () => {
const result = buildFunctionCallToolName('', 'tool')
expect(result).toBe('mcp____tool')
})
it('should handle empty tool name', () => {
const result = buildFunctionCallToolName('server', '')
expect(result).toBe('mcp__server__')
})
it('should handle both empty names', () => {
const result = buildFunctionCallToolName('', '')
expect(result).toBe('mcp____')
})
it('should handle whitespace-only names', () => {
const result = buildFunctionCallToolName(' ', ' ')
expect(result).toBe('mcp____')
})
it('should trim whitespace from names', () => {
const result = buildFunctionCallToolName(' server ', ' tool ')
expect(result).toBe('mcp__server__tool')
})
it('should handle unicode characters', () => {
const result = buildFunctionCallToolName('服务器', '工具')
// Unicode chars are replaced with underscores, then collapsed
expect(result).toMatch(/^mcp__/)
})
it('should handle mixed case', () => {
const result = buildFunctionCallToolName('MyServer', 'MyTool')
expect(result).toBe('mcp__MyServer__MyTool')
})
})
describe('deterministic output', () => {
it('should produce consistent results for same input', () => {
const serverName = 'github'
const toolName = 'search_repos'
const result1 = buildFunctionCallToolName(serverName, toolName)
const result2 = buildFunctionCallToolName(serverName, toolName)
const result3 = buildFunctionCallToolName(serverName, toolName)
expect(result1).toBe(result2)
expect(result2).toBe(result3)
})
it('should produce different results for different inputs', () => {
const result1 = buildFunctionCallToolName('server1', 'tool')
const result2 = buildFunctionCallToolName('server2', 'tool')
const result3 = buildFunctionCallToolName('server', 'tool1')
const result4 = buildFunctionCallToolName('server', 'tool2')
expect(result1).not.toBe(result2)
expect(result3).not.toBe(result4)
})
})
describe('real-world scenarios', () => {
it('should handle GitHub MCP server', () => {
expect(buildFunctionCallToolName('github', 'create_issue')).toBe('mcp__github__create_issue')
expect(buildFunctionCallToolName('github', 'search_repositories')).toBe('mcp__github__search_repositories')
expect(buildFunctionCallToolName('github', 'get_pull_request')).toBe('mcp__github__get_pull_request')
})
it('should handle filesystem MCP server', () => {
expect(buildFunctionCallToolName('filesystem', 'read_file')).toBe('mcp__filesystem__read_file')
expect(buildFunctionCallToolName('filesystem', 'write_file')).toBe('mcp__filesystem__write_file')
expect(buildFunctionCallToolName('filesystem', 'list_directory')).toBe('mcp__filesystem__list_directory')
})
it('should handle hyphenated server names (common in npm packages)', () => {
expect(buildFunctionCallToolName('cherry-fetch', 'get_page')).toBe('mcp__cherry_fetch__get_page')
expect(buildFunctionCallToolName('mcp-server-github', 'search')).toBe('mcp__mcp_server_github__search')
})
it('should handle scoped npm package style names', () => {
const result = buildFunctionCallToolName('@anthropic/mcp-server', 'chat')
expect(result).toBe('mcp__anthropic_mcp_server__chat')
})
it('should handle tools with long descriptive names', () => {
const result = buildFunctionCallToolName('github', 'search_repositories_by_language_and_stars')
expect(result.length).toBeLessThanOrEqual(63)
expect(result).toMatch(/^mcp__github__search_repositories_by_lan/)
})
})
})

View File

@ -1,3 +1,5 @@
import { configManager } from '@main/services/ConfigManager'
import EnUs from '../../renderer/src/i18n/locales/en-us.json'
import ZhCn from '../../renderer/src/i18n/locales/zh-cn.json'
import ZhTw from '../../renderer/src/i18n/locales/zh-tw.json'
@ -27,4 +29,21 @@ const locales = Object.fromEntries(
].map(([locale, translation]) => [locale, { translation }])
)
export { locales }
/**
* Get translation by key path (e.g., 'dialog.save_file')
* This is a simplified version for main process, similar to i18next's t() function
*/
const t = (key: string): string => {
const locale = locales[configManager.getLanguage()]
const keys = key.split('.')
let result: any = locale.translation
for (const k of keys) {
result = result?.[k]
if (result === undefined) {
return key
}
}
return typeof result === 'string' ? result : key
}
export { locales, t }
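
Usage then mirrors renderer-side i18next lookups: dot-separated keys are resolved against the bundled locale JSON for the configured language (outputs shown are illustrative and assume the key exists in the locale files):

```ts
t('dialog.save_file') // localized string for the current language, e.g. '保存文件' for zh-CN
t('dialog.missing_key_example') // 'dialog.missing_key_example' (unknown keys fall back to the key itself)
```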

View File

@ -1,29 +0,0 @@
/**
* Builds a valid JavaScript function name for MCP tool calls.
* Format: mcp__{server_name}__{tool_name}
*
* @param serverName - The MCP server name
* @param toolName - The tool name from the server
* @returns A valid JS identifier in format mcp__{server}__{tool}, max 63 chars
*/
export function buildFunctionCallToolName(serverName: string, toolName: string): string {
// Sanitize to valid JS identifier chars (alphanumeric + underscore only)
const sanitize = (str: string): string =>
str
.trim()
.replace(/[^a-zA-Z0-9]/g, '_') // Replace all non-alphanumeric with underscore
.replace(/_{2,}/g, '_') // Collapse multiple underscores
.replace(/^_+|_+$/g, '') // Trim leading/trailing underscores
const server = sanitize(serverName).slice(0, 20) // Keep server name short
const tool = sanitize(toolName).slice(0, 35) // More room for tool name
let name = `mcp__${server}__${tool}`
// Ensure max 63 chars and clean trailing underscores
if (name.length > 63) {
name = name.slice(0, 63).replace(/_+$/, '')
}
return name
}

View File

@ -503,6 +503,7 @@ const api = {
quoteToMainWindow: (text: string) => ipcRenderer.invoke(IpcChannel.App_QuoteToMain, text),
setDisableHardwareAcceleration: (isDisable: boolean) =>
ipcRenderer.invoke(IpcChannel.App_SetDisableHardwareAcceleration, isDisable),
setUseSystemTitleBar: (isActive: boolean) => ipcRenderer.invoke(IpcChannel.App_SetUseSystemTitleBar, isActive),
trace: {
saveData: (topicId: string) => ipcRenderer.invoke(IpcChannel.TRACE_SAVE_DATA, topicId),
getData: (topicId: string, traceId: string, modelName?: string) =>

View File

@ -238,15 +238,16 @@ export class AiSdkToChunkAdapter {
// === Tool call events (raw AI SDK events, in case they were not handled by middleware) ===
// case 'tool-input-start':
// case 'tool-input-delta':
// case 'tool-input-end':
// this.toolCallHandler.handleToolCallCreated(chunk)
// break
case 'tool-input-start':
this.toolCallHandler.handleToolInputStart(chunk)
break
case 'tool-input-delta':
this.toolCallHandler.handleToolInputDelta(chunk)
break
case 'tool-input-end':
this.toolCallHandler.handleToolInputEnd(chunk)
break
// case 'tool-input-delta':
// this.toolCallHandler.handleToolCallCreated(chunk)
// break
case 'tool-call':
this.toolCallHandler.handleToolCall(chunk)
break

View File

@ -5,18 +5,12 @@
*/
import { loggerService } from '@logger'
import { CallToolResultSchema } from '@modelcontextprotocol/sdk/types.js'
import { processKnowledgeReferences } from '@renderer/services/KnowledgeService'
import type {
BaseTool,
MCPCallToolResponse,
MCPTool,
MCPToolResponse,
MCPToolResultContent,
NormalToolResponse
} from '@renderer/types'
import type { BaseTool, MCPTool, MCPToolResponse, NormalToolResponse } from '@renderer/types'
import type { Chunk } from '@renderer/types/chunk'
import { ChunkType } from '@renderer/types/chunk'
import type { ToolSet, TypedToolCall, TypedToolError, TypedToolResult } from 'ai'
import type { ProviderMetadata, ToolSet, TypedToolCall, TypedToolError, TypedToolResult } from 'ai'
const logger = loggerService.withContext('ToolCallChunkHandler')
@ -26,6 +20,8 @@ export type ToolcallsMap = {
args: any
// mcpTool can now be an MCPTool or the generic type we create for provider tools
tool: BaseTool
// Streaming arguments buffer
streamingArgs?: string
}
/**
*
@ -71,110 +67,169 @@ export class ToolCallChunkHandler {
return ToolCallChunkHandler.addActiveToolCallImpl(toolCallId, map)
}
// /**
// * Set the onChunk callback
// */
// public setOnChunk(callback: (chunk: Chunk) => void): void {
// this.onChunk = callback
// }
/**
* Determine whether a tool call targets a builtin tool, a known MCP tool, or a provider-executed tool
*/
private determineToolType(toolName: string, toolCallId: string): BaseTool {
let mcpTool: MCPTool | undefined
if (toolName.startsWith('builtin_')) {
return {
id: toolCallId,
name: toolName,
description: toolName,
type: 'builtin'
} as BaseTool
} else if ((mcpTool = this.mcpTools.find((t) => t.id === toolName) as MCPTool)) {
return mcpTool
} else {
return {
id: toolCallId,
name: toolName,
description: toolName,
type: 'provider'
}
}
}
// handleToolCallCreated(
// chunk:
// | {
// type: 'tool-input-start'
// id: string
// toolName: string
// providerMetadata?: ProviderMetadata
// providerExecuted?: boolean
// }
// | {
// type: 'tool-input-end'
// id: string
// providerMetadata?: ProviderMetadata
// }
// | {
// type: 'tool-input-delta'
// id: string
// delta: string
// providerMetadata?: ProviderMetadata
// }
// ): void {
// switch (chunk.type) {
// case 'tool-input-start': {
// // If the lookup succeeds here, it is an mcpTool
// // if (this.activeToolCalls.get(chunk.id)) return
/**
* Handle tool-input-start - a tool call has started streaming its arguments
*/
public handleToolInputStart(chunk: {
type: 'tool-input-start'
id: string
toolName: string
providerMetadata?: ProviderMetadata
providerExecuted?: boolean
}): void {
const { id: toolCallId, toolName, providerExecuted } = chunk
// const tool: BaseTool | MCPTool = {
// id: chunk.id,
// name: chunk.toolName,
// description: chunk.toolName,
// type: chunk.toolName.startsWith('builtin_') ? 'builtin' : 'provider'
// }
// this.activeToolCalls.set(chunk.id, {
// toolCallId: chunk.id,
// toolName: chunk.toolName,
// args: '',
// tool
// })
// const toolResponse: MCPToolResponse | NormalToolResponse = {
// id: chunk.id,
// tool: tool,
// arguments: {},
// status: 'pending',
// toolCallId: chunk.id
// }
// this.onChunk({
// type: ChunkType.MCP_TOOL_PENDING,
// responses: [toolResponse]
// })
// break
// }
// case 'tool-input-delta': {
// const toolCall = this.activeToolCalls.get(chunk.id)
// if (!toolCall) {
// logger.warn(`🔧 [ToolCallChunkHandler] Tool call not found: ${chunk.id}`)
// return
// }
// toolCall.args += chunk.delta
// break
// }
// case 'tool-input-end': {
// const toolCall = this.activeToolCalls.get(chunk.id)
// this.activeToolCalls.delete(chunk.id)
// if (!toolCall) {
// logger.warn(`🔧 [ToolCallChunkHandler] Tool call not found: ${chunk.id}`)
// return
// }
// // const toolResponse: ToolCallResponse = {
// // id: toolCall.toolCallId,
// // tool: toolCall.tool,
// // arguments: toolCall.args,
// // status: 'pending',
// // toolCallId: toolCall.toolCallId
// // }
// // logger.debug('toolResponse', toolResponse)
// // this.onChunk({
// // type: ChunkType.MCP_TOOL_PENDING,
// // responses: [toolResponse]
// // })
// break
// }
// }
// // if (!toolCall) {
// // Logger.warn(`🔧 [ToolCallChunkHandler] Tool call not found: ${chunk.id}`)
// // return
// // }
// // this.onChunk({
// // type: ChunkType.MCP_TOOL_CREATED,
// // tool_calls: [
// // {
// // id: chunk.id,
// // name: chunk.toolName,
// // status: 'pending'
// // }
// // ]
// // })
// }
if (!toolCallId || !toolName) {
logger.warn(`🔧 [ToolCallChunkHandler] Invalid tool-input-start chunk: missing id or toolName`)
return
}
// Skip if this tool call is already being tracked
if (this.activeToolCalls.has(toolCallId)) {
return
}
let tool: BaseTool
if (providerExecuted) {
tool = {
id: toolCallId,
name: toolName,
description: toolName,
type: 'provider'
} as BaseTool
} else {
tool = this.determineToolType(toolName, toolCallId)
}
// Initialize the streaming tool call entry
this.addActiveToolCall(toolCallId, {
toolCallId,
toolName,
args: undefined,
tool,
streamingArgs: ''
})
logger.info(`🔧 [ToolCallChunkHandler] Tool input streaming started: ${toolName} (${toolCallId})`)
// Emit the initial streaming chunk
const toolResponse: MCPToolResponse | NormalToolResponse = {
id: toolCallId,
tool: tool,
arguments: undefined,
status: 'streaming',
toolCallId: toolCallId,
partialArguments: ''
}
this.onChunk({
type: ChunkType.MCP_TOOL_STREAMING,
responses: [toolResponse]
})
}
/**
* Handle tool-input-delta - accumulate streamed argument fragments
*/
public handleToolInputDelta(chunk: {
type: 'tool-input-delta'
id: string
delta: string
providerMetadata?: ProviderMetadata
}): void {
const { id: toolCallId, delta } = chunk
const toolCall = this.activeToolCalls.get(toolCallId)
if (!toolCall) {
logger.warn(`🔧 [ToolCallChunkHandler] Tool call not found for delta: ${toolCallId}`)
return
}
// Accumulate the streamed arguments
toolCall.streamingArgs = (toolCall.streamingArgs || '') + delta
// Emit a streaming chunk update
const toolResponse: MCPToolResponse | NormalToolResponse = {
id: toolCallId,
tool: toolCall.tool,
arguments: undefined,
status: 'streaming',
toolCallId: toolCallId,
partialArguments: toolCall.streamingArgs
}
this.onChunk({
type: ChunkType.MCP_TOOL_STREAMING,
responses: [toolResponse]
})
}
/**
* Handle tool-input-end - parse the accumulated arguments and emit the completion chunk
*/
public handleToolInputEnd(chunk: { type: 'tool-input-end'; id: string; providerMetadata?: ProviderMetadata }): void {
const { id: toolCallId } = chunk
const toolCall = this.activeToolCalls.get(toolCallId)
if (!toolCall) {
logger.warn(`🔧 [ToolCallChunkHandler] Tool call not found for end: ${toolCallId}`)
return
}
// Try to parse the complete JSON arguments
let parsedArgs: any = undefined
if (toolCall.streamingArgs) {
try {
parsedArgs = JSON.parse(toolCall.streamingArgs)
toolCall.args = parsedArgs
} catch (e) {
logger.warn(`🔧 [ToolCallChunkHandler] Failed to parse streaming args for ${toolCallId}:`, e as Error)
// Keep the raw string
toolCall.args = toolCall.streamingArgs
}
}
logger.info(`🔧 [ToolCallChunkHandler] Tool input streaming completed: ${toolCall.toolName} (${toolCallId})`)
// Emit the final streaming chunk (status moves to pending)
const toolResponse: MCPToolResponse | NormalToolResponse = {
id: toolCallId,
tool: toolCall.tool,
arguments: parsedArgs,
status: 'pending',
toolCallId: toolCallId,
partialArguments: toolCall.streamingArgs
}
this.onChunk({
type: ChunkType.MCP_TOOL_STREAMING,
responses: [toolResponse]
})
}
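// Dispatch sketch (illustrative): a stream consumer is expected to route AI SDK
// tool-input events to the three handlers above. The loop and the `handler` variable
// are assumptions for illustration, not code from this class.
//
//   for await (const part of result.fullStream) {
//     if (part.type === 'tool-input-start') handler.handleToolInputStart(part)
//     else if (part.type === 'tool-input-delta') handler.handleToolInputDelta(part)
//     else if (part.type === 'tool-input-end') handler.handleToolInputEnd(part)
//   }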
/**
*
@ -191,6 +246,15 @@ export class ToolCallChunkHandler {
return
}
// Check if this tool call was already processed via streaming events
const existingToolCall = this.activeToolCalls.get(toolCallId)
if (existingToolCall?.streamingArgs !== undefined) {
// Tool call was already processed via streaming events (tool-input-start/delta/end)
// Update args if needed, but don't emit duplicate pending chunk
existingToolCall.args = args
return
}
let tool: BaseTool
let mcpTool: MCPTool | undefined
// Branch handling based on the providerExecuted flag
@ -216,11 +280,6 @@ export class ToolCallChunkHandler {
// For client-executed MCP tools, reuse the existing logic
// toolName is mcpTool.id (registered with id as key in convertMcpToolsToAiSdkTools)
logger.info(`[ToolCallChunkHandler] Handling client-side MCP tool: ${toolName}`)
// mcpTool = this.mcpTools.find((t) => t.name === toolName) as MCPTool
// if (!mcpTool) {
// logger.warn(`[ToolCallChunkHandler] MCP tool not found: ${toolName}`)
// return
// }
tool = mcpTool
} else {
tool = {
@ -357,40 +416,20 @@ export class ToolCallChunkHandler {
export const addActiveToolCall = ToolCallChunkHandler.addActiveToolCall.bind(ToolCallChunkHandler)
/**
* Extract image data URLs from tool output using the MCP SDK result schema
*/
function extractImagesFromToolOutput(output: unknown): string[] {
if (!output) {
return []
}
const contents: unknown[] = []
if (isMcpCallToolResponse(output)) {
contents.push(...output.content)
} else if (Array.isArray(output)) {
contents.push(...output)
} else if (hasContentArray(output)) {
contents.push(...output.content)
const result = CallToolResultSchema.safeParse(output)
if (result.success) {
return result.data.content
.filter((c) => c.type === 'image')
.map((content) => `data:${content.mimeType ?? 'image/png'};base64,${content.data}`)
}
return contents
.filter(isMcpImageContent)
.map((content) => `data:${content.mimeType ?? 'image/png'};base64,${content.data}`)
}
function isMcpCallToolResponse(value: unknown): value is MCPCallToolResponse {
return typeof value === 'object' && value !== null && Array.isArray((value as MCPCallToolResponse).content)
}
function hasContentArray(value: unknown): value is { content: unknown[] } {
return typeof value === 'object' && value !== null && Array.isArray((value as { content?: unknown }).content)
}
function isMcpImageContent(content: unknown): content is MCPToolResultContent & { data: string } {
if (typeof content !== 'object' || content === null) {
return false
}
const resultContent = content as MCPToolResultContent
return resultContent.type === 'image' && typeof resultContent.data === 'string'
return []
}
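// Example (hypothetical MCP tool result): with the schema-based path above, an output such as
//   { content: [{ type: 'image', data: 'iVBORw0KG...', mimeType: 'image/png' }, { type: 'text', text: 'done' }] }
// yields ['data:image/png;base64,iVBORw0KG...']; anything that fails CallToolResultSchema parsing yields [].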

View File

@ -26,7 +26,7 @@ const logger = loggerService.withContext('PluginBuilder')
* Build the list of AI SDK plugins from the middleware config
*/
export function buildPlugins(
middlewareConfig: AiSdkMiddlewareConfig & { assistant: Assistant; topicId?: string }
middlewareConfig: AiSdkMiddlewareConfig & { assistant: Assistant; topicId?: string; mcpMode?: string }
): AiPlugin[] {
const plugins: AiPlugin<any, any>[] = []
@ -112,7 +112,8 @@ export function buildPlugins(
if (middlewareConfig.isPromptToolUse) {
plugins.push(
createPromptToolUsePlugin({
enabled: true
enabled: true,
mcpMode: middlewareConfig.mcpMode
})
)
}

View File

@ -0,0 +1,85 @@
/**
* Anthropic Prompt Caching Middleware
* @see https://ai-sdk.dev/providers/ai-sdk-providers/anthropic#cache-control
*/
import type { LanguageModelV2Message } from '@ai-sdk/provider'
import { estimateTextTokens } from '@renderer/services/TokenService'
import type { Provider } from '@renderer/types'
import type { LanguageModelMiddleware } from 'ai'
const cacheProviderOptions = {
anthropic: { cacheControl: { type: 'ephemeral' } }
}
function estimateContentTokens(content: LanguageModelV2Message['content']): number {
if (typeof content === 'string') return estimateTextTokens(content)
if (Array.isArray(content)) {
return content.reduce((acc, part) => {
if (part.type === 'text') {
return acc + estimateTextTokens(part.text as string)
}
return acc
}, 0)
}
return 0
}
export function anthropicCacheMiddleware(provider: Provider): LanguageModelMiddleware {
return {
middlewareVersion: 'v2',
transformParams: async ({ params }) => {
const settings = provider.anthropicCacheControl
if (!settings?.tokenThreshold || !Array.isArray(params.prompt) || params.prompt.length === 0) {
return params
}
const { tokenThreshold, cacheSystemMessage, cacheLastNMessages } = settings
const messages = [...params.prompt]
let cachedCount = 0
// Cache system message (providerOptions on message object)
if (cacheSystemMessage) {
for (let i = 0; i < messages.length; i++) {
const msg = messages[i] as LanguageModelV2Message
if (msg.role === 'system' && estimateContentTokens(msg.content) >= tokenThreshold) {
messages[i] = { ...msg, providerOptions: cacheProviderOptions }
break
}
}
}
// Cache last N non-system messages (providerOptions on content parts)
if (cacheLastNMessages > 0) {
const cumsumTokens: number[] = []
let tokenSum = 0
for (let i = 0; i < messages.length; i++) {
const msg = messages[i] as LanguageModelV2Message
tokenSum += estimateContentTokens(msg.content)
cumsumTokens.push(tokenSum)
}
for (let i = messages.length - 1; i >= 0 && cachedCount < cacheLastNMessages; i--) {
const msg = messages[i] as LanguageModelV2Message
if (msg.role === 'system' || cumsumTokens[i] < tokenThreshold || msg.content.length === 0) {
continue
}
const newContent = [...msg.content]
const lastIndex = newContent.length - 1
newContent[lastIndex] = {
...newContent[lastIndex],
providerOptions: cacheProviderOptions
}
messages[i] = {
...msg,
content: newContent
} as LanguageModelV2Message
cachedCount++
}
}
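// Worked example (illustrative numbers): with tokenThreshold = 1000, cacheLastNMessages = 2
// and estimated tokens [200 (system), 500, 400, 300], the cumulative sums are [200, 700, 1100, 1400].
// Walking backwards, index 3 (1400 >= 1000) and index 2 (1100 >= 1000) each get cacheControl on
// their last content part; cachedCount then reaches 2 and the loop stops before indices 1 and 0.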
return { ...params, prompt: messages }
}
}
}
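// Usage sketch (illustrative; the real wiring lives elsewhere in this codebase).
// `baseModel` and `provider` are hypothetical placeholders for values the caller already has.
//
//   import { wrapLanguageModel } from 'ai'
//
//   const cachedModel = wrapLanguageModel({
//     model: baseModel,
//     middleware: anthropicCacheMiddleware(provider)
//   })
//
// With provider.anthropicCacheControl = { tokenThreshold: 1024, cacheSystemMessage: true, cacheLastNMessages: 2 },
// a sufficiently large system message and the last two non-system messages are annotated with
// anthropic cacheControl: { type: 'ephemeral' } provider options.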

View File

@ -263,7 +263,7 @@ describe('messageConverter', () => {
})
describe('convertMessagesToSdkMessages', () => {
it('collapses to [system?, user(image)] for image enhancement models', async () => {
it('preserves conversation history and merges images for image enhancement models', async () => {
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
const initialUser = createMessage('user')
initialUser.__mockContent = 'Start editing'
@ -277,7 +277,16 @@ describe('messageConverter', () => {
const result = await convertMessagesToSdkMessages([initialUser, assistant, finalUser], model)
// Preserves all conversation history, only merges images into the last user message
expect(result).toEqual([
{
role: 'user',
content: [{ type: 'text', text: 'Start editing' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'Here is the current preview' }]
},
{
role: 'user',
content: [
@ -288,7 +297,7 @@ describe('messageConverter', () => {
])
})
it('preserves system messages and collapses others for enhancement payloads', async () => {
it('preserves system messages and conversation history for enhancement payloads', async () => {
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
const fileUser = createMessage('user')
fileUser.__mockContent = 'Use this document as inspiration'
@ -309,8 +318,17 @@ describe('messageConverter', () => {
const result = await convertMessagesToSdkMessages([fileUser, assistant, finalUser], model)
// Preserves system message, conversation history, and merges images into the last user message
expect(result).toEqual([
{ role: 'system', content: 'fileid://reference' },
{
role: 'user',
content: [{ type: 'text', text: 'Use this document as inspiration' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'Generated previews ready' }]
},
{
role: 'user',
content: [
@ -321,7 +339,7 @@ describe('messageConverter', () => {
])
})
it('handles no previous assistant message with images', async () => {
it('returns messages as-is when no previous assistant message with images', async () => {
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
const user1 = createMessage('user')
user1.__mockContent = 'Start'
@ -331,7 +349,12 @@ describe('messageConverter', () => {
const result = await convertMessagesToSdkMessages([user1, user2], model)
// No images to merge, returns all messages as-is
expect(result).toEqual([
{
role: 'user',
content: [{ type: 'text', text: 'Start' }]
},
{
role: 'user',
content: [{ type: 'text', text: 'Continue without images' }]
@ -339,7 +362,7 @@ describe('messageConverter', () => {
])
})
it('handles assistant message without images', async () => {
it('returns messages as-is when assistant message has no images', async () => {
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
const user1 = createMessage('user')
user1.__mockContent = 'Start'
@ -353,7 +376,16 @@ describe('messageConverter', () => {
const result = await convertMessagesToSdkMessages([user1, assistant, user2], model)
// No images to merge, returns all messages as-is
expect(result).toEqual([
{
role: 'user',
content: [{ type: 'text', text: 'Start' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'Text only response' }]
},
{
role: 'user',
content: [{ type: 'text', text: 'Follow up' }]
@ -361,7 +393,7 @@ describe('messageConverter', () => {
])
})
it('handles multiple assistant messages by using the most recent one', async () => {
it('merges images from the most recent assistant message', async () => {
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
const user1 = createMessage('user')
user1.__mockContent = 'Start'
@ -382,7 +414,24 @@ describe('messageConverter', () => {
const result = await convertMessagesToSdkMessages([user1, assistant1, user2, assistant2, user3], model)
// Preserves all history, merges only the most recent assistant's images
expect(result).toEqual([
{
role: 'user',
content: [{ type: 'text', text: 'Start' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'First response' }]
},
{
role: 'user',
content: [{ type: 'text', text: 'Continue' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'Second response' }]
},
{
role: 'user',
content: [
@ -393,7 +442,7 @@ describe('messageConverter', () => {
])
})
it('handles conversation ending with assistant message', async () => {
it('returns messages as-is when conversation ends with assistant message', async () => {
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
const user = createMessage('user')
user.__mockContent = 'Start'
@ -406,15 +455,20 @@ describe('messageConverter', () => {
// The user message is the last user message, but since the assistant comes after,
// there's no "previous" assistant message (search starts from messages.length-2 backwards)
// So no images to merge, returns all messages as-is
expect(result).toEqual([
{
role: 'user',
content: [{ type: 'text', text: 'Start' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'Response with image' }]
}
])
})
it('handles empty content in last user message', async () => {
it('merges images even when last user message has empty content', async () => {
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
const user1 = createMessage('user')
user1.__mockContent = 'Start'
@ -428,12 +482,79 @@ describe('messageConverter', () => {
const result = await convertMessagesToSdkMessages([user1, assistant, user2], model)
// Preserves history, merges images into last user message (even if empty)
expect(result).toEqual([
{
role: 'user',
content: [{ type: 'text', text: 'Start' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'Here is the preview' }]
},
{
role: 'user',
content: [{ type: 'image', image: 'https://example.com/preview.png' }]
}
])
})
it('allows using LLM conversation context for image generation', async () => {
// This test verifies the key use case: switching from LLM to image enhancement model
// and using the previous conversation as context for image generation
const model = createModel({ id: 'qwen-image-edit', name: 'Qwen Image Edit', provider: 'qwen', group: 'qwen' })
// Simulate a conversation that started with a regular LLM
const user1 = createMessage('user')
user1.__mockContent = 'Help me design a futuristic robot with blue lights'
const assistant1 = createMessage('assistant')
assistant1.__mockContent =
'Great idea! The robot could have a sleek metallic body with glowing blue LED strips...'
assistant1.__mockImageBlocks = [] // LLM response, no images
const user2 = createMessage('user')
user2.__mockContent = 'Yes, and add some chrome accents'
const assistant2 = createMessage('assistant')
assistant2.__mockContent = 'Perfect! Chrome accents would complement the blue lights beautifully...'
assistant2.__mockImageBlocks = [] // Still LLM response, no images
// User switches to image enhancement model and asks for image generation
const user3 = createMessage('user')
user3.__mockContent = 'Now generate an image based on our discussion'
const result = await convertMessagesToSdkMessages([user1, assistant1, user2, assistant2, user3], model)
// All conversation history should be preserved for context
// No images to merge since previous assistant had no images
expect(result).toEqual([
{
role: 'user',
content: [{ type: 'text', text: 'Help me design a futuristic robot with blue lights' }]
},
{
role: 'assistant',
content: [
{
type: 'text',
text: 'Great idea! The robot could have a sleek metallic body with glowing blue LED strips...'
}
]
},
{
role: 'user',
content: [{ type: 'text', text: 'Yes, and add some chrome accents' }]
},
{
role: 'assistant',
content: [{ type: 'text', text: 'Perfect! Chrome accents would complement the blue lights beautifully...' }]
},
{
role: 'user',
content: [{ type: 'text', text: 'Now generate an image based on our discussion' }]
}
])
})
})
})

View File

@ -229,23 +229,15 @@ export async function convertMessagesToSdkMessages(messages: Message[], model: M
sdkMessages.push(...(Array.isArray(sdkMessage) ? sdkMessage : [sdkMessage]))
}
// Special handling for image enhancement models
// Target behavior: Collapse the conversation into [system?, user(image)].
// Explanation of why we don't simply use slice:
// 1) We need to preserve all system messages: During the convertMessageToSdkParam process, native file uploads may insert `system(fileid://...)`.
// Directly slicing the original messages or already converted sdkMessages could easily result in missing these system instructions.
// Therefore, we first perform a full conversion and then aggregate the system messages afterward.
// 2) The conversion process may split messages: A single user message might be broken into two SDK messages—[system, user].
// Slicing either side could lead to obtaining semantically incorrect fragments (e.g., only the split-out system message).
// 3) The “previous assistant message” is not necessarily the second-to-last one: There might be system messages or other message blocks inserted in between,
// making a simple slice(-2) assumption too rigid. Here, we trace back from the end of the original messages to locate the most recent assistant message, which better aligns with business semantics.
// 4) This is a “collapse” rather than a simple “slice”: Ultimately, we need to synthesize a new user message
// (with text from the last user message and images from the previous assistant message). Using slice can only extract subarrays,
// which still require reassembly; constructing directly according to the target structure is clearer and more reliable.
// These models support multi-turn conversations but need images from previous assistant messages
// to be merged into the current user message for editing/enhancement operations.
//
// Key behaviors:
// 1. Preserve all conversation history for context
// 2. Find images from the previous assistant message and merge them into the last user message
// 3. This allows users to switch from LLM conversations and use that context for image generation
if (isImageEnhancementModel(model)) {
// Collect all system messages (including ones generated from file uploads)
const systemMessages = sdkMessages.filter((m): m is SystemModelMessage => m.role === 'system')
// Find the last user message (SDK converted)
// Find the last user SDK message index
const lastUserSdkIndex = (() => {
for (let i = sdkMessages.length - 1; i >= 0; i--) {
if (sdkMessages[i].role === 'user') return i
@ -253,7 +245,10 @@ export async function convertMessagesToSdkMessages(messages: Message[], model: M
return -1
})()
const lastUserSdk = lastUserSdkIndex >= 0 ? (sdkMessages[lastUserSdkIndex] as UserModelMessage) : null
// If no user message found, return messages as-is
if (lastUserSdkIndex < 0) {
return sdkMessages
}
// Find the nearest preceding assistant message in original messages
let prevAssistant: Message | null = null
@ -264,31 +259,33 @@ export async function convertMessagesToSdkMessages(messages: Message[], model: M
}
}
// Build the final user content parts
// Check if there are images from the previous assistant message
const imageBlocks = prevAssistant ? findImageBlocks(prevAssistant) : []
const imageParts = await convertImageBlockToImagePart(imageBlocks)
// If no images to merge, return messages as-is
if (imageParts.length === 0) {
return sdkMessages
}
// Build the new last user message with merged images
const lastUserSdk = sdkMessages[lastUserSdkIndex] as UserModelMessage
let finalUserParts: Array<TextPart | FilePart | ImagePart> = []
if (lastUserSdk) {
if (typeof lastUserSdk.content === 'string') {
finalUserParts.push({ type: 'text', text: lastUserSdk.content })
} else if (Array.isArray(lastUserSdk.content)) {
finalUserParts = [...lastUserSdk.content]
}
if (typeof lastUserSdk.content === 'string') {
finalUserParts.push({ type: 'text', text: lastUserSdk.content })
} else if (Array.isArray(lastUserSdk.content)) {
finalUserParts = [...lastUserSdk.content]
}
// Append images from the previous assistant message if any
if (prevAssistant) {
const imageBlocks = findImageBlocks(prevAssistant)
const imageParts = await convertImageBlockToImagePart(imageBlocks)
if (imageParts.length > 0) {
finalUserParts.push(...imageParts)
}
}
// Append images from the previous assistant message
finalUserParts.push(...imageParts)
// If we couldn't find a last user message, fall back to returning collected system messages only
if (!lastUserSdk) {
return systemMessages
}
// Replace the last user message with the merged version
const result = [...sdkMessages]
result[lastUserSdkIndex] = { role: 'user', content: finalUserParts }
return [...systemMessages, { role: 'user', content: finalUserParts }]
return result
}
return sdkMessages

View File

@ -26,11 +26,13 @@ import {
isSupportedThinkingTokenModel,
isWebSearchModel
} from '@renderer/config/models'
import { getHubModeSystemPrompt } from '@renderer/config/prompts-code-mode'
import { fetchAllActiveServerTools } from '@renderer/services/ApiService'
import { getDefaultModel } from '@renderer/services/AssistantService'
import store from '@renderer/store'
import type { CherryWebSearchConfig } from '@renderer/store/websearch'
import type { Model } from '@renderer/types'
import { type Assistant, type MCPTool, type Provider, SystemProviderIds } from '@renderer/types'
import { type Assistant, getEffectiveMcpMode, type MCPTool, type Provider, SystemProviderIds } from '@renderer/types'
import type { StreamTextParams } from '@renderer/types/aiCoreTypes'
import { mapRegexToPatterns } from '@renderer/utils/blacklistMatchPattern'
import { replacePromptVariables } from '@renderer/utils/prompt'
@ -241,8 +243,18 @@ export async function buildStreamTextParams(
params.tools = tools
}
if (assistant.prompt) {
params.system = await replacePromptVariables(assistant.prompt, model.name)
let systemPrompt = assistant.prompt ? await replacePromptVariables(assistant.prompt, model.name) : ''
if (getEffectiveMcpMode(assistant) === 'auto') {
const allActiveTools = await fetchAllActiveServerTools()
const autoModePrompt = getHubModeSystemPrompt(allActiveTools)
if (autoModePrompt) {
systemPrompt = systemPrompt ? `${systemPrompt}\n\n${autoModePrompt}` : autoModePrompt
}
}
if (systemPrompt) {
params.system = systemPrompt
}
logger.debug('params', params)

View File

@ -1,5 +1,5 @@
import { formatPrivateKey, hasProviderConfig, ProviderConfigFactory } from '@cherrystudio/ai-core/provider'
import { isOpenAIChatCompletionOnlyModel } from '@renderer/config/models'
import { isOpenAIChatCompletionOnlyModel, isOpenAIReasoningModel } from '@renderer/config/models'
import {
getAwsBedrockAccessKeyId,
getAwsBedrockApiKey,
@ -11,6 +11,7 @@ import { createVertexProvider, isVertexAIConfigured } from '@renderer/hooks/useV
import { getProviderByModel } from '@renderer/services/AssistantService'
import { getProviderById } from '@renderer/services/ProviderService'
import store from '@renderer/store'
import type { EndpointType } from '@renderer/types'
import { isSystemProvider, type Model, type Provider, SystemProviderIds } from '@renderer/types'
import type { OpenAICompletionsStreamOptions } from '@renderer/types/aiCoreTypes'
import {
@ -29,6 +30,7 @@ import {
isNewApiProvider,
isOllamaProvider,
isPerplexityProvider,
isSupportDeveloperRoleProvider,
isSupportStreamOptionsProvider,
isVertexProvider
} from '@renderer/utils/provider'
@ -139,6 +141,48 @@ export function adaptProvider({ provider, model }: { provider: Provider; model?:
return adaptedProvider
}
interface BaseExtraOptions {
fetch?: typeof fetch
endpoint: string
mode?: 'responses' | 'chat'
headers: Record<string, string>
}
interface AzureOpenAIExtraOptions extends BaseExtraOptions {
apiVersion: string
useDeploymentBasedUrls: true | undefined
}
interface BedrockApiKeyExtraOptions extends BaseExtraOptions {
region: string
apiKey: string
}
interface BedrockAccessKeyExtraOptions extends BaseExtraOptions {
region: string
accessKeyId: string
secretAccessKey: string
}
type BedrockExtraOptions = BedrockApiKeyExtraOptions | BedrockAccessKeyExtraOptions
interface VertexExtraOptions extends BaseExtraOptions {
project: string
location: string
googleCredentials: {
privateKey: string
clientEmail: string
}
}
interface CherryInExtraOptions extends BaseExtraOptions {
endpointType?: EndpointType
anthropicBaseURL?: string
geminiBaseURL?: string
}
type ExtraOptions = BedrockExtraOptions | AzureOpenAIExtraOptions | VertexExtraOptions | CherryInExtraOptions
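// Illustrative shape (hypothetical values): an Azure provider with a dated apiVersion such as
// '2024-06-01' produces extraOptions roughly like
//   { fetch, endpoint, mode: 'chat', headers, apiVersion: '2024-06-01', useDeploymentBasedUrls: true }
// while 'preview' / 'v1' versions omit useDeploymentBasedUrls (see the Azure branch below).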
/**
* Convert a Provider into an AI SDK provider configuration
*
@ -157,6 +201,8 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
includeUsage = store.getState().settings.openAI?.streamOptions?.includeUsage
}
// Special cases: some providers need to return early
// Copilot
const isCopilotProvider = actualProvider.id === SystemProviderIds.copilot
if (isCopilotProvider) {
const storedHeaders = store.getState().copilot.defaultHeaders ?? {}
@ -176,6 +222,7 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
}
}
// Ollama
if (isOllamaProvider(actualProvider)) {
return {
providerId: 'ollama',
@ -189,98 +236,142 @@ export function providerToAiSdkConfig(actualProvider: Provider, model: Model): A
}
}
// Handle OpenAI mode
const extraOptions: any = {}
extraOptions.endpoint = endpoint
if (actualProvider.type === 'openai-response' && !isOpenAIChatCompletionOnlyModel(model)) {
extraOptions.mode = 'responses'
} else if (aiSdkProviderId === 'openai' || (aiSdkProviderId === 'cherryin' && actualProvider.type === 'openai')) {
extraOptions.mode = 'chat'
// Generally, construct extraOptions based on the provider & model
// Treat it as an OpenAI-like provider
// Construct baseExtraOptions first
// About the Azure mode:
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/latest
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/responses?tabs=python-key#responses-api
let mode: BaseExtraOptions['mode']
if (
(actualProvider.type === 'openai-response' && !isOpenAIChatCompletionOnlyModel(model)) ||
aiSdkProviderId === 'azure-responses'
) {
mode = 'responses'
} else if (
aiSdkProviderId === 'openai' ||
(aiSdkProviderId === 'cherryin' && actualProvider.type === 'openai') ||
aiSdkProviderId === 'azure'
) {
mode = 'chat'
}
extraOptions.headers = {
const headers: BaseExtraOptions['headers'] = {
...defaultAppHeaders(),
...actualProvider.extra_headers
}
if (aiSdkProviderId === 'openai') {
extraOptions.headers['X-Api-Key'] = baseConfig.apiKey
if (actualProvider.extra_headers?.['X-Api-Key'] === undefined) {
headers['X-Api-Key'] = baseConfig.apiKey
}
}
// azure
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/latest
// https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/responses?tabs=python-key#responses-api
if (aiSdkProviderId === 'azure-responses') {
extraOptions.mode = 'responses'
} else if (aiSdkProviderId === 'azure') {
extraOptions.mode = 'chat'
let _fetch: typeof fetch | undefined
// Apply developer-to-system role conversion for providers that don't support the 'developer' role
// bug: https://github.com/vercel/ai/issues/10982
// fix PR: https://github.com/vercel/ai/pull/11127
// TODO: the fix was not backported to v5; remove this code when upgrading to v6
if (!isSupportDeveloperRoleProvider(actualProvider) || !isOpenAIReasoningModel(model)) {
_fetch = createDeveloperToSystemFetch(fetch)
}
const baseExtraOptions = {
fetch: _fetch,
endpoint,
mode,
headers
} as const satisfies BaseExtraOptions
// Create provider-specific fields in extraOptions
let extraOptions: ExtraOptions | undefined
if (isAzureOpenAIProvider(actualProvider)) {
const apiVersion = actualProvider.apiVersion?.trim()
let useDeploymentBasedUrls: true | undefined
if (apiVersion) {
extraOptions.apiVersion = apiVersion
if (!['preview', 'v1'].includes(apiVersion)) {
extraOptions.useDeploymentBasedUrls = true
useDeploymentBasedUrls = true
}
}
}
// bedrock
if (aiSdkProviderId === 'bedrock') {
extraOptions = {
...baseExtraOptions,
apiVersion,
useDeploymentBasedUrls
} satisfies AzureOpenAIExtraOptions
} else if (aiSdkProviderId === 'bedrock') {
// bedrock
const authType = getAwsBedrockAuthType()
extraOptions.region = getAwsBedrockRegion()
const region = getAwsBedrockRegion()
if (authType === 'apiKey') {
extraOptions.apiKey = getAwsBedrockApiKey()
extraOptions = {
...baseExtraOptions,
region,
apiKey: getAwsBedrockApiKey()
} satisfies BedrockApiKeyExtraOptions
} else {
extraOptions.accessKeyId = getAwsBedrockAccessKeyId()
extraOptions.secretAccessKey = getAwsBedrockSecretAccessKey()
extraOptions = {
...baseExtraOptions,
region,
accessKeyId: getAwsBedrockAccessKeyId(),
secretAccessKey: getAwsBedrockSecretAccessKey()
} satisfies BedrockAccessKeyExtraOptions
}
}
// google-vertex
if (aiSdkProviderId === 'google-vertex' || aiSdkProviderId === 'google-vertex-anthropic') {
} else if (aiSdkProviderId === 'google-vertex' || aiSdkProviderId === 'google-vertex-anthropic') {
// google-vertex
if (!isVertexAIConfigured()) {
throw new Error('VertexAI is not configured. Please configure project, location and service account credentials.')
}
const { project, location, googleCredentials } = createVertexProvider(actualProvider)
extraOptions.project = project
extraOptions.location = location
extraOptions.googleCredentials = {
...googleCredentials,
privateKey: formatPrivateKey(googleCredentials.privateKey)
}
extraOptions = {
...baseExtraOptions,
project,
location,
googleCredentials: {
...googleCredentials,
privateKey: formatPrivateKey(googleCredentials.privateKey)
}
} satisfies VertexExtraOptions
baseConfig.baseURL += aiSdkProviderId === 'google-vertex' ? '/publishers/google' : '/publishers/anthropic/models'
}
// cherryin
if (aiSdkProviderId === 'cherryin') {
if (model.endpoint_type) {
extraOptions.endpointType = model.endpoint_type
}
} else if (aiSdkProviderId === 'cherryin') {
// CherryIN API Host
const cherryinProvider = getProviderById(SystemProviderIds.cherryin)
const endpointType: EndpointType | undefined = model.endpoint_type
let anthropicBaseURL: string | undefined
let geminiBaseURL: string | undefined
if (cherryinProvider) {
extraOptions.anthropicBaseURL = cherryinProvider.anthropicApiHost + '/v1'
extraOptions.geminiBaseURL = cherryinProvider.apiHost + '/v1beta/models'
anthropicBaseURL = cherryinProvider.anthropicApiHost + '/v1'
geminiBaseURL = cherryinProvider.apiHost + '/v1beta/models'
}
extraOptions = {
...baseExtraOptions,
endpointType,
anthropicBaseURL,
geminiBaseURL
} satisfies CherryInExtraOptions
} else {
extraOptions = baseExtraOptions
}
if (hasProviderConfig(aiSdkProviderId) && aiSdkProviderId !== 'openai-compatible') {
// if the provider has a dedicated AI SDK provider
const options = ProviderConfigFactory.fromProvider(aiSdkProviderId, baseConfig, extraOptions)
return {
providerId: aiSdkProviderId,
options
}
}
// Otherwise fall back to openai-compatible
const options = ProviderConfigFactory.createOpenAICompatible(baseConfig.baseURL, baseConfig.apiKey)
return {
providerId: 'openai-compatible',
options: {
...options,
name: actualProvider.id,
...extraOptions,
includeUsage
} else {
// otherwise, fall back to openai-compatible
const options = ProviderConfigFactory.createOpenAICompatible(baseConfig.baseURL, baseConfig.apiKey)
return {
providerId: 'openai-compatible',
options: {
...options,
name: actualProvider.id,
...extraOptions,
includeUsage
}
}
}
}
@ -302,6 +393,44 @@ export function isModernSdkSupported(provider: Provider): boolean {
return hasProviderConfig(aiSdkProviderId)
}
/**
* Creates a custom fetch wrapper that converts 'developer' role to 'system' role in request body.
* This is needed for providers that don't support the 'developer' role (e.g., Azure DeepSeek R1).
*
* @param originalFetch - Optional original fetch function to wrap
* @returns A fetch function that transforms the request body
*/
function createDeveloperToSystemFetch(originalFetch?: typeof fetch): typeof fetch {
const baseFetch = originalFetch ?? fetch
return async (input: RequestInfo | URL, init?: RequestInit) => {
let options = init
if (options?.body && typeof options.body === 'string') {
try {
const body = JSON.parse(options.body)
if (body.input && Array.isArray(body.input)) {
let hasChanges = false
body.input = body.input.map((msg: { role: string }) => {
if (msg.role === 'developer') {
hasChanges = true
return { ...msg, role: 'system' }
}
return msg
})
if (hasChanges) {
options = {
...options,
body: JSON.stringify(body)
}
}
}
} catch {
// If parsing fails, just use original body
}
}
return baseFetch(input, options)
}
}
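// Example of the rewrite performed by createDeveloperToSystemFetch (hypothetical request body):
//   before: { "input": [{ "role": "developer", "content": "Be terse." }, { "role": "user", "content": "Hi" }] }
//   after:  { "input": [{ "role": "system", "content": "Be terse." }, { "role": "user", "content": "Hi" }] }
// Only the `input` array is rewritten; bodies that are not valid JSON are passed through unchanged.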
/**
* Prepare configuration for special providers
*/
@ -360,5 +489,6 @@ export async function prepareSpecialProviderConfig(
}
}
}
return config
}

View File

@ -3,7 +3,7 @@ import { type AnthropicProviderOptions } from '@ai-sdk/anthropic'
import type { GoogleGenerativeAIProviderOptions } from '@ai-sdk/google'
import type { OpenAIResponsesProviderOptions } from '@ai-sdk/openai'
import type { XaiProviderOptions } from '@ai-sdk/xai'
import { baseProviderIdSchema, customProviderIdSchema } from '@cherrystudio/ai-core/provider'
import { baseProviderIdSchema, customProviderIdSchema, hasProviderConfig } from '@cherrystudio/ai-core/provider'
import { loggerService } from '@logger'
import {
getModelSupportedVerbosity,
@ -616,9 +616,14 @@ function buildGenericProviderOptions(
}
if (enableReasoning) {
if (isInterleavedThinkingModel(model)) {
providerOptions = {
...providerOptions,
sendReasoning: true
// sendReasoning is a patch specific to @ai-sdk/openai-compatible
// Only apply when provider will actually use openai-compatible SDK
// (i.e., no dedicated SDK registered OR explicitly openai-compatible)
if (!hasProviderConfig(providerId) || providerId === 'openai-compatible') {
providerOptions = {
...providerOptions,
sendReasoning: true
}
}
}
}
@ -648,6 +653,10 @@ function buildGenericProviderOptions(
}
}
if (isOpenAIModel(model)) {
providerOptions.strictJsonSchema = false
}
return {
[providerId]: providerOptions
}

View File

@ -28,6 +28,7 @@ import {
isSupportedThinkingTokenDoubaoModel,
isSupportedThinkingTokenGeminiModel,
isSupportedThinkingTokenHunyuanModel,
isSupportedThinkingTokenKimiModel,
isSupportedThinkingTokenMiMoModel,
isSupportedThinkingTokenModel,
isSupportedThinkingTokenQwenModel,
@ -82,10 +83,10 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
// providers that use enable_thinking
if (
isSupportEnableThinkingProvider(provider) &&
(isSupportedThinkingTokenQwenModel(model) ||
isSupportedThinkingTokenHunyuanModel(model) ||
(provider.id === SystemProviderIds.dashscope && isDeepSeekHybridInferenceModel(model)))
(isSupportEnableThinkingProvider(provider) &&
(isSupportedThinkingTokenQwenModel(model) || isSupportedThinkingTokenHunyuanModel(model))) ||
(provider.id === SystemProviderIds.dashscope &&
(isDeepSeekHybridInferenceModel(model) || isSupportedThinkingTokenZhipuModel(model)))
) {
return { enable_thinking: false }
}
@ -109,7 +110,11 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
// models that use 'thinking': Doubao, Zhipu, Kimi, etc.
if (isSupportedThinkingTokenDoubaoModel(model) || isSupportedThinkingTokenZhipuModel(model)) {
if (
isSupportedThinkingTokenDoubaoModel(model) ||
isSupportedThinkingTokenZhipuModel(model) ||
isSupportedThinkingTokenKimiModel(model)
) {
if (provider.id === SystemProviderIds.cerebras) {
return {
disable_reasoning: true
@ -309,18 +314,24 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
}
}
// https://help.aliyun.com/zh/model-studio/deep-thinking
if (provider.id === SystemProviderIds.dashscope) {
// For dashscope: Qwen, DeepSeek, and GLM models use enable_thinking to control thinking
// No effort, only on/off
if (isQwenReasoningModel(model) || isSupportedThinkingTokenZhipuModel(model)) {
return {
enable_thinking: true,
thinking_budget: budgetTokens
}
}
}
// Qwen models use enable_thinking
if (isQwenReasoningModel(model)) {
const thinkConfig = {
enable_thinking: isQwenAlwaysThinkModel(model) || !isSupportEnableThinkingProvider(provider) ? undefined : true,
thinking_budget: budgetTokens
}
if (provider.id === SystemProviderIds.dashscope) {
return {
...thinkConfig,
incremental_output: true
}
}
return thinkConfig
}
@ -413,7 +424,7 @@ export function getReasoningEffort(assistant: Assistant, model: Model): Reasonin
return { thinking: { type: 'enabled' } }
}
if (isSupportedThinkingTokenMiMoModel(model)) {
if (isSupportedThinkingTokenMiMoModel(model) || isSupportedThinkingTokenKimiModel(model)) {
return {
thinking: { type: 'enabled' }
}

View File

@ -182,7 +182,7 @@ const HtmlArtifactsPopup: React.FC<HtmlArtifactsPopupProps> = ({ open, title, ht
ref={previewFrameRef}
key={html} // Force recreate iframe when preview content changes
srcDoc={html}
title="HTML Preview"
title={t('common.html_preview')}
sandbox="allow-scripts allow-same-origin allow-forms"
/>
) : (

View File

@ -128,7 +128,7 @@ export const CodeBlockView: React.FC<Props> = memo(({ children, language, onSave
}, [])
const handleCopySource = useCallback(() => {
navigator.clipboard.writeText(children)
navigator.clipboard.writeText(children.trimEnd())
window.toast.success(t('code_block.copy.success'))
}, [children, t])

View File

@ -0,0 +1,555 @@
import CodeViewer from '@renderer/components/CodeViewer'
import { useCodeStyle } from '@renderer/context/CodeStyleProvider'
import type { SerializedAiSdkError, SerializedAiSdkErrorUnion, SerializedError } from '@renderer/types/error'
import {
isSerializedAiSdkAPICallError,
isSerializedAiSdkDownloadError,
isSerializedAiSdkError,
isSerializedAiSdkErrorUnion,
isSerializedAiSdkInvalidArgumentError,
isSerializedAiSdkInvalidDataContentError,
isSerializedAiSdkInvalidMessageRoleError,
isSerializedAiSdkInvalidPromptError,
isSerializedAiSdkInvalidToolInputError,
isSerializedAiSdkJSONParseError,
isSerializedAiSdkMessageConversionError,
isSerializedAiSdkNoObjectGeneratedError,
isSerializedAiSdkNoSpeechGeneratedError,
isSerializedAiSdkNoSuchModelError,
isSerializedAiSdkNoSuchProviderError,
isSerializedAiSdkNoSuchToolError,
isSerializedAiSdkRetryError,
isSerializedAiSdkToolCallRepairError,
isSerializedAiSdkTooManyEmbeddingValuesForCallError,
isSerializedAiSdkTypeValidationError,
isSerializedAiSdkUnsupportedFunctionalityError,
isSerializedError
} from '@renderer/types/error'
import { formatAiSdkError, formatError, safeToString } from '@renderer/utils/error'
import { parseDataUrl } from '@shared/utils'
import { Button, Modal } from 'antd'
import React, { memo, useCallback, useEffect, useRef, useState } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
interface ErrorDetailModalProps {
open: boolean
onClose: () => void
error?: SerializedError
}
const truncateLargeData = (
data: string,
t: (key: string) => string
): { content: string; truncated: boolean; isLikelyBase64: boolean } => {
const parsed = parseDataUrl(data)
const isLikelyBase64 = parsed?.isBase64 ?? false
if (!data || data.length <= 100_000) {
return { content: data, truncated: false, isLikelyBase64 }
}
if (isLikelyBase64) {
return {
content: `[${t('error.base64DataTruncated')}]`,
truncated: true,
isLikelyBase64: true
}
}
return {
content: data.slice(0, 100_000) + `\n\n... [${t('error.truncated')}]`,
truncated: true,
isLikelyBase64: false
}
}
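// Behaviour sketch (illustrative inputs):
//   truncateLargeData(hugeBase64DataUrl, t)   // data: URL, > 100_000 chars
//     -> { content: '[<localized base64DataTruncated>]', truncated: true, isLikelyBase64: true }
//   truncateLargeData(hugePlainText, t)       // > 100_000 chars
//     -> { content: first 100_000 chars + '\n\n... [<localized truncated>]', truncated: true, isLikelyBase64: false }
//   truncateLargeData(shortText, t)
//     -> { content: shortText, truncated: false, isLikelyBase64: false }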
// --- Styled Components ---
const ErrorDetailContainer = styled.div`
max-height: 60vh;
overflow-y: auto;
`
const ErrorDetailList = styled.div`
display: flex;
flex-direction: column;
gap: 16px;
`
const ErrorDetailItem = styled.div`
display: flex;
flex-direction: column;
gap: 8px;
`
const ErrorDetailLabel = styled.div`
font-weight: 600;
color: var(--color-text);
font-size: 14px;
`
const ErrorDetailValue = styled.div`
font-family: var(--code-font-family);
font-size: 12px;
padding: 8px;
background: var(--color-code-background);
border-radius: 4px;
border: 1px solid var(--color-border);
word-break: break-word;
color: var(--color-text);
`
const StackTrace = styled.div`
background: var(--color-background-soft);
border: 1px solid var(--color-error);
border-radius: 6px;
padding: 12px;
pre {
margin: 0;
white-space: pre-wrap;
word-break: break-word;
font-family: var(--code-font-family);
font-size: 12px;
line-height: 1.4;
color: var(--color-error);
}
`
const TruncatedBadge = styled.span`
margin-left: 8px;
padding: 2px 6px;
font-size: 10px;
font-weight: normal;
color: var(--color-warning);
background: var(--color-warning-bg, rgba(250, 173, 20, 0.1));
border-radius: 4px;
`
// --- Sub-Components ---
const BuiltinError = memo(({ error }: { error: SerializedError }) => {
const { t } = useTranslation()
return (
<>
{error.name && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.name')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.name}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.message && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.message')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.message}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.stack && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.stack')}:</ErrorDetailLabel>
<StackTrace>
<pre>{error.stack}</pre>
</StackTrace>
</ErrorDetailItem>
)}
</>
)
})
const AiSdkErrorBase = memo(({ error }: { error: SerializedAiSdkError }) => {
const { t } = useTranslation()
const tRef = useRef(t)
useEffect(() => {
tRef.current = t
}, [t])
const { highlightCode } = useCodeStyle()
const [highlightedString, setHighlightedString] = useState('')
const [isTruncated, setIsTruncated] = useState(false)
const cause = error.cause
useEffect(() => {
const highlight = async () => {
try {
const { content: truncatedCause, truncated, isLikelyBase64 } = truncateLargeData(cause || '', tRef.current)
setIsTruncated(truncated)
if (isLikelyBase64) {
setHighlightedString(truncatedCause)
return
}
try {
const parsed = JSON.parse(truncatedCause || '{}')
const formatted = JSON.stringify(parsed, null, 2)
const result = await highlightCode(formatted, 'json')
setHighlightedString(result)
} catch {
setHighlightedString(truncatedCause || '')
}
} catch {
setHighlightedString(cause || '')
}
}
const timer = setTimeout(highlight, 0)
return () => clearTimeout(timer)
}, [highlightCode, cause])
return (
<>
<BuiltinError error={error} />
{cause && (
<ErrorDetailItem>
<ErrorDetailLabel>
{t('error.cause')}:{isTruncated && <TruncatedBadge>{t('error.truncatedBadge')}</TruncatedBadge>}
</ErrorDetailLabel>
<ErrorDetailValue>
<div
className="markdown [&_pre]:!bg-transparent [&_pre_span]:whitespace-pre-wrap"
dangerouslySetInnerHTML={{ __html: highlightedString }}
/>
</ErrorDetailValue>
</ErrorDetailItem>
)}
</>
)
})
const TruncatedCodeViewer = memo(
({ value, label, language = 'json' }: { value: string; label: string; language?: string }) => {
const { t } = useTranslation()
const { content, truncated, isLikelyBase64 } = truncateLargeData(value, t)
return (
<ErrorDetailItem>
<ErrorDetailLabel>
{label}:{truncated && <TruncatedBadge>{t('error.truncatedBadge')}</TruncatedBadge>}
</ErrorDetailLabel>
{isLikelyBase64 ? (
<ErrorDetailValue>{content}</ErrorDetailValue>
) : (
<CodeViewer value={content} className="source-view" language={language} expanded />
)}
</ErrorDetailItem>
)
}
)
const AiSdkError = memo(({ error }: { error: SerializedAiSdkErrorUnion }) => {
const { t } = useTranslation()
return (
<ErrorDetailList>
{(isSerializedAiSdkAPICallError(error) || isSerializedAiSdkDownloadError(error)) && error.url && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.requestUrl')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.url}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkAPICallError(error) && error.responseBody && (
<TruncatedCodeViewer value={error.responseBody} label={t('error.responseBody')} />
)}
{(isSerializedAiSdkAPICallError(error) || isSerializedAiSdkDownloadError(error)) && error.statusCode && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.statusCode')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.statusCode}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkAPICallError(error) && (
<>
{error.responseHeaders && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.responseHeaders')}:</ErrorDetailLabel>
<CodeViewer
value={JSON.stringify(error.responseHeaders, null, 2)}
className="source-view"
language="json"
expanded
/>
</ErrorDetailItem>
)}
{error.requestBodyValues && (
<TruncatedCodeViewer value={safeToString(error.requestBodyValues)} label={t('error.requestBodyValues')} />
)}
{error.data && <TruncatedCodeViewer value={safeToString(error.data)} label={t('error.data')} />}
</>
)}
{isSerializedAiSdkDownloadError(error) && error.statusText && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.statusText')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.statusText}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkInvalidArgumentError(error) && error.parameter && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.parameter')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.parameter}</ErrorDetailValue>
</ErrorDetailItem>
)}
{(isSerializedAiSdkInvalidArgumentError(error) || isSerializedAiSdkTypeValidationError(error)) && error.value && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.value')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.value)}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkInvalidDataContentError(error) && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.content')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.content)}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkInvalidMessageRoleError(error) && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.role')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.role}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkInvalidPromptError(error) && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.prompt')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.prompt)}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkInvalidToolInputError(error) && (
<>
{error.toolName && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.toolName')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.toolName}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.toolInput && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.toolInput')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.toolInput}</ErrorDetailValue>
</ErrorDetailItem>
)}
</>
)}
{(isSerializedAiSdkJSONParseError(error) || isSerializedAiSdkNoObjectGeneratedError(error)) && error.text && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.text')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.text}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkMessageConversionError(error) && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.originalMessage')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.originalMessage)}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkNoSpeechGeneratedError(error) && error.responses && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.responses')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.responses.join(', ')}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkNoObjectGeneratedError(error) && (
<>
{error.response && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.response')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.response)}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.usage && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.usage')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.usage)}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.finishReason && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.finishReason')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.finishReason}</ErrorDetailValue>
</ErrorDetailItem>
)}
</>
)}
{(isSerializedAiSdkNoSuchModelError(error) ||
isSerializedAiSdkNoSuchProviderError(error) ||
isSerializedAiSdkTooManyEmbeddingValuesForCallError(error)) &&
error.modelId && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.modelId')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.modelId}</ErrorDetailValue>
</ErrorDetailItem>
)}
{(isSerializedAiSdkNoSuchModelError(error) || isSerializedAiSdkNoSuchProviderError(error)) && error.modelType && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.modelType')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.modelType}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkNoSuchProviderError(error) && (
<>
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.providerId')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.providerId}</ErrorDetailValue>
</ErrorDetailItem>
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.availableProviders')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.availableProviders.join(', ')}</ErrorDetailValue>
</ErrorDetailItem>
</>
)}
{isSerializedAiSdkNoSuchToolError(error) && (
<>
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.toolName')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.toolName}</ErrorDetailValue>
</ErrorDetailItem>
{error.availableTools && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.availableTools')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.availableTools?.join(', ') || t('common.none')}</ErrorDetailValue>
</ErrorDetailItem>
)}
</>
)}
{isSerializedAiSdkRetryError(error) && (
<>
{error.reason && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.reason')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.reason}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.lastError && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.lastError')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.lastError)}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.errors && error.errors.length > 0 && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.errors')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.errors.map((e) => safeToString(e)).join('\n\n')}</ErrorDetailValue>
</ErrorDetailItem>
)}
</>
)}
{isSerializedAiSdkTooManyEmbeddingValuesForCallError(error) && (
<>
{error.provider && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.provider')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.provider}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.maxEmbeddingsPerCall && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.maxEmbeddingsPerCall')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.maxEmbeddingsPerCall}</ErrorDetailValue>
</ErrorDetailItem>
)}
{error.values && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.values')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.values)}</ErrorDetailValue>
</ErrorDetailItem>
)}
</>
)}
{isSerializedAiSdkToolCallRepairError(error) && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.originalError')}:</ErrorDetailLabel>
<ErrorDetailValue>{safeToString(error.originalError)}</ErrorDetailValue>
</ErrorDetailItem>
)}
{isSerializedAiSdkUnsupportedFunctionalityError(error) && (
<ErrorDetailItem>
<ErrorDetailLabel>{t('error.functionality')}:</ErrorDetailLabel>
<ErrorDetailValue>{error.functionality}</ErrorDetailValue>
</ErrorDetailItem>
)}
<AiSdkErrorBase error={error} />
</ErrorDetailList>
)
})
// --- Main Component ---
const ErrorDetailModal: React.FC<ErrorDetailModalProps> = ({ open, onClose, error }) => {
const { t } = useTranslation()
const copyErrorDetails = useCallback(() => {
if (!error) return
let errorText: string
if (isSerializedAiSdkError(error)) {
errorText = formatAiSdkError(error)
} else if (isSerializedError(error)) {
errorText = formatError(error)
} else {
errorText = safeToString(error)
}
navigator.clipboard.writeText(errorText)
window.toast.addToast({ title: t('message.copied') })
}, [error, t])
const renderErrorDetails = (error?: SerializedError) => {
if (!error) return <div>{t('error.unknown')}</div>
if (isSerializedAiSdkErrorUnion(error)) {
return <AiSdkError error={error} />
}
return (
<ErrorDetailList>
<BuiltinError error={error} />
</ErrorDetailList>
)
}
return (
<Modal
centered
title={t('error.detail')}
open={open}
onCancel={onClose}
footer={[
<Button key="copy" variant="text" color="default" onClick={copyErrorDetails}>
{t('common.copy')}
</Button>,
<Button key="close" variant="text" color="default" onClick={onClose}>
{t('common.close')}
</Button>
]}
width="80%"
style={{ maxWidth: '1200px', minWidth: '600px' }}>
<ErrorDetailContainer>{renderErrorDetails(error)}</ErrorDetailContainer>
</Modal>
)
}
export { ErrorDetailModal }
export default ErrorDetailModal
export type { ErrorDetailModalProps }

View File

@ -3,6 +3,7 @@ import NavigationService from '@renderer/services/NavigationService'
import type { Model } from '@renderer/types'
import { ArrowUpRight } from 'lucide-react'
import type { FC, MouseEvent } from 'react'
import { useTranslation } from 'react-i18next'
import styled from 'styled-components'
import IndicatorLight from './IndicatorLight'
@ -15,6 +16,8 @@ interface Props {
}
export const FreeTrialModelTag: FC<Props> = ({ model, showLabel = true }) => {
const { t } = useTranslation()
if (model.provider !== 'cherryai') {
return null
}
@ -57,7 +60,7 @@ export const FreeTrialModelTag: FC<Props> = ({ model, showLabel = true }) => {
return (
<Container>
<IndicatorLight size={6} color="var(--color-primary)" animation={false} shadow={false} />
<PoweredBy>Powered by </PoweredBy>
<PoweredBy>{t('common.powered_by')}</PoweredBy>
<LinkText onClick={onSelectProvider}>{getProviderLabel(providerId)}</LinkText>
</Container>
)

View File

@ -1,27 +1,38 @@
import { CheckCircleFilled, CloseCircleFilled, ExclamationCircleFilled, LoadingOutlined } from '@ant-design/icons'
import { HealthStatus } from '@renderer/types/healthCheck'
import { Flex, Tooltip, Typography } from 'antd'
import React, { memo } from 'react'
import React, { memo, useCallback } from 'react'
import styled from 'styled-components'
import type { HealthResult } from './types'
import { useHealthStatus } from './useHealthStatus'
export interface HealthStatusIndicatorProps {
interface HealthStatusIndicatorProps {
results: HealthResult[]
loading?: boolean
showLatency?: boolean
onErrorClick?: (result: HealthResult) => void
}
const HealthStatusIndicator: React.FC<HealthStatusIndicatorProps> = ({
results,
loading = false,
showLatency = false
showLatency = false,
onErrorClick
}) => {
const { overallStatus, tooltip, latencyText } = useHealthStatus({
results,
showLatency
})
const handleClick = useCallback(() => {
if (!onErrorClick) return
const failedResult = results.find((r) => r.status === HealthStatus.FAILED)
if (failedResult) {
onErrorClick(failedResult)
}
}, [onErrorClick, results])
if (loading) {
return (
<IndicatorWrapper $type="checking">
@ -32,17 +43,19 @@ const HealthStatusIndicator: React.FC<HealthStatusIndicatorProps> = ({
if (overallStatus === 'not_checked') return null
const isClickable = onErrorClick && results.some((r) => r.status === HealthStatus.FAILED)
let icon: React.ReactNode = null
switch (overallStatus) {
case 'success':
icon = <CheckCircleFilled />
break
case 'error':
icon = <CloseCircleFilled />
break
case 'partial':
icon = <ExclamationCircleFilled />
case 'partial': {
const IconComponent = overallStatus === 'error' ? CloseCircleFilled : ExclamationCircleFilled
icon = <IconComponent />
break
}
default:
return null
}
@ -51,19 +64,25 @@ const HealthStatusIndicator: React.FC<HealthStatusIndicatorProps> = ({
<Flex align="center" gap={6}>
{latencyText && <LatencyText type="secondary">{latencyText}</LatencyText>}
<Tooltip title={tooltip} styles={{ body: { userSelect: 'text' } }}>
<IndicatorWrapper $type={overallStatus}>{icon}</IndicatorWrapper>
<IndicatorWrapper
$type={overallStatus}
$clickable={isClickable}
onClick={isClickable ? handleClick : undefined}>
{icon}
</IndicatorWrapper>
</Tooltip>
</Flex>
)
}
const IndicatorWrapper = styled.div<{ $type: string }>`
const IndicatorWrapper = styled.div<{ $type: string; $clickable?: boolean }>`
display: flex;
align-items: center;
justify-content: center;
font-size: 14px;
color: ${(props) => {
switch (props.$type) {
cursor: ${({ $clickable }) => ($clickable ? 'pointer' : 'auto')};
color: ${({ $type }) => {
switch ($type) {
case 'success':
return 'var(--color-status-success)'
case 'error':

View File

@ -1,3 +1,4 @@
import type { SerializedError } from '@renderer/types/error'
import type { HealthStatus } from '@renderer/types/healthCheck'
/**
@ -6,7 +7,7 @@ import type { HealthStatus } from '@renderer/types/healthCheck'
export interface HealthResult {
status: HealthStatus
latency?: number
error?: string
error?: SerializedError
// Extra context shown in the Tooltip, e.g. the API key or model name
label?: string
}

View File

@ -77,7 +77,7 @@ export const useHealthStatus = ({ results, showLatency = false }: UseHealthStatu
return (
<li key={idx} style={{ marginBottom: idx === results.length - 1 ? 0 : '10px' }}>
<Flex align="center" justify="space-between">
<Flex align="flex-start" gap={5}>
<strong style={{ color: statusColor }}>{statusText}</strong>
{result.label}
</Flex>
@ -86,8 +86,8 @@ export const useHealthStatus = ({ results, showLatency = false }: UseHealthStatu
{t('settings.provider.api.key.check.latency')}: {formatLatency(result.latency)}
</div>
)}
{result.error && result.status === HealthStatus.FAILED && (
<div style={{ marginTop: 2 }}>{result.error}</div>
{result.status === HealthStatus.FAILED && result.error?.message && (
<div style={{ marginTop: 2 }}>{result.error.message}</div>
)}
</li>
)

View File

@ -1,4 +1,4 @@
import { DEFAULT_MIN_APPS } from '@renderer/config/minapps'
import { allMinApps } from '@renderer/config/minapps'
import type { MinAppType } from '@renderer/types'
import type { FC } from 'react'
@ -10,10 +10,10 @@ interface Props {
}
const MinAppIcon: FC<Props> = ({ app, size = 48, style, sidebar = false }) => {
// First try to find in DEFAULT_MIN_APPS for predefined styling
const _app = DEFAULT_MIN_APPS.find((item) => item.id === app.id)
// First try to find in allMinApps for predefined styling
const _app = allMinApps.find((item) => item.id === app.id)
// If found in DEFAULT_MIN_APPS, use predefined styling
// If found in allMinApps, use predefined styling
if (_app) {
return (
<img
@ -34,7 +34,7 @@ const MinAppIcon: FC<Props> = ({ app, size = 48, style, sidebar = false }) => {
)
}
// If not found in DEFAULT_MIN_APPS but app has logo, use it (for temporary apps)
// If not found in allMinApps but app has logo, use it (for temporary apps)
if (app.logo) {
return (
<img

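The comments in this hunk, together with the test in the next file, describe a three-step fallback: a predefined entry in allMinApps wins, otherwise a temporary app's own logo is used, otherwise nothing is rendered. A condensed sketch of that order — the helper name is hypothetical, and the assumption that predefined entries carry a logo field is not confirmed by this diff:

```ts
import { allMinApps } from '@renderer/config/minapps'
import type { MinAppType } from '@renderer/types'

// Hypothetical helper mirroring the fallback order in MinAppIcon.
function resolveMinAppLogo(app: MinAppType): string | undefined {
  const predefined = allMinApps.find((item) => item.id === app.id)
  if (predefined) return predefined.logo // predefined styling/logo takes priority
  if (app.logo) return app.logo // temporary apps supply their own logo
  return undefined // MinAppIcon renders nothing in this case (see the test in the next file)
}
```
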
View File

@ -4,7 +4,7 @@ import { describe, expect, it, vi } from 'vitest'
import MinAppIcon from '../MinAppIcon'
vi.mock('@renderer/config/minapps', () => ({
DEFAULT_MIN_APPS: [
allMinApps: [
{
id: 'test-app-1',
name: 'Test App 1',
@ -52,7 +52,7 @@ describe('MinAppIcon', () => {
})
})
it('should return null when app is not found in DEFAULT_MIN_APPS', () => {
it('should return null when app is not found in allMinApps', () => {
const unknownApp = {
id: 'unknown-app',
name: 'Unknown App',

View File

@ -77,7 +77,7 @@ const InputEmbeddingDimension = ({
<Tooltip title={t('knowledge.dimensions_auto_set')}>
<Button
role="button"
aria-label="Get embedding dimension"
aria-label={t('common.get_embedding_dimension')}
disabled={disabled || loading}
onClick={handleFetchDimension}
icon={<RefreshIcon size={16} className={loading ? 'animation-rotate' : ''} />}

View File

@ -1,7 +1,7 @@
import { loggerService } from '@logger'
import MinAppIcon from '@renderer/components/Icons/MinAppIcon'
import IndicatorLight from '@renderer/components/IndicatorLight'
import { loadCustomMiniApp, ORIGIN_DEFAULT_MIN_APPS, updateDefaultMinApps } from '@renderer/config/minapps'
import { loadCustomMiniApp, ORIGIN_DEFAULT_MIN_APPS, updateAllMinApps } from '@renderer/config/minapps'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'
import { useMinapps } from '@renderer/hooks/useMinapps'
import { useRuntime } from '@renderer/hooks/useRuntime'
@ -93,7 +93,7 @@ const MinApp: FC<Props> = ({ app, onClick, size = 60, isLast }) => {
await window.api.file.writeWithId('custom-minapps.json', JSON.stringify(updatedApps, null, 2))
window.toast.success(t('settings.miniapps.custom.remove_success'))
const reloadedApps = [...ORIGIN_DEFAULT_MIN_APPS, ...(await loadCustomMiniApp())]
updateDefaultMinApps(reloadedApps)
updateAllMinApps(reloadedApps)
updateMinapps(minapps.filter((item) => item.id !== app.id))
updatePinnedMinapps(pinned.filter((item) => item.id !== app.id))
updateDisabledMinapps(disabled.filter((item) => item.id !== app.id))
@ -122,7 +122,7 @@ const MinApp: FC<Props> = ({ app, onClick, size = 60, isLast }) => {
</StyledIndicator>
)}
</IconContainer>
<AppTitle>{isLast ? t('settings.miniapps.custom.title') : app.name}</AppTitle>
<AppTitle>{isLast ? t('settings.miniapps.custom.title') : app.nameKey ? t(app.nameKey) : app.name}</AppTitle>
</Container>
</Dropdown>
)

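The title line above now prefers a translation key when the app provides one. Extracted for clarity — the helper name is hypothetical, and nameKey being an optional field on MinAppType is inferred only from its usage above:

```ts
import type { TFunction } from 'i18next'
import type { MinAppType } from '@renderer/types'

// Hypothetical extraction of the title logic used in <AppTitle> above.
const getMinAppTitle = (app: MinAppType, t: TFunction): string =>
  app.nameKey ? t(app.nameKey) : app.name
```
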
View File

@ -13,7 +13,7 @@ import {
import { loggerService } from '@logger'
import WindowControls from '@renderer/components/WindowControls'
import { isDev, isLinux, isMac, isWin } from '@renderer/config/constant'
import { DEFAULT_MIN_APPS } from '@renderer/config/minapps'
import { allMinApps } from '@renderer/config/minapps'
import { useBridge } from '@renderer/hooks/useBridge'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'
import { useMinapps } from '@renderer/hooks/useMinapps'
@ -246,7 +246,7 @@ const MinappPopupContainer: React.FC = () => {
(acc, app) => ({
...acc,
[app.id]: {
canPinned: DEFAULT_MIN_APPS.some((item) => item.id === app.id),
canPinned: allMinApps.some((item) => item.id === app.id),
isPinned: pinned.some((item) => item.id === app.id),
canOpenExternalLink: app.url.startsWith('http://') || app.url.startsWith('https://')
}

View File

@ -8,7 +8,7 @@ import { isPreprocessProviderId, isWebSearchProviderId } from '@renderer/types'
import type { ApiKeyConnectivity, ApiKeyWithStatus } from '@renderer/types/healthCheck'
import { HealthStatus } from '@renderer/types/healthCheck'
import { formatApiKeys, splitApiKeyString } from '@renderer/utils/api'
import { formatErrorMessage } from '@renderer/utils/error'
import { serializeHealthCheckError } from '@renderer/utils/error'
import type { TFunction } from 'i18next'
import { isEmpty } from 'lodash'
import { useCallback, useMemo, useState } from 'react'
@ -218,17 +218,19 @@ export function useApiKeys({ provider, updateProvider }: UseApiKeysProps) {
latency,
error: undefined
})
} catch (error: any) {
} catch (error: unknown) {
  // Connectivity check failed
const serializedError = serializeHealthCheckError(error)
updateConnectivityState(keyToCheck, {
checking: false,
status: HealthStatus.FAILED,
error: formatErrorMessage(error),
error: serializedError,
model: undefined,
latency: undefined
})
logger.error('failed to validate the connectivity of the api key', error)
logger.error('failed to validate the connectivity of the api key', error as Error)
}
},
[keys, connectivityStates, updateConnectivityState, provider]

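This hunk swaps the formatted error string for a structured SerializedError produced by serializeHealthCheckError; the useHealthStatus hunk earlier only ever reads error.message from it. The real implementation lives in @renderer/utils/error and is not shown in this diff, so the sketch below captures only the contract those call sites imply:

```ts
// SerializedError's exact shape is not shown here; call sites only read .message,
// so a minimal structural stand-in is assumed.
type SerializedErrorLike = { name?: string; message: string }

function serializeHealthCheckErrorSketch(error: unknown): SerializedErrorLike {
  if (error instanceof Error) return { name: error.name, message: error.message }
  return { message: typeof error === 'string' ? error : JSON.stringify(error) ?? String(error) }
}
```
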
View File

@ -151,7 +151,7 @@ const PopupContainer: React.FC<Props> = ({
{showTranslate && (
<TranslateButton
onClick={handleTranslate}
aria-label="Translate text"
aria-label={t('common.translate_text')}
disabled={isTranslating || !textValue.trim()}>
{isTranslating ? <LoadingOutlined spin /> : <Languages size={16} />}
</TranslateButton>

View File

@ -4,9 +4,11 @@ import { NodeViewContent, NodeViewWrapper, type ReactNodeViewProps, ReactNodeVie
import { Button, Select, Tooltip } from 'antd'
import type { FC } from 'react'
import { useCallback, useEffect, useState } from 'react'
import { useTranslation } from 'react-i18next'
const CodeBlockNodeView: FC<ReactNodeViewProps> = (props) => {
const { node, updateAttributes } = props
const { t } = useTranslation()
const [languageOptions, setLanguageOptions] = useState<string[]>(DEFAULT_LANGUAGES)
// Detect language from node attrs or fallback
@ -65,7 +67,7 @@ const CodeBlockNodeView: FC<ReactNodeViewProps> = (props) => {
options={languageOptions.map((lang) => ({ value: lang, label: lang }))}
style={{ minWidth: 90 }}
/>
<Tooltip title="Copy">
<Tooltip title={t('common.copy')}>
<Button
size="small"
type="text"

View File

@ -580,7 +580,7 @@ const RichEditor = ({
<GripVertical />
</Tooltip>
</DragHandle>
<EditorContent style={{ height: '100%' }} editor={editor} />
<EditorContent style={{ minHeight: '100%' }} editor={editor} />
</StyledEditorContent>
</Scrollbar>
{enableContentSearch && (

View File

@ -2,12 +2,13 @@ import { PlusOutlined } from '@ant-design/icons'
import { loggerService } from '@logger'
import { Sortable, useDndReorder } from '@renderer/components/dnd'
import HorizontalScrollContainer from '@renderer/components/HorizontalScrollContainer'
import { isMac } from '@renderer/config/constant'
import { DEFAULT_MIN_APPS } from '@renderer/config/minapps'
import { isLinux, isMac } from '@renderer/config/constant'
import { allMinApps } from '@renderer/config/minapps'
import { useTheme } from '@renderer/context/ThemeProvider'
import { useFullscreen } from '@renderer/hooks/useFullscreen'
import { useMinappPopup } from '@renderer/hooks/useMinappPopup'
import { useMinapps } from '@renderer/hooks/useMinapps'
import { useSettings } from '@renderer/hooks/useSettings'
import { getThemeModeLabel, getTitleLabel } from '@renderer/i18n/label'
import tabsService from '@renderer/services/TabsService'
import { useAppDispatch, useAppSelector } from '@renderer/store'
@ -57,7 +58,7 @@ const getTabIcon = (
// Check if it's a minapp tab (format: apps:appId)
if (tabId.startsWith('apps:')) {
const appId = tabId.replace('apps:', '')
let app = [...DEFAULT_MIN_APPS, ...minapps].find((app) => app.id === appId)
let app = [...allMinApps, ...minapps].find((app) => app.id === appId)
// If not found in permanent apps, search in temporary apps cache
// The cache stores apps opened via openSmartMinapp() for top navbar mode
@ -122,6 +123,7 @@ const TabsContainer: React.FC<TabsContainerProps> = ({ children }) => {
const { settedTheme, toggleTheme } = useTheme()
const { hideMinappPopup, minAppsCache } = useMinappPopup()
const { minapps } = useMinapps()
const { useSystemTitleBar } = useSettings()
const { t } = useTranslation()
const getTabId = (path: string): string => {
@ -138,7 +140,7 @@ const TabsContainer: React.FC<TabsContainerProps> = ({ children }) => {
// Check if it's a minapp tab
if (tabId.startsWith('apps:')) {
const appId = tabId.replace('apps:', '')
let app = [...DEFAULT_MIN_APPS, ...minapps].find((app) => app.id === appId)
let app = [...allMinApps, ...minapps].find((app) => app.id === appId)
// If not found in permanent apps, search in temporary apps cache
// This ensures temporary MinApps display proper titles while being used
@ -268,7 +270,7 @@ const TabsContainer: React.FC<TabsContainerProps> = ({ children }) => {
<PlusOutlined />
</AddTabButton>
</HorizontalScrollContainer>
<RightButtonsContainer>
<RightButtonsContainer style={{ paddingRight: isLinux && useSystemTitleBar ? '12px' : undefined }}>
<Tooltip
title={t('settings.theme.title') + ': ' + getThemeModeLabel(settedTheme)}
mouseEnterDelay={0.8}

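The two lookups changed in this file follow the pattern the comments describe: tab ids of the form `apps:<appId>` are resolved against permanent apps first (allMinApps plus the user's minapps), then against the temporary-apps cache filled by openSmartMinapp(). A condensed sketch — the helper name and the cache's Map shape are assumptions:

```ts
import { allMinApps } from '@renderer/config/minapps'
import type { MinAppType } from '@renderer/types'

// Hypothetical helper for the lookup repeated in getTabIcon and the tab title logic above.
function resolveMinAppForTab(
  tabId: string,
  minapps: MinAppType[],
  minAppsCache: Map<string, MinAppType>
): MinAppType | undefined {
  if (!tabId.startsWith('apps:')) return undefined
  const appId = tabId.replace('apps:', '')
  // Permanent apps win; temporary apps opened via openSmartMinapp() are the fallback.
  return [...allMinApps, ...minapps].find((app) => app.id === appId) ?? minAppsCache.get(appId)
}
```
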
Some files were not shown because too many files have changed in this diff